/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_op.h"
#include <unordered_map>
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

int PoolOutputSize(int input_size, int filter_size, int padding, int stride,
                   bool ceil_mode) {
  int output_size;
  if (!ceil_mode) {
    output_size = (input_size - filter_size + 2 * padding) / stride + 1;
  } else {
    output_size =
        (input_size - filter_size + 2 * padding + stride - 1) / stride + 1;
  }
  PADDLE_ENFORCE(output_size > 0,
                 "Due to the settings of padding(%d), filter_size(%d) and "
                 "stride(%d), the output size is not greater than 0, please "
                 "check again. Input_size:%d",
                 padding, filter_size, stride, input_size);
  return output_size;
}
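
// Worked example (illustrative values): input_size = 6, filter_size = 3,
// padding = 1, stride = 2:
//   ceil_mode = false: (6 - 3 + 2 * 1) / 2 + 1 = 3
//   ceil_mode = true:  (6 - 3 + 2 * 1 + 2 - 1) / 2 + 1 = 4
// ceil_mode rounds the division up, so it may produce one extra output
// element covering the tail of the input.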

void PoolOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of Pooling should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Out"),
                 "Output(Out) of Pooling should not be null.");

  auto in_x_dims = ctx->GetInputDim("X");

  std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
  std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  bool ceil_mode = ctx->Attrs().Get<bool>("ceil_mode");
  bool adaptive = ctx->Attrs().Get<bool>("adaptive");

  PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
                 "Pooling input should be a 4-D or 5-D tensor.");

  if (ctx->Attrs().Get<bool>("global_pooling")) {
    ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
    for (size_t i = 0; i < ksize.size(); ++i) {
      paddings[i] = 0;
      ksize[i] = static_cast<int>(in_x_dims[i + 2]);
    }
  }
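  // e.g. with global_pooling = true and X of shape [2, 3, 7, 7] (NCHW,
  // illustrative values), ksize becomes {7, 7} and paddings {0, 0}, so each
  // channel is pooled down to a single value.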

  PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
                 "Input size and pooling size should be consistent.");
  PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
                    "Strides size and pooling size should be the same.");
  PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
                    "Paddings size and pooling size should be the same.");

  std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
  if (adaptive) {
    output_shape.insert(output_shape.end(), ksize.begin(), ksize.end());
  } else {
    for (size_t i = 0; i < ksize.size(); ++i) {
      output_shape.push_back(PoolOutputSize(
          in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode));
    }
  }
  ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
  ctx->ShareLoD("X", "Out");
}
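
// Worked shape example (illustrative values): X of shape [2, 3, 7, 7]
// (NCHW) with ksize = {2, 2}, strides = {2, 2}, paddings = {0, 0} and
// ceil_mode = false gives Out = [2, 3, 3, 3], since (7 - 2 + 0) / 2 + 1 = 3.
// With adaptive = true and the same ksize, Out would be [2, 3, 2, 2].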

framework::OpKernelType PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(ctx.Input<Tensor>("X")->type(), ctx.GetPlace(),
                                 layout_, library_);
}
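
// Kernel selection above (summary comment, not new behavior): prefer the
// cuDNN kernel when built with CUDA and cuDNN is usable for this context,
// otherwise the MKL-DNN kernel when available on CPU, and finally the
// plain native kernel.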

void PoolOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
  PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
                 "Output(X@GRAD) should not be null.");
  ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}

framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type = ctx.Input<Tensor>("X")->type();
  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }
  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                 library_);
}

void Pool2dOpMaker::Make() {
  AddInput(
      "X",
      "(Tensor) The input tensor of pooling operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");

  AddAttr<std::string>("pooling_type",
                       "(string), pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>("ksize",
                            "(vector<int>) The pooling window "
                            "size(height, width) of the pooling operator. "
                            "If global_pooling = true, ksize and paddings will "
                            "be ignored.");  // TODO(Chengduo): Add checker.
                                             // (Currently,
C
fix doc  
chengduoZH 已提交
171
  // TypedAttrChecker don't support vector type.)
  AddAttr<bool>(
      "global_pooling",
      "(bool, default false) Whether to use global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored.")
      .SetDefault(false);
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default {1, 1}), strides(height, "
                            "width) of pooling operator.")
      .SetDefault({1, 1});  // TODO(Chengduo): Add checker. (Currently,
                            // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings(height, width) of pooling "
      "operator."
187
      "If global_pooling = true, paddings and kernel size will be ignored.")
188
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool, default True) When true, will exclude the zero-padding in the "
      "averaging calculation, otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. The default is True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool, default False) When true, will perform adaptive pooling "
      "instead. The output shape in the H and W dimensions will be the same "
      "as ksize; the input data will be divided evenly into ksize grid areas "
      "and pooling will be performed in each area to get the output value.")
      .SetDefault(false);
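  // e.g. adaptive = true with ksize = {3, 3} on a 7x7 feature map yields a
  // 3x3 output; each output cell pools over its own roughly 7/3-sized input
  // region (illustrative values).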

  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel; cudnn needs to be "
      "installed.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool, default false) Whether to use the ceil function to calculate "
      "output height and width. If set to false, the floor function will "
      "be used.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("use_quantizer",
                "(bool, default false) "
                "Set to true for operators that should be quantized and use "
                "int8 kernel. "
                "Only used on CPU.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("AnyLayout");
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);

  // TODO(dzhwinter): need to register layout transform function

  AddComment(R"DOC(
The pooling2d operation calculates the output based on
the input, pooling_type, ksize, strides, and paddings parameters.
Input(X) and output(Out) are in NCHW format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters (ksize, strides, paddings) are two-element vectors.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:

  Input:

       X shape: $(N, C, H_{in}, W_{in})$

  Output:

       Out shape: $(N, C, H_{out}, W_{out})$

  For ceil_mode = false:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
       $$
  For ceil_mode = true:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
       $$

  For exclusive = false:
       $$
       hstart = i * strides[0] - paddings[0]
       $$
       $$
       hend = hstart + ksize[0]
       $$
       $$
       wstart = j * strides[1] - paddings[1]
       $$
       $$
       wend = wstart + ksize[1]
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
       $$

  For exclusive = true:
       $$
       hstart = max(0, i * strides[0] - paddings[0])
       $$
       $$
       hend = min(H, hstart + ksize[0])
       $$
       $$
       wstart = max(0, j * strides[1] - paddings[1])
       $$
       $$
       wend = min(W, wstart + ksize[1])
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
       $$

)DOC");
}

class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
      const override {
    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Out"}};
  }
};

void Pool3dOpMaker::Make() {
  AddInput("X",
           "(Tensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW, where N is batch size, C is "
           "the number of channels, and D, H and W is the depth, height and "
           "width of "
           "the feature, respectively.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCDHW, "
            "where N is batch size, C is "
            "the number of channels, and D, H and W are the depth, height and "
            "width of the feature, respectively.");

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");  // TODO(Chengduo): Add checker.
                       // (Currently, TypedAttrChecker
                       // doesn't support vector type.)
  AddAttr<bool>(
      "global_pooling",
      "(bool, default false) Whether to use global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored.")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides(depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "exclusive",
      "(bool, default True) When true, will exclude the zero-padding in the "
      "averaging calculation, otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. The default is True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool, default False) When true, will perform adaptive pooling "
      "instead. The output shape in the D, H and W dimensions will be the "
      "same as ksize; the input data will be divided evenly into ksize grid "
      "areas and pooling will be performed in each area to get the output "
      "value.")
      .SetDefault(false);

  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel; cudnn needs to be "
      "installed.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool, default false) Whether to use the ceil function to calculate "
      "output depth, height and width. If set to false, the floor function "
      "will be used.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register layout transform function

  AddComment(R"DOC(
Pool3d Operator.

The pooling3d operation calculates the output based on
the input, pooling_type, ksize, strides, and paddings parameters.
Input(X) and output(Out) are in NCDHW format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters (ksize, strides, paddings)
are three-element vectors. These three elements represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$
  For ceil_mode = false:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + 2 * paddings[2])}{strides[2]} + 1
       $$
  For ceil_mode = true:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + 2 * paddings[2] + strides[2] - 1)}{strides[2]} + 1
       $$

  For exclusive = false:
       $$
       dstart = i * strides[0] - paddings[0]
       $$
       $$
       dend = dstart + ksize[0]
       $$
       $$
       hstart = j * strides[1] - paddings[1]
       $$
       $$
       hend = hstart + ksize[1]
       $$
       $$
       wstart = k * strides[2] - paddings[2]
       $$
       $$
       wend = wstart + ksize[2]
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}
       $$

  For exclusive = true:
       $$
       dstart = max(0, i * strides[0] - paddings[0])
       $$
       $$
       dend = min(D, dstart + ksize[0])
       $$
       $$
       hstart = max(0, j * strides[1] - paddings[1])
       $$
       $$
       hend = min(H, hstart + ksize[1])
       $$
       $$
       wstart = max(0, k * strides[2] - paddings[2])
       $$
       $$
       wend = min(W, wstart + ksize[2])
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
       $$

)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(pool2d, ops::PoolOp, ops::Pool2dOpMaker,
                  ops::PoolOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(pool2d_grad, ops::PoolOpGrad);

REGISTER_OP_CPU_KERNEL(
    pool2d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    pool2d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OPERATOR(pool3d, ops::PoolOp, ops::Pool3dOpMaker,
                  ops::PoolOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad);

REGISTER_OP_CPU_KERNEL(
    pool3d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    pool3d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);
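
// Note: only CPU kernels are registered in this file; the CUDA kernels are
// expected to be registered in a separate .cu source (e.g. pool_op.cu) when
// PaddlePaddle is built with GPU support.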