/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_op.h"
#include <unordered_map>
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

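// Computes the size of one spatial dimension of the pooling output. Floor
// mode uses (input - filter + 2 * padding) / stride + 1; ceil mode adds
// (stride - 1) to the numerator so a partially covered window at the border
// still yields an output element.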
int PoolOutputSize(int input_size, int filter_size, int padding, int stride,
                   bool ceil_mode) {
  int output_size;
  if (!ceil_mode) {
    output_size = (input_size - filter_size + 2 * padding) / stride + 1;
  } else {
    output_size =
        (input_size - filter_size + 2 * padding + stride - 1) / stride + 1;
  }
  PADDLE_ENFORCE(output_size > 0,
                 "Due to the settings of padding(%d), filter_size(%d) and "
                 "stride(%d), the output size is less than or equal to 0, "
                 "please check again. Input_size:%d",
                 padding, filter_size, stride, input_size);
  return output_size;
}

void PoolOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) of Pooling should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Out"),
                 "Out(Output) of Pooling should not be null.");

  auto in_x_dims = ctx->GetInputDim("X");

  std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
  std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  bool ceil_mode = ctx->Attrs().Get<bool>("ceil_mode");
  bool adaptive = ctx->Attrs().Get<bool>("adaptive");

  PADDLE_ENFORCE(in_x_dims.size() == 4 || in_x_dims.size() == 5,
                 "Pooling input should be a 4-D or 5-D tensor.");

  if (ctx->Attrs().Get<bool>("global_pooling")) {
    ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
    for (size_t i = 0; i < ksize.size(); ++i) {
      paddings[i] = 0;
      ksize[i] = static_cast<int>(in_x_dims[i + 2]);
    }
  }

  PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U,
                 "Input size and pooling size should be consistent.");
  PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
                    "Strides size and pooling size should be the same.");
  PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(),
                    "Paddings size and pooling size should be the same.");

  std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
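  // Adaptive pooling fixes the output spatial dims to ksize; otherwise each
  // dim follows the sliding-window formula. Dims unknown at compile time
  // (<= 0) are kept as -1 until runtime.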
  if (adaptive) {
    output_shape.insert(output_shape.end(), ksize.begin(), ksize.end());
  } else {
    for (size_t i = 0; i < ksize.size(); ++i) {
      if (!ctx->IsRuntime() && in_x_dims[i + 2] <= 0) {
        output_shape.push_back(-1);
      } else {
        output_shape.push_back(PoolOutputSize(
            in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode));
      }
    }
  }
  ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
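  // Share the LoD of X with Out so sequence information is preserved.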
  ctx->ShareLoD("X", "Out");
}

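// Select the compute library for the forward kernel: prefer cuDNN when it
// can be used, then MKL-DNN, and fall back to the plain kernel otherwise.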
framework::OpKernelType PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(ctx.Input<Tensor>("X")->type(), ctx.GetPlace(),
                                 layout_, library_);
}

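// The gradient with respect to X has exactly the same shape as X.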
void PoolOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
  PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
                 "Output(X@GRAD) should not be null.");
  ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}

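// Mirrors PoolOp::GetExpectedKernelType (cuDNN first, then MKL-DNN), with an
// extra check that float16 gradients only run on the cuDNN path.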
framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

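  // float16 pooling is only implemented by the cuDNN kernels, so reject
  // float16 inputs on any other library.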
  auto input_data_type = ctx.Input<Tensor>("X")->type();
  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }
  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                 library_);
}

void Pool2dOpMaker::Make() {
  AddInput(
      "X",
      "(Tensor) The input tensor of pooling operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");

  AddAttr<std::string>("pooling_type",
                       "(string), pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>("ksize",
                            "(vector<int>) The pooling window "
                            "size(height, width) of the pooling operator. "
                            "If global_pooling = true, ksize and paddings will "
                            "be ignored.");  // TODO(Chengduo): Add checker.
                                             // (Currently,
C
fix doc  
chengduoZH 已提交
175
  // TypedAttrChecker don't support vector type.)
  AddAttr<bool>(
      "global_pooling",
      "(bool, default false) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored.")
180
      .SetDefault(false);
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default {1, 1}), strides(height, "
                            "width) of pooling operator.")
      .SetDefault({1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings(height, width) of pooling "
      "operator."
      "If global_pooling = true, paddings and kernel size will be ignored.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool, default True) When true, will exclude the zero-padding in the "
      "averaging calculating, otherwise, include the zero-padding. Note, it "
翟飞跃 已提交
197
      "is only used when pooling_type is avg. The default is True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool, default False) When true, will perform adaptive pooling instead, "
      "output shape in H and W dimensions will be same as ksize, input data "
      "will be divided into grids specify by ksize averagely and perform "
      "pooling in each grid area to get output pooling value.")
      .SetDefault(false);

  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool, default false) Whether to use the ceil function to calculate "
      "output height and width. False is the default. If it is set to False, "
      "the floor function will be used.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("use_quantizer",
                "(bool, default false) "
                "Set to true for operators that should be quantized and use "
                "int8 kernel. "
                "Only used on CPU.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NHWC\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("AnyLayout");
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);

  // TODO(dzhwinter): need to register a layout transform function

  AddComment(R"DOC(
The pooling2d operation calculates the output based on
the input, pooling_type, ksize, strides, and paddings parameters.
Input(X) and output(Out) are in NCHW format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters (ksize, strides, paddings) each contain two elements,
which represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:

  Input:

       X shape: $(N, C, H_{in}, W_{in})$

  Output:

       Out shape: $(N, C, H_{out}, W_{out})$

  For ceil_mode = false:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
       $$
  For ceil_mode = true:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
       $$
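
  For example, with $H_{in} = 6$, ksize[0] = 3, paddings[0] = 0 and
  strides[0] = 2, integer division gives $H_{out} = 3 / 2 + 1 = 2$ for
  ceil_mode = false and $H_{out} = 4 / 2 + 1 = 3$ for ceil_mode = true.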

  For exclusive = false:
       $$
       hstart = i * strides[0] - paddings[0]
       $$
       $$
       hend = hstart + ksize[0]
       $$
       $$
       wstart = j * strides[1] - paddings[1]
       $$
       $$
       wend = wstart + ksize[1]
       $$
       $$
       Output(i ,j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
       $$

  For exclusive = true:
       $$
       hstart = max(0, i * strides[0] - paddings[0])
       $$
       $$
       hend = min(H, hstart + ksize[0])
       $$
       $$
       wstart = max(0, j * strides[1] - paddings[1])
       $$
       $$
       wend = min(W, wstart + ksize[1])
       $$
       $$
       Output(i ,j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
       $$

)DOC");
}

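// Infers Out's dtype and variable type from X, since they always match.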
class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
      const override {
    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Out"}};
  }
};

void Pool3dOpMaker::Make() {
  AddInput("X",
           "(Tensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW, where N is batch size, C is "
           "the number of channels, and D, H and W is the depth, height and "
           "width of "
           "the feature, respectively.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator."
            "The format of output tensor is also NCDHW, "
            "where N is batch size, C is "
            "the number of channels, and D, H and W is the depth, height and "
            "width of the feature, respectively.");

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");  // TODO(Chengduo): Add checker.
                       // (Currently,
C
fix bug  
chengduoZH 已提交
344
  // TypedAttrChecker don't support vector type.)
  AddAttr<bool>(
      "global_pooling",
      "(bool, default false) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored.")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides(depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "exclusive",
      "(bool, default True) When true, will exclude the zero-padding in the "
      "averaging calculating, otherwise, include the zero-padding. Note, it "
翟飞跃 已提交
367
      "is only used when pooling_type is avg. The default is True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool, default False) When true, will perform adaptive pooling instead, "
      "output shape in H and W dimensions will be same as ksize, input data "
      "will be divided into grids specify by ksize averagely and perform "
      "pooling in each grid area to get output pooling value.")
      .SetDefault(false);

  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool, default false) Whether to use the ceil function to calculate "
      "output height and width. False is the default. If it is set to False, "
      "the floor function will be used.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NHWC\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register a layout transform function

  AddComment(R"DOC(
Pool3d Operator.

The pooling3d operation calculates the output based on
the input, pooling_type, ksize, strides, and paddings parameters.
Input(X) and output(Out) are in NCDHW format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters (ksize, strides, paddings)
each contain three elements, which represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$
  For ceil_mode = false:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + 2 * paddings[2])}{strides[2]} + 1
       $$
  For ceil_mode = true:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + 2 * paddings[2] + strides[2] - 1)}{strides[2]} + 1
       $$

  For exclusive = false:
       $$
       dstart = i * strides[0] - paddings[0]
       $$
       $$
       dend = dstart + ksize[0]
       $$
       $$
       hstart = j * strides[1] - paddings[1]
       $$
       $$
       hend = hstart + ksize[1]
       $$
       $$
       wstart = k * strides[2] - paddings[2]
       $$
       $$
       wend = wstart + ksize[2]
       $$
       $$
       Output(i ,j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}
       $$

  For exclusive = true:
       $$
       dstart = max(0, i * strides[0] - paddings[0])
       $$
       $$
       dend = min(D, dstart + ksize[0])
       $$
       $$
       hstart = max(0, j * strides[1] - paddings[1])
       $$
       $$
       hend = min(H, hstart + ksize[1])
       $$
       $$
       wstart = max(0, k * strides[2] - paddings[2])
       $$
       $$
       wend = min(W, wstart + ksize[2])
       $$
       $$
       Output(i ,j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
       $$

)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(pool2d, ops::PoolOp, ops::Pool2dOpMaker,
                  ops::PoolOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(pool2d_grad, ops::PoolOpGrad);

REGISTER_OP_CPU_KERNEL(
    pool2d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    pool2d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OPERATOR(pool3d, ops::PoolOp, ops::Pool3dOpMaker,
                  ops::PoolOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad);

REGISTER_OP_CPU_KERNEL(
    pool3d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    pool3d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);