/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_op.h"

#include <string>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("Input"),
                 "Input(Input) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("Filter"),
                 "Input(Filter) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Output"),
                 "Output(Output) of ConvOp should not be null.");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");

  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  int groups = ctx->Attrs().Get<int>("groups");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");

  PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
                 "Conv input should be a 4-D or 5-D tensor.");
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      "Conv input dimension and filter dimension should be the same.");
  PADDLE_ENFORCE(
      in_dims.size() - strides.size() == 2U,
      "Conv input dimension and strides dimension should be consistent.");
  PADDLE_ENFORCE_EQ(
      paddings.size(), strides.size(),
      "Conv paddings dimension and Conv strides dimension should be the same.");

  PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
                    "The number of input channels should be equal to filter "
                    "channels * groups.");
  PADDLE_ENFORCE_EQ(
      filter_dims[0] % groups, 0,
      "The number of output channels should be divided by groups.");

  std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
  for (size_t i = 0; i < strides.size(); ++i) {
    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                          dilations[i], paddings[i],
                                          strides[i]));
  }
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
  ctx->ShareLoD("Input", "Output");
}
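
// For reference, a minimal sketch of the output-size arithmetic used in the
// loop above (the actual ConvOutputSize helper is declared in conv_op.h; this
// restatement assumes it follows the formula documented in the op comment
// below):
//
//   inline int ConvOutputSize(int input_size, int filter_size, int dilation,
//                             int padding, int stride) {
//     const int dkernel = dilation * (filter_size - 1) + 1;
//     return (input_size + 2 * padding - dkernel) / stride + 1;
//   }
//
// e.g. input_size = 5, filter_size = 3, dilation = 1, padding = 1,
// stride = 2 gives (5 + 2 - 3) / 2 + 1 = 3.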

framework::OpKernelType ConvOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Input")->type());
  // auto filter_data_type =
  //     framework::ToDataType(ctx.Input<Tensor>("Filter")->type());
  // PADDLE_ENFORCE_EQ(input_data_type, filter_data_type,
  //                   "input and filter data type should be consistent");

  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}
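
// Note on the selection above: the kernel library falls through in priority
// order -- cuDNN when built with CUDA and CanCUDNNBeUsed(ctx) holds, then
// MKL-DNN when CanMKLDNNBeUsed(ctx) holds, then the plain GEMM kernel.
// A hypothetical attribute setting that steers the choice (illustrative
// only, not from the original source):
//
//   framework::AttributeMap attrs;
//   attrs["use_cudnn"] = false;  // skip the cuDNN path
//   attrs["use_mkldnn"] = true;  // prefer MKL-DNN when it is available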

void Conv2DOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "H is the height of the filter, and W is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application."
           "The format of output tensor is X (one-dimensional) of size equal"
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable();
  AddInput("Scale_in",
           "(Tensor) Scale_in to be used for int8 input data."
           "Only used with INT8.")
      .AsDispensable();
  AddInput("Scale_in_eltwise",
           "(Tensor) Scale_in_eltwise to be used for int8 eltwise input data. "
           "Only used with MKL-DNN.")
      .AsDispensable();
  AddInput("Scale_weights",
           "(Tensor) Scale_weights to be used for int8 weights data. "
           "Only used with MKL-DNN.")
      .AsDispensable();
  AddInput("Scale_out",
           "(Tensor) Scale_out to be used for int8 output data. "
           "Only used with MKL-DNN.")
      .AsDispensable();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCHW.");

  AddInput("ResidualData",
           "(Tensor) Tensor with residual data "
           "to which convolution output will be added."
           "Used with fuse_residual_connection fusion.")
      .AsDispensable();
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1}), the "
                            "strides(h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0}), the "
                            "paddings(h_pad, w_pad) of "
                            "convolution operator.")
      .SetDefault({0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
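  // Illustrative example (assumed values, not from the original source):
  // with groups = 2, 8 input channels and 16 output channels, each group
  // convolves 4 input channels into 8 output channels, so the filter shape
  // is (16, 4, H_f, W_f).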
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_residual_connection",
                "(bool, default false) Only used in mkldnn kernel. Used "
                "whenever convolution output is as an input to residual "
                "connection.")
      .SetDefault(false);
  AddAttr<bool>("force_fp32_output", "(bool, default false) Force INT8 kernel output FP32, only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NHWC\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to registered layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Need set use_cudnn to true."
               "workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);
  AddAttr<bool>("exhaustive_search",
                "(bool, default false) cuDNN has many algorithm to calculation "
                "convolution, whether enable exhaustive search ",
                "for cuDNN convolution or not, defalut is False.")
      .SetDefault(false);
  AddComment(R"DOC(
Convolution Operator.

The convolution operation calculates the output based on the input, filter
and the strides, paddings, dilations, and groups parameters. The size of each
dimension of the parameters is checked during infer-shape.
Input(Input) and Output(Output) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature.
Filters(Input) is in MCHW format, where M is the number of output image
channels, C is the number of input image channels, H is the height of the
filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) are two elements each. These two
elements represent height and width, respectively.
The input(Input) size and output(Output) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
$$
       H_{out}= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1
$$
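
For example, with $H_{in} = W_{in} = 5$, $H_f = W_f = 3$, paddings = {1, 1},
strides = {2, 2} and dilations = {1, 1} (illustrative values, not from the
original doc):
$$
     H_{out} = W_{out} = \frac{5 + 2 \times 1 - (1 \times (3 - 1) + 1)}{2} + 1 = 3
$$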
)DOC");
  Apply();
}

void Conv3DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCDHW. Where N is batch size, C is the "
      "number of channels, D is the depth of the feature, H is the height of "
      "the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "D is the depth of the filter, H is the height of the filter, and W "
           "is the width of the filter."
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator."
            "The format of output tensor is also NCDHW.");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int>, default:{0, 0, 0}), the "
                            "paddings(d_pad, h_pad, w_pad) of convolution "
                            "operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1, 1}), the "
                            "dilations(d_dilation, h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NHWC\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("AnyLayout");
  AddAttr<bool>("force_fp32_output",
                "(bool, default false) Only used in mkldnn INT8 kernel")
      .SetDefault(false);
  // TODO(dzhwinter): need to register the layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Workspace size for cudnn, in MB; "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs. A larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);
  AddAttr<bool>("exhaustive_search",
                "(bool, default false) cuDNN has many algorithm to calculation "
                "convolution, whether enable exhaustive search ",
                "for cuDNN convolution or not, defalut is False.")
      .SetDefault(false);
  AddComment(R"DOC(
Convolution3D Operator.

The convolution operation calculates the output based on the input, filter
and the strides, paddings, dilations, and groups parameters. The size of each
dimension of the parameters is checked during infer-shape.
Input(Input) and output(Output) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the
height of the feature, and W is the width of the feature.
Filters(Input) is in MCDHW format, where M is the number of output image
channels, C is the number of input image channels, D is the depth of the
filter, H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) are three elements each. These three
elements represent depth, height, and width, respectively.
The input(Input) size and output(Output) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out}= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{ strides[0]}+ 1 \\
       H_{out}= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{ strides[1]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{ strides[2]}+ 1
  $$
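
For example, with $D_{in} = H_{in} = W_{in} = 5$, $D_f = H_f = W_f = 3$,
paddings = {1, 1, 1}, strides = {2, 2, 2} and dilations = {1, 1, 1}
(illustrative values, not from the original doc):
  $$
       D_{out} = H_{out} = W_{out} = \frac{5 + 2 \times 1 - (1 \times (3 - 1) + 1)}{2} + 1 = 3
  $$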
)DOC");
  Apply();
}

void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<Tensor>("Input")->type()), ctx.GetPlace(),
      layout_, library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  ops::ConvOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);

// depthwise convolution op
REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);

REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
                  ops::ConvOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);

// depthwise conv kernel
// TODO(xingzhaolong): neon kernel for mobile
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv2d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv3d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);