/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_op.h"

#include <string>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("Input"),
                 "Input(Input) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("Filter"),
                 "Input(Filter) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Output"),
                 "Output(Output) of ConvOp should not be null.");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  int groups = ctx->Attrs().Get<int>("groups");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");

  PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
                 "Conv input should be a 4-D or 5-D tensor.");
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      "Conv input dimension and filter dimension should be the same.");
  PADDLE_ENFORCE(
      in_dims.size() - strides.size() == 2U,
      "Conv input dimension and strides dimension should be consistent.");
  PADDLE_ENFORCE_EQ(
      paddings.size(), strides.size(),
      "Conv paddings dimension and Conv strides dimension should be the same.");

  PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
                    "The number of input channels should be equal to filter "
                    "channels * groups.");

  PADDLE_ENFORCE_EQ(
      filter_dims[0] % groups, 0,
      "The number of output channels should be divisible by groups.");

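  // Each spatial output dimension follows the standard dilated-convolution
  // size formula documented in the operator DOC strings below, e.g. with
  // in = 32, filter = 3, dilation = 1, padding = 1, stride = 1:
  //   out = (in + 2 * padding - (dilation * (filter - 1) + 1)) / stride + 1
  //       = (32 + 2 - 3) / 1 + 1 = 32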
  std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
  for (size_t i = 0; i < strides.size(); ++i) {
    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                          dilations[i], paddings[i],
                                          strides[i]));
  }
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
  ctx->ShareLoD("Input", "Output");
}

framework::OpKernelType ConvOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout = framework::StringToDataLayout(data_format);

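  // Kernel selection order: prefer cuDNN when it is available and enabled,
  // then MKLDNN, and otherwise fall back to the plain GEMM-based kernel.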
#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Input")->type());
  auto filter_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Filter")->type());
  PADDLE_ENFORCE_EQ(input_data_type, filter_data_type,
                    "input and filter data type should be consistent");

  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}

void Conv2DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "H is the height of the filter, and W is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCHW.")
      .Reuse("Input");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1}), the "
                            "strides(h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0}), the "
                            "paddings(h_pad, w_pad) of "
                            "convolution operator.")
      .SetDefault({0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in the cudnn kernel; requires cudnn to "
      "be installed.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in the mkldnn kernel.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; the input will "
      "be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register a layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in the cudnn kernel; use_cudnn must be set to true. "
               "Workspace size for cudnn, in MB. The workspace is a section "
               "of GPU memory which will be allocated/freed each time the "
               "operator runs; a larger workspace size can increase "
               "performance but also requires better hardware. This size "
               "should be chosen carefully.")
      .SetDefault(4096);
  AddComment(R"DOC(
Convolution Operator.

The convolution operation calculates the output based on the input, filter
and the strides, paddings, dilations, groups parameters. The size of each
dimension of the parameters is checked during infer-shape.
Input(Input) and Output(Output) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature.
Filters(Input) is in MCHW format, where M is the number of output image
channels, C is the number of input image channels, H is the height of the
filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) are two-element vectors that
represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
$$
       H_{out}= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1
$$
)DOC");
}

void Conv3DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCDHW, where N is batch size, C is the "
      "number of channels, D is the depth of the feature, H is the height of "
      "the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "D is the depth of the filter, H is the height of the filter, and W "
           "is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCDHW.")
      .Reuse("Input");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int>, default:{0, 0, 0}), the "
                            "paddings(d_pad, h_pad, w_pad) of convolution "
                            "operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1, 1}), the "
                            "dilations(d_dilation, h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in the cudnn kernel; requires cudnn to "
      "be installed.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in the mkldnn kernel.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; the input will "
      "be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register a layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in the cudnn kernel: workspace size for cudnn, in "
               "MB. The workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs; a larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);

  AddComment(R"DOC(
Convolution3D Operator.

The convolution operation calculates the output based on the input, filter
and the strides, paddings, dilations, groups parameters. The size of each
dimension of the parameters is checked during infer-shape.
Input(Input) and Output(Output) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the
height of the feature, and W is the width of the feature.
Filters(Input) is in MCDHW format, where M is the number of output image
channels, C is the number of input image channels, D is the depth of the
filter, H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) are three-element vectors that
represent depth, height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out}= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]}+ 1 \\
       H_{out}= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]}+ 1
  $$
)DOC");
}

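// The gradient op only needs to infer shapes: the gradients of Input and
// Filter have exactly the same dimensions as the corresponding forward
// tensors.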
void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<Tensor>("Input")->type()), ctx.GetPlace(),
      layout_, library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);

// depthwise convolution op
REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);
REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);

// depthwise conv kernel
// TODO(xingzhaolong): neon kernel for mobile
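// On CPU the depthwise variants currently reuse the generic GEMM-based
// convolution kernels registered below.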
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv2d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv3d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);