/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_op.h"

#include <string>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("Input"),
                 "Input(Input) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("Filter"),
                 "Input(Filter) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Output"),
                 "Output(Output) of ConvOp should not be null.");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");

  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  int groups = ctx->Attrs().Get<int>("groups");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");

  PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
                 "Conv intput should be 4-D or 5-D tensor.");
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      "Conv input dimension and filter dimension should be the same.");
  PADDLE_ENFORCE(
      in_dims.size() - strides.size() == 2U,
      "Conv input dimension and strides dimension should be consistent.");
  PADDLE_ENFORCE_EQ(
      paddings.size(), strides.size(),
      "Conv paddings dimension and Conv strides dimension should be the same.");

  PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
                    "The number of input channels should be equal to filter "
                    "channels * groups.");
  PADDLE_ENFORCE_EQ(
      filter_dims[0] % groups, 0,
      "The number of output channels should be divided by groups.");

  std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
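  // For each spatial dimension, ConvOutputSize applies the standard
  // convolution arithmetic (see the formula in the operator docs below):
  //   out = (in + 2 * pad - (dilation * (k - 1) + 1)) / stride + 1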
  for (size_t i = 0; i < strides.size(); ++i) {
    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                          dilations[i], paddings[i],
                                          strides[i]));
  }
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
  ctx->ShareLoD("Input", "Output");
}

framework::OpKernelType ConvOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout = framework::StringToDataLayout(data_format);

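  // Kernel dispatch: prefer cuDNN when it is compiled in and usable; otherwise
  // try MKLDNN; otherwise fall back to the plain (GEMM-based) kernel.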
#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Input")->type());
  auto filter_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Filter")->type());
  PADDLE_ENFORCE_EQ(input_data_type, filter_data_type,
                    "input and filter data type should be consistent");

  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}

void Conv2DOpMaker::Make() {
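  // "is_test" distinguishes inference runs from training runs; kernels may
  // use it to skip work that is only needed for training.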
  AddAttr<bool>("is_test", "").SetDefault(false);
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "H is the height of the filter, and W is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application."
           "The format of output tensor is X (one-dimensional) of size equal"
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCHW.")
      .Reuse("Input");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1}), the "
                            "strides(h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0}), the "
                            "paddings(h_pad, w_pad) of "
                            "convolution operator.")
      .SetDefault({0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in the cudnn kernel; requires cudnn "
      "to be installed.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default AnyLayout) An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register the layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Need set use_cudnn to true."
               "workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);
  AddComment(R"DOC(
Convolution Operator.

The convolution operation calculates the output based on the input, the filter,
and the strides, paddings, dilations, and groups parameters. The size of each
dimension of these parameters is checked in the infer-shape.
Input(Input) and Output(Output) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature.
Filter(Input) is in MCHW format, where M is the number of output image channels, C is
the number of input image channels, H is the height of the filter, and W
is the width of the filter.
Parameters(strides, paddings, dilations) hold two elements each. These two elements
represent height and width, respectively.
The input(Input) size and output(Output) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
$$
       H_{out}= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1
$$
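
For example, a $32 \times 32$ input with a $3 \times 3$ filter, strides {1, 1},
paddings {1, 1} and dilations {1, 1} keeps its spatial size:
$H_{out} = (32 + 2 * 1 - (1 * (3 - 1) + 1)) / 1 + 1 = 32$, and likewise for $W_{out}$.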
)DOC");
}

void Conv3DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCDHW. Where N is batch size, C is the "
      "number of channels, D is the depth of the feature, H is the height of "
      "the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "D is the depth of the filter, H is the height of the filter, and W "
           "is the width of the filter."
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator."
            "The format of output tensor is also NCDHW.")
      .Reuse("Input");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int>, default:{0, 0, 0}), the "
                            "paddings(d_pad, h_pad, w_pad) of convolution "
                            "operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1, 1}), the "
                            "dilations(d_dilation, h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in the cudnn kernel; requires cudnn "
      "to be installed.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default AnyLayout) An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register the layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);

  AddComment(R"DOC(
Convolution3D Operator.

The convolution operation calculates the output based on the input, the filter,
and the strides, paddings, dilations, and groups parameters. The size of each
dimension of these parameters is checked in the infer-shape.
Input(Input) and output(Output) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the height of
the feature, and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of output image channels,
C is the number of input image channels, D is the depth of the filter,
H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) hold three elements each. These three elements
represent depth, height, and width, respectively.
The input(Input) size and output(Output) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out}= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{ strides[0]}+ 1 \\
       H_{out}= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{ strides[1]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{ strides[2]}+ 1
  $$
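
For example, a $16 \times 32 \times 32$ input with a $3 \times 3 \times 3$ filter,
strides {1, 1, 1}, paddings {1, 1, 1} and dilations {1, 1, 1} keeps its size:
$D_{out} = (16 + 2 * 1 - (1 * (3 - 1) + 1)) / 1 + 1 = 16$, and likewise for
$H_{out}$ and $W_{out}$.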
)DOC");
}

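// The gradients with respect to Input and Filter have the same shapes as
// Input and Filter themselves, so InferShape mirrors the forward dimensions.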
void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

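  // Same kernel dispatch priority as the forward op: cuDNN, then MKLDNN,
  // then the plain (GEMM-based) kernel.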
#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<Tensor>("Input")->type()), ctx.GetPlace(),
      layout_, library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);
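
// Illustrative only: a typical Python-side call that reaches this operator in
// the fluid API of this era (parameter names assumed from
// paddle.fluid.layers.conv2d):
//   out = fluid.layers.conv2d(input=x, num_filters=64, filter_size=3,
//                             stride=1, padding=1, groups=1, use_cudnn=True)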

// depthwise convolution op
REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);
REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);

// depthwise conv kernel
// TODO(xingzhaolong): neon kernel for mobile
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv2d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv3d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);