/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_op.h"

#include <string>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {
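
// ConvOp::InferShape validates the Input/Filter ranks and the
// strides/paddings/dilations/groups attributes, then derives the output
// shape (NCHW for conv2d, NCDHW for conv3d) via ConvOutputSize.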

void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("Input"),
                 "Input(Input) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("Filter"),
                 "Input(Filter) of ConvOp should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Output"),
                 "Output(Output) of ConvOp should not be null.");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  int groups = ctx->Attrs().Get<int>("groups");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");

  PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
                 "Conv input should be a 4-D or 5-D tensor.");
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      "Conv input dimension and filter dimension should be the same.");
  PADDLE_ENFORCE(
      in_dims.size() - strides.size() == 2U,
      "Conv input dimension and strides dimension should be consistent.");
  PADDLE_ENFORCE_EQ(
      paddings.size(), strides.size(),
      "Conv paddings dimension and Conv strides dimension should be the same.");

  PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
                    "The number of input channels should be equal to filter "
                    "channels * groups.");

  PADDLE_ENFORCE_EQ(
      filter_dims[0] % groups, 0,
      "The number of output channels should be divisible by groups.");

  std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
  for (size_t i = 0; i < strides.size(); ++i) {
    output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
                                          dilations[i], paddings[i],
                                          strides[i]));
  }
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
  ctx->ShareLoD("Input", "Output");
}
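
// Kernel selection for the forward pass: prefer the cuDNN kernel when it can
// be used for this context, otherwise MKLDNN, otherwise the plain GEMM-based
// implementation. float16 inputs are only supported with cuDNN.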

framework::OpKernelType ConvOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kPlain};

  std::string data_format = ctx.Attr<std::string>("data_format");
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::DataLayout layout = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Input")->type());
  auto filter_data_type =
      framework::ToDataType(ctx.Input<Tensor>("Filter")->type());
  PADDLE_ENFORCE_EQ(input_data_type, filter_data_type,
                    "input and filter data type should be consistent");

  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}

void Conv2DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "H is the height of the filter, and W is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCHW.")
      .Reuse("Input");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1}), the "
                            "strides(h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0}), the "
                            "paddings(h_pad, w_pad) of "
                            "convolution operator.")
      .SetDefault({0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need to install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Need to set use_cudnn to true. "
               "Workspace size for cudnn, in MB; "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);
  AddComment(R"DOC(
Convolution Operator.

The convolution operation calculates the output based on the input, filter
and the strides, paddings, dilations, groups parameters. The size of each dimension of the
parameters is checked in InferShape.
Input(Input) and Output(Output) are in NCHW format, where N is batch
size, C is the number of channels, H is the height of the feature, and W is
the width of the feature.
Filters(Input) is MCHW format, where M is the number of output image channels, C is
the number of input image channels, H is the height of the filter, and W
is the width of the filter.
Parameters(strides, paddings, dilations) each have two elements, which represent
height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
$$
       H_{out}= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]}+ 1
$$
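
  For example, with $H_{in} = W_{in} = 7$, $H_f = W_f = 3$, paddings = {0, 0},
  dilations = {1, 1} and strides = {2, 2}:
       $H_{out} = (7 + 0 - 3) / 2 + 1 = 3$, and likewise $W_{out} = 3$.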
)DOC");
}

void Conv3DOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution operator. "
      "The format of input tensor is NCDHW, where N is batch size, C is the "
      "number of channels, D is the depth of the feature, H is the height of "
      "the feature, "
      "and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "output image channels, C is the number of input image channels, "
           "D is the depth of the filter, H is the height of the filter, and W "
           "is the width of the filter. "
           "If the groups attribute is greater than 1, C equals the number of "
           "input image channels divided by the groups.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution operator. "
            "The format of output tensor is also NCDHW.")
      .Reuse("Input");
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int>, default:{0, 0, 0}), the "
                            "paddings(d_pad, h_pad, w_pad) of convolution "
                            "operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>(
      "groups",
      "(int default:1), the groups number of the convolution operator. "
      "According to grouped convolution in Alex Krizhevsky's Deep CNN paper: "
      "when group=2, the first half of the filters is only connected to the "
      "first half of the input channels, while the second half of the filters "
      "is only connected to the second half of the input channels.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1, 1}), the "
                            "dilations(d_dilation, h_dilation, w_dilation) of "
                            "convolution operator.")
      .SetDefault({1, 1, 1});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need to install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
      "\"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("AnyLayout");
  // TODO(dzhwinter): need to register layout transform function
  AddAttr<int>("workspace_size_MB",
               "Only used in cudnn kernel. Workspace size for cudnn, in MB; "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardware. This size should be chosen carefully.")
      .SetDefault(4096);

  AddComment(R"DOC(
Convolution3D Operator.

The convolution operation calculates the output based on the input, filter
and the strides, paddings, dilations, groups parameters. The size of each dimension of the
parameters is checked in InferShape.
Input(Input) and Output(Output) are in NCDHW format, where N is batch
size, C is the number of channels, D is the depth of the feature, H is the height of
the feature, and W is the width of the feature.
Filters(Input) is MCDHW format, where M is the number of output image channels,
C is the number of input image channels, D is the depth of the filter,
H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings, dilations) each have three elements, which represent
depth, height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{out}, C_{in}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out}= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{ strides[0]}+ 1 \\
       H_{out}= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{ strides[1]}+ 1 \\
       W_{out}= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{ strides[2]}+ 1
  $$
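
  For example, with $D_{in} = H_{in} = W_{in} = 7$, a $3 \times 3 \times 3$ filter,
  paddings = {0, 0, 0}, dilations = {1, 1, 1} and strides = {2, 2, 2}, each spatial
  output dimension is $(7 + 0 - 3) / 2 + 1 = 3$.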
)DOC");
}

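// Shape inference for the shared conv gradient op: the gradient w.r.t. Input
// takes the shape of Input, and the gradient w.r.t. Filter takes the shape of
// Filter.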
void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  std::string data_format = ctx.Attr<std::string>("data_format");
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<Tensor>("Input")->type()), ctx.GetPlace(),
      layout_, library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
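// conv2d, depthwise_conv2d and conv3d all share ConvOp / ConvOpGrad; on CPU
// they are currently all backed by the GEMM-based kernels registered below.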
REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad);

// depthwise convolution op
REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad);
REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad);

// depthwise conv kernel
// TODO(xingzhaolong): neon kernel for mobile
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv2d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OP_CPU_KERNEL(
    conv3d, ops::GemmConvKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_grad,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvGradKernel<paddle::platform::CPUDeviceContext, double>);