/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_transpose_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using DataLayout = framework::DataLayout;

void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "ConvTranspose");
  OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "ConvTranspose");
  OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "ConvTranspose");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  std::vector<int> output_size =
      ctx->Attrs().Get<std::vector<int>>("output_size");
  std::vector<int> output_padding =
      ctx->Attrs().Get<std::vector<int>>("output_padding");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");
  int groups = ctx->Attrs().Get<int>("groups");
  std::string padding_algorithm =
      ctx->Attrs().Get<std::string>("padding_algorithm");
  const std::string data_layout_str =
      ctx->Attrs().Get<std::string>("data_format");
  const DataLayout data_layout =
      this->IsMKLDNNType() ? DataLayout::kNCHW
                           : framework::StringToDataLayout(data_layout_str);
  PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true,
                    platform::errors::InvalidArgument(
                        "Input of Op(conv_transpose) should be 4-D or "
                        "5-D Tensor. But received: %u-D Tensor, "
                        "the shape of input is [%s]",
                        in_dims.size(), in_dims));
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      platform::errors::InvalidArgument(
          "The input's dimension size and filter's dimension size of "
          "Op (conv_transpose) should be equal. But received: the shape of "
          "input is [%s], the dimension size of input is [%d], the shape "
          "of filter is [%s],  the dimension size of filter is [%d]. ",
          in_dims, in_dims.size(), filter_dims, filter_dims.size()));
  int in_sub_stride_size = in_dims.size() - strides.size();
  PADDLE_ENFORCE_EQ(
      in_dims.size() - strides.size(), 2U,
      platform::errors::InvalidArgument(
          "The input's dimension size minus Attr(stride)'s size must "
          "be euqal to 2 for Op(conv_transpose). But received: [%d], the "
          "input's dimension size is [%d], the shape of input "
          "is [%s], the Attr(stride)'s size is [%d].",
          in_sub_stride_size, in_dims.size(), in_dims, strides.size()));
  if (output_size.size())
    PADDLE_ENFORCE_EQ(
        output_size.size(), strides.size(),
        platform::errors::InvalidArgument(
            "The Attr(output_size) and Attr(stride) of Op(conv_transpose) "
            "should be the same."));
  if (output_padding.size())
    PADDLE_ENFORCE_EQ(
        output_padding.size(), strides.size(),
        platform::errors::InvalidArgument(
            "The Attr(output_padding) and Attr(stride) of Op(conv_transpose) "
            "should be the same."));
  const int64_t C =
      (data_layout != DataLayout::kNHWC ? in_dims[1]
                                        : in_dims[in_dims.size() - 1]);
  PADDLE_ENFORCE_EQ(
      C, filter_dims[0],
      platform::errors::InvalidArgument(
          "The number of input channels should be equal to filter channels "
          "for Op(conv_transpose). But received: the input's channels is "
          "[%d], the shape of input is [%s], the filter's channels is [%d], "
          "the shape of filter is [%s]. The data_format is %s."
          "The error may come from wrong data_format setting.",
          C, in_dims, filter_dims[0], filter_dims, data_layout_str));

  framework::DDim in_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
  } else {
    in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
  }
  framework::DDim filter_data_dims =
      framework::slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                           in_data_dims, strides, ksize);

  std::vector<int64_t> output_shape({in_dims[0]});
  if (data_layout != DataLayout::kNHWC) {
    output_shape.push_back(filter_dims[1] * groups);
  }
  const int offset = (data_layout != DataLayout::kNHWC ? 2 : 1);
  for (size_t i = 0; i < strides.size(); ++i) {
    auto filter_extent = dilations[i] * (filter_dims[i + 2] - 1) + 1;
    auto infer_shape = (ctx->IsRuntime() || in_dims[i + offset] > 0)
                           ? (in_dims[i + offset] - 1) * strides[i] -
                                 paddings[2 * i] - paddings[2 * i + 1] +
                                 filter_extent
                           : -1;
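    // Illustrative arithmetic (example values, not from the source): with
    // in_dims[i + offset] = 4, strides[i] = 2, zero paddings, dilations[i] = 1
    // and filter_dims[i + 2] = 3, the filter extent is 1 * (3 - 1) + 1 = 3 and
    // infer_shape = (4 - 1) * 2 - 0 - 0 + 3 = 9.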
    if (output_size.size()) {
      if (ctx->IsRuntime()) {
        PADDLE_ENFORCE_GE(
            output_size[i], infer_shape,
            platform::errors::InvalidArgument(
                "output_size of Op(ConvTransposeOp) should not be "
                "less than the infered output size. But received output_size = "
                "[%s], whose dim %d is less than the infered output size [%s]",
                framework::make_ddim(output_size), i, infer_shape));
        PADDLE_ENFORCE_LT(
            output_size[i], infer_shape + strides[i],
            platform::errors::InvalidArgument(
                "output_size of Op(ConvTransposeOp) should be less "
                "than infered size + stride. But received output_size = [%s], "
                "whose dim %d is not less than the infered output size (%d) + "
                "stride (%d) = %d",
                framework::make_ddim(output_size), i, infer_shape, strides[i],
                infer_shape + strides[i]));
      }
      output_shape.push_back(output_size[i]);
    } else if (output_padding.size()) {
      if (ctx->IsRuntime()) {
        PADDLE_ENFORCE_GE(
            output_padding[i], 0,
            platform::errors::InvalidArgument(
                "output_padding of Op(ConvTransposeOp) should not be "
                "less than the 0. But received output_padding = "
                "[%s], whose dim %d is less than 0",
                framework::make_ddim(output_padding), i));
        PADDLE_ENFORCE_LT(
            output_padding[i], std::max(strides[i], dilations[i]),
            platform::errors::InvalidArgument(
                "output_padding of Op(ConvTransposeOp) should be less "
                "than either stride or dilation. But received output_size = "
                "[%s], "
                "whose dim %d is not less than either stride (%d)  or "
                "dilation (%d)",
                framework::make_ddim(output_size), i, strides[i],
                dilations[i]));
      }
      output_shape.push_back((infer_shape + output_padding[i]));
    } else {
      output_shape.push_back(infer_shape);
    }
  }
  if (data_layout == DataLayout::kNHWC) {
    output_shape.push_back(filter_dims[1] * groups);
  }
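  // End-to-end sketch of this shape inference (assumed example values): an
  // NCHW input [1, 3, 4, 4] with filter [3, 6, 3, 3], groups = 1,
  // strides = {2, 2}, zero paddings, dilations = {1, 1}, and neither
  // output_size nor output_padding set yields output_shape = [1, 6, 9, 9].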
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
}

framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
    if (use_cudnn) {
      library_ = framework::LibraryType::kCUDNN;
    }
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif
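  // Dispatch summary: the cuDNN kernel is preferred on GPU when a cudnn
  // handle is available; otherwise MKL-DNN is used when it can be; the plain
  // kernel is the fallback.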
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"), ctx.GetPlace(),
      layout_, library_);
}

framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and
  // bias already have their shape in NCHW order.
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = framework::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat this as NCHW (the default data_format value).
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(),
          framework::StringToDataLayout(data_format));
    }
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

void Conv2DTransposeOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddInput("Input",
           "(Tensor) The input tensor of convolution transpose operator. "
           "The format of input tensor is NCHW or NHWC. Where N is batch size, "
           "C is the number of input channels, H is the height of the feature, "
           "and W is the width of the feature.");
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of "
      "output feature channels, "
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application. "
           "The format of output tensor is X (one-dimensional) of size equal "
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
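  // As enforced in ConvTransposeOp::InferShape above, each given output_size
  // value must lie in [infer_shape, infer_shape + stride) for the
  // corresponding spatial dimension.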
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
      "convolution transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
      "transpose operator.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>("fuse_activation",
                       "(string, default \"\") Only used in mkldnn kernel")
      .SetDefault("");
  AddAttr<float>("fuse_alpha",
                 "(float, default 0.0) Only used in mkldnn kernel")
      .SetDefault(0.0f);
  AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
      .SetDefault(0.0f);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" to have the padding computed by the "
      "specified algorithm. ")
      .SetDefault("EXPLICIT");
  AddAttr<int>("workspace_size_MB",
               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
T
tianshuo78520a 已提交
321
               "better hardward. This size should be carefully set.")
322
      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
  AddComment(R"DOC(
Convolution2D Transpose Operator.

The convolution transpose operation calculates the output based on the input, filter,
and the dilations, strides, paddings, and groups parameters. The size of each dimension
of the parameters is checked during shape inference.
Input(Input) and output(Output) are in NCHW or NHWC format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Filter(Input) is in MCHW format, where M is the number of input feature channels,
C is the number of output feature channels, H is the height of the filter,
and W is the width of the filter.
Parameters(strides, paddings) are two elements. These two elements represent height
and width, respectively.
The input(X) size and output(Out) size may be different.

For example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1
  $$
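
As a worked instance of the formula above (example values chosen purely for
illustration): with $H_{in} = W_{in} = 4$, $H_f = W_f = 3$, strides of 2, zero
paddings and unit dilations,
  $$
       H_{out} = W_{out} = (4 - 1) * 2 - 0 - 0 + 1 * (3 - 1) + 1 = 9
  $$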
)DOC");
}

void Conv3DTransposeOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution transpose operator. "
      "The format of input tensor is NCDHW or NDHWC. Where N is batch "
      "size, C is the number of channels, D is the depth of the feature, "
      "H is the height of the feature, and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution transpose operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "input feature channels, C is the number of "
           "output feature channels, D "
           "is the depth of the filter, H is the height of the filter, and "
           "W is the width of the filter. "
           "We enforce groups number == 1 and padding == 0 in "
           "the convolution3d transpose scenario.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor. "
            "Where N is batch size, C is "
            "the number of channels, D is the depth of the feature, H is the "
            "height of the feature, and W is the width of the feature.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation, h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
                            "h_pad, w_pad) of convolution transpose operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution3d "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" to have the padding computed by the "
      "specified algorithm. ")
      .SetDefault("EXPLICIT");
  AddAttr<int>("workspace_size_MB",
               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
T
tianshuo78520a 已提交
427
               "better hardward. This size should be carefully set.")
428
      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
  AddComment(R"DOC(
Convolution3D Transpose Operator.

The convolution transpose operation calculates the output based on the input, filter,
and the dilations, strides, paddings, and groups parameters. The size of each dimension
of the parameters is checked during shape inference.
Input(Input) and output(Output) are in NCDHW or NDHWC format, where N is batch size, C is the
number of channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of input feature channels,
C is the number of output feature channels, D is the depth of the filter, H is the
height of the filter, and W is the width of the filter.
Parameters(strides, paddings) are three elements. These three elements represent
depth, height and width, respectively.
The input(X) size and output(Out) size may be different.
Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
  $$
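
As a worked instance of the formula above (example values chosen purely for
illustration): with $D_{in} = H_{in} = W_{in} = 4$, $D_f = H_f = W_f = 3$,
strides of 2, zero paddings and unit dilations,
  $$
       D_{out} = H_{out} = W_{out} = (4 - 1) * 2 - 0 - 0 + 1 * (3 - 1) + 1 = 9
  $$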
)DOC");
}

void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#ifdef PADDLE_WITH_CUDA
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"), ctx.GetPlace(),
      layout_, library_);
}

template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// conv2d_transpose
REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad);

REGISTER_OP_CPU_KERNEL(
    conv2d_transpose,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_transpose_grad,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                     double>);

// conv3d_transpose
REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad);

REGISTER_OP_CPU_KERNEL(
    conv3d_transpose,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_transpose_grad,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                     double>);

// depthwise conv2d_transpose
REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_transpose,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_transpose_grad,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                     double>);