/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_transpose_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using DataLayout = framework::DataLayout;

void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "ConvTranspose");
  OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "ConvTranspose");
  OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "ConvTranspose");

  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  std::vector<int> output_size =
      ctx->Attrs().Get<std::vector<int>>("output_size");
  std::vector<int> output_padding =
      ctx->Attrs().Get<std::vector<int>>("output_padding");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");
  int groups = ctx->Attrs().Get<int>("groups");
  std::string padding_algorithm =
      ctx->Attrs().Get<std::string>("padding_algorithm");
  const std::string data_layout_str =
      ctx->Attrs().Get<std::string>("data_format");
  const DataLayout data_layout =
      this->IsMKLDNNType() ? DataLayout::kNCHW
                           : framework::StringToDataLayout(data_layout_str);

  PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true,
                    platform::errors::InvalidArgument(
                        "Input of Op(conv_transpose) should be 4-D or "
                        "5-D Tensor. But received: %u-D Tensor, "
                        "the shape of input is [%s]",
                        in_dims.size(), in_dims));
  PADDLE_ENFORCE_EQ(
      in_dims.size(), filter_dims.size(),
      platform::errors::InvalidArgument(
          "The input's dimension size and filter's dimension size of "
          "Op (conv_transpose) should be equal. But received: the shape of "
          "input is [%s], the dimension size of input is [%d], the shape "
          "of filter is [%s],  the dimension size of filter is [%d]. ",
          in_dims, in_dims.size(), filter_dims, filter_dims.size()));

  int stride_size = strides.size();
  for (int i = 0; i < stride_size; ++i) {
    PADDLE_ENFORCE_GT(
        strides[i], 0,
        platform::errors::InvalidArgument(
            "The stride of Op(Conv) should be larget than 0, but received "
            "stride is %d.",
            strides[i]));
  }

  int in_sub_stride_size = in_dims.size() - stride_size;

  PADDLE_ENFORCE_EQ(
      in_dims.size() - strides.size(), 2U,
      platform::errors::InvalidArgument(
          "The input's dimension size minus Attr(stride)'s size must "
          "be euqal to 2 for Op(conv_transpose). But received: [%d], the "
          "input's dimension size is [%d], the shape of input "
          "is [%s], the Attr(stride)'s size is [%d].",
          in_sub_stride_size, in_dims.size(), in_dims, strides.size()));
  if (output_size.size())
    PADDLE_ENFORCE_EQ(
        output_size.size(), strides.size(),
        platform::errors::InvalidArgument(
            "The Attr(output_size) and Attr(stride) of Op(conv_transpose) "
            "should be the same."));
  if (output_padding.size())
    PADDLE_ENFORCE_EQ(
        output_padding.size(), strides.size(),
        platform::errors::InvalidArgument(
            "The Attr(output_padding) and Attr(stride) of Op(conv_transpose) "
            "should be the same."));

  const int64_t C =
      (data_layout != DataLayout::kNHWC ? in_dims[1]
                                        : in_dims[in_dims.size() - 1]);
  PADDLE_ENFORCE_EQ(
      C, filter_dims[0],
      platform::errors::InvalidArgument(
          "The number of input channels should be equal to filter channels "
          "for Op(conv_transpose). But received: the input's channels is "
          "[%d], the shape of input is [%s], the filter's channels is [%d], "
          "the shape of filter is [%s]. The data_format is %s."
          "The error may come from wrong data_format setting.",
          C, in_dims, filter_dims[0], filter_dims, data_layout_str));

  framework::DDim in_data_dims;
  if (data_layout != DataLayout::kNHWC) {
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
  } else {
    in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
  }
  framework::DDim filter_data_dims =
      framework::slice_ddim(filter_dims, 2, filter_dims.size());
  std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                           in_data_dims, strides, ksize);
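  // Note: for padding_algorithm "SAME" or "VALID", UpdatePaddingAndDilation
  // overwrites the explicit paddings ("SAME" derives them from the input
  // size, stride and kernel size and also resets dilations to 1; "VALID"
  // zeroes all paddings), so the shape arithmetic below always works with
  // effective per-side paddings.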

  std::vector<int64_t> output_shape({in_dims[0]});
  if (data_layout != DataLayout::kNHWC) {
    output_shape.push_back(filter_dims[1] * groups);
  }
  const int offset = (data_layout != DataLayout::kNHWC ? 2 : 1);
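  // Spatial dimensions start at index 2 for NCHW/NCDHW (after batch and
  // channel) and at index 1 for NHWC/NDHWC (after batch only).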
  for (size_t i = 0; i < strides.size(); ++i) {
    auto filter_extent = dilations[i] * (filter_dims[i + 2] - 1) + 1;
    auto infer_shape = (ctx->IsRuntime() || in_dims[i + offset] > 0)
                           ? (in_dims[i + offset] - 1) * strides[i] -
                                 paddings[2 * i] - paddings[2 * i + 1] +
                                 filter_extent
                           : -1;
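    // Worked example: in_dims[i + offset] = 5, strides[i] = 2,
    // paddings[2 * i] = paddings[2 * i + 1] = 1 and filter_extent = 3
    // (a 3-wide kernel with dilation 1) give (5 - 1) * 2 - 1 - 1 + 3 = 9.
    // A dimension unknown at compile time stays -1.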
    if (output_size.size()) {
      if (ctx->IsRuntime()) {
        PADDLE_ENFORCE_GE(
            output_size[i], infer_shape,
            platform::errors::InvalidArgument(
                "output_size of Op(ConvTransposeOp) should not be "
                "less than the infered output size. But received output_size = "
                "[%s], whose dim %d is less than the infered output size [%s]",
                framework::make_ddim(output_size), i, infer_shape));
        PADDLE_ENFORCE_LT(
            output_size[i], infer_shape + strides[i],
            platform::errors::InvalidArgument(
                "output_size of Op(ConvTransposeOp) should be less "
                "than infered size + stride. But received output_size = [%s], "
                "whose dim %d is not less than the infered output size (%d) + "
                "stride (%d) = %d",
                framework::make_ddim(output_size), i, infer_shape, strides[i],
                infer_shape + strides[i]));
      }
      output_shape.push_back(output_size[i]);
    } else if (output_padding.size()) {
      if (ctx->IsRuntime()) {
        PADDLE_ENFORCE_GE(
            output_padding[i], 0,
            platform::errors::InvalidArgument(
                "output_padding of Op(ConvTransposeOp) should not be "
                "less than the 0. But received output_padding = "
                "[%s], whose dim %d is less than 0",
                framework::make_ddim(output_padding), i));
        PADDLE_ENFORCE_LT(
            output_padding[i], std::max(strides[i], dilations[i]),
            platform::errors::InvalidArgument(
                "output_padding of Op(ConvTransposeOp) should be less "
                "than either stride or dilation. But received output_size = "
                "[%s], "
                "whose dim %d is not less than either stride (%d)  or "
                "dilation (%d)",
                framework::make_ddim(output_size), i, strides[i],
                dilations[i]));
      }
      output_shape.push_back((infer_shape + output_padding[i]));
    } else {
      output_shape.push_back(infer_shape);
    }
  }
  if (data_layout == DataLayout::kNHWC) {
    output_shape.push_back(filter_dims[1] * groups);
  }
  ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
}

framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
    if (use_cudnn) {
      library_ = framework::LibraryType::kCUDNN;
    }
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, data_type)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(data_type, ctx.GetPlace(), layout_, library_);
}

framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and
  // bias already have their shape in NCHW order.
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = framework::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat this as NCHW (the default data_format value).
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(),
          framework::StringToDataLayout(data_format));
    }
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

void Conv2DTransposeOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddInput("Input",
           "(Tensor) The input tensor of convolution transpose operator. "
           "The format of input tensor is NCHW or NHWC. Where N is batch size, "
           "C is the number of input channels, H is the height of the feature, "
           "and W is the width of the feature.");
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of "
      "output feature channels,"
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application."
           "The format of output tensor is X (one-dimensional) of size equal"
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
      "convolution transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
      "transpose operator.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("force_fp32_output",
                "(bool, default false) Force BF16 kernel output FP32, only "
                "used in MKL-DNN BF16")
      .SetDefault(false);
  AddAttr<std::string>(
      "mkldnn_data_type",
      "(string, default \"float32\"). Data type of mkldnn kernel")
      .SetDefault("float32")
      .InEnum({"float32", "bfloat16"});
  AddAttr<bool>("fuse_relu", "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>("fuse_activation",
                       "(string, default \"\") Only used in mkldnn kernel")
      .SetDefault("");
  AddAttr<float>("fuse_alpha",
                 "(float, default 0.0) Only used in mkldnn kernel")
      .SetDefault(0.0f);
  AddAttr<float>("fuse_beta", "(float, default 0.0) Only used in mkldnn kernel")
      .SetDefault(0.0f);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddAttr<int>("workspace_size_MB",
               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardward. This size should be carefully set.")
      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
  AddComment(R"DOC(
Convolution2D Transpose Operator.

The convolution transpose operation calculates the output based on the input, filter
and the dilations, strides, paddings, and groups parameters. The size of each dimension of the
parameters is checked during shape inference.
Input(Input) and output(Output) are in NCHW or NHWC format, where N is the batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Filter(Input) is in MCHW format, where M is the number of input feature channels,
C is the number of output feature channels, H is the height of the filter,
and W is the width of the filter.
Parameters(strides, paddings) contain two elements, which represent height
and width, respectively.
The input(X) size and output(Out) size may be different.

For example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1
  $$
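
  As a numeric check of the formulas above (assuming dilation 1 and a
  symmetric padding of 1 on each side): with $H_{in} = W_{in} = 5$,
  strides = {2, 2} and a 3 x 3 filter,
  $H_{out} = W_{out} = (5 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 9$.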
)DOC");
}

void Conv3DTransposeOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution transpose operator."
      "The format of input tensor is NCDHW or NDHWC. Where N is batch "
      "size, C is the number of channels, D is the depth of the feature, "
      "H is the height of the feature, and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution transpose operator."
           "The format of the filter tensor is MCDHW, where M is the number of "
           "input feature channels, C is the number of "
           "output feature channels, D "
           "is the depth of the filter, H is the height of the filter, and "
           "W is the width of the filter."
           "We enforce groups number == 1 and padding == 0 in "
           "the convolution3d transpose scenario.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator."
            "The format of output tensor is the same as input tensor."
            "Where N is batch size, C is "
            "the number of channels, D is the depth of the feature, H is the "
            "height of the feature, and W is the width of the feature.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation,h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1, 1}), the "
                            "strides{d_stride, h_stride, w_stride} of "
                            "convolution transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
                            "h_pad, w_pad) of convolution transpose operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution3d "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<bool>(
      "use_cudnn",
      "(bool, default false) Only used in cudnn kernel, need install cudnn")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddAttr<int>("workspace_size_MB",
               "Used in cudnn kernel only. workspace size for cudnn, in MB, "
               "workspace is a section of GPU memory which will be "
               "allocated/freed each time the operator runs, larger "
               "workspace size can increase performance but also requires "
               "better hardward. This size should be carefully set.")
      .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
  AddComment(R"DOC(
Convolution3D Transpose Operator.

The convolution transpose operation calculates the output based on the input, filter
and the dilations, strides, paddings, and groups parameters. The size of each dimension of the
parameters is checked during shape inference.
Input(Input) and output(Output) are in NCDHW or NDHWC format, where N is the batch size, C is the
number of channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of input feature channels,
C is the number of output feature channels, D is the depth of the filter, H is the
height of the filter, and W is the width of the filter.
Parameters(strides, paddings) contain three elements, which represent
depth, height, and width, respectively.
C
chengduoZH 已提交
464
The input(X) size and output(Out) size may be different.
C
chengduoZH 已提交
465

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
  $$
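
  As a numeric check of the formulas above (assuming dilation 1 and zero
  padding): with $D_{in} = H_{in} = W_{in} = 4$, strides = {2, 2, 2} and a
  3 x 3 x 3 filter,
  $D_{out} = H_{out} = W_{out} = (4 - 1) * 2 + 1 * (3 - 1) + 1 = 9$.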
)DOC");
}

void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto filter_dims = ctx->GetInputDim("Filter");
  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("Filter"))) {
    ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims);
  }
}

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"), ctx.GetPlace(),
      layout_, library_);
}

template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
  }
};

/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 */
template <typename T>
class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    // I, W, dO, ddI, ddW
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetInput("DOutput", this->Input(framework::GradVarName("Output")));
    op->SetInput("DDInput", this->OutputGrad(framework::GradVarName("Input")));
    op->SetInput("DDFilter",
                 this->OutputGrad(framework::GradVarName("Filter")));

    // ddO, dI, dW
    // Unlike the grad op, the double grad op does not use name@GRAD@GRAD
    // as the keys of its inputs and outputs.
    auto ddx = this->OutputGrad(framework::GradVarName("Input"));
    auto ddw = this->OutputGrad(framework::GradVarName("Filter"));

    op->SetOutput("DDOutput",
                  ddx.empty()
                      ? this->EmptyInputGrad()
                      : this->InputGrad(framework::GradVarName("Output")));
    op->SetOutput("DFilter", ddx.empty() ? this->EmptyInputGrad()
                                         : this->InputGrad("Filter"));
    op->SetOutput("DInput", ddw.empty() ? this->EmptyInputGrad()
                                        : this->InputGrad("Input"));

    op->SetAttrMap(this->Attrs());
  }
};

void ConvTransposeOpDoubleGrad::InferShape(
    framework::InferShapeContext* ctx) const {
  auto x_dims = ctx->GetInputDim("Input");
  auto w_dims = ctx->GetInputDim("Filter");
  auto do_dims = ctx->GetInputDim("DOutput");

  if (ctx->HasOutput("DDOutput") &&
      (ctx->HasInput("DDInput") || (ctx->HasInput("DDFilter")))) {
    ctx->SetOutputDim("DDOutput", do_dims);
  }
  if (ctx->HasOutput("DFilter") && ctx->HasInput("DDInput")) {
    ctx->SetOutputDim("DFilter", w_dims);
  }
  if (ctx->HasOutput("DInput") && ctx->HasInput("DDFilter")) {
    ctx->SetOutputDim("DInput", x_dims);
  }
}

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"), ctx.GetPlace(),
      layout_, library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// conv2d_transpose
REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(
    conv2d_transpose_grad, ops::ConvTransposeOpGrad,
    ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
    ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(conv2d_transpose_grad_grad, ops::ConvTransposeOpDoubleGrad);

REGISTER_OP_CPU_KERNEL(
    conv2d_transpose,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv2d_transpose_grad,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                     double>);

// conv3d_transpose
REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad);

REGISTER_OP_CPU_KERNEL(
    conv3d_transpose,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    conv3d_transpose_grad,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                     double>);

// depthwise conv2d_transpose
REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad);

REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_transpose,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    depthwise_conv2d_transpose_grad,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::GemmConvTransposeGradKernel<paddle::platform::CPUDeviceContext,
                                     double>);

REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}));

REGISTER_OP_VERSION(conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}))
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add new attributes [force_fp32_output, mkldnn_data_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("force_fp32_output",
                     "Force BF16 kernel output FP32, only used in MKL-DNN BF16",
                     false)
            .NewAttr("mkldnn_data_type", "Data type of mkldnn kernel",
                     "float32"));

REGISTER_OP_VERSION(conv3d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv3d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}));

REGISTER_OP_VERSION(depthwise_conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}));