conv_transpose_op.cc 21.2 KB
Newer Older
1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
C
chengduoZH 已提交
2

L
Luo Tao 已提交
3 4 5
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
C
chengduoZH 已提交
6

L
Luo Tao 已提交
7
    http://www.apache.org/licenses/LICENSE-2.0
C
chengduoZH 已提交
8

L
Luo Tao 已提交
9 10 11 12 13
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
C
chengduoZH 已提交
14

Y
Yi Wang 已提交
15
#include "paddle/fluid/operators/conv_transpose_op.h"
F
From00 已提交
16

S
Siddharth Goyal 已提交
17 18
#include <string>
#include <vector>
19

20
#include "paddle/fluid/framework/data_layout.h"
F
From00 已提交
21 22
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
23
#include "paddle/fluid/framework/op_version_registry.h"
24
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
F
From00 已提交
25 26 27
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
J
Jacek Czaja 已提交
28 29 30 31
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

C
chengduoZH 已提交
32 33 34
namespace paddle {
namespace operators {

35 36
using DataLayout = framework::DataLayout;

37 38
framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
J
Jacek Czaja 已提交
39
  framework::LibraryType library_{framework::LibraryType::kPlain};
40
  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
41 42
  bool use_cudnn =
      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
C
chengduoZH 已提交
43
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
44
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
45
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
C
chengduoZH 已提交
46
  if (platform::is_gpu_place(ctx.GetPlace())) {
L
Leo Chen 已提交
47
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
C
chengduoZH 已提交
48
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
J
Jacek Czaja 已提交
49 50 51
    if (use_cudnn) {
      library_ = framework::LibraryType::kCUDNN;
    }
C
chengduoZH 已提交
52 53
  }
#endif
J
Jacek Czaja 已提交
54 55
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
56
      this->CanMKLDNNBeUsed(ctx, data_type)) {
J
Jacek Czaja 已提交
57 58
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
59
  }
J
Jacek Czaja 已提交
60
#endif
61

62
  return framework::OpKernelType(data_type, ctx.GetPlace(), layout_, library_);
63 64
}

65
framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
66 67
    const std::string& var_name,
    const framework::Tensor& tensor,
68 69 70 71 72 73 74 75 76 77 78 79 80 81 82
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only input require reshaping, weights and
  // bias are having shape in NCHW order
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = framework::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
83
          expected_kernel_type.data_type_, tensor.place(), dl);
84 85 86
    }
  }
#endif
87 88
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
89 90
}

Y
Yu Yang 已提交
91
void Conv2DTransposeOpMaker::Make() {
92 93 94 95 96
  AddInput("Input",
           "(Tensor) The input tensor of convolution transpose operator. "
           "The format of input tensor is NCHW or NHWC. Where N is batch size, "
           "C is the number of input channels, H is the height of the feature, "
           "and W is the width of the feature.");
C
chengduoZH 已提交
97 98 99 100 101 102 103 104
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of "
      "output feature channels,"
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
105 106 107 108
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application."
           "The format of output tensor is X (one-dimensional) of size equal"
           "to the number of output channels. Only used with MKL-DNN.")
109 110
      .AsDispensable()
      .AsExtra();
C
chengduoZH 已提交
111
  AddOutput("Output",
C
chengduoZH 已提交
112
            "(Tensor) The output tensor of convolution transpose operator. "
113
            "The format of output tensor is the same as input tensor.");
L
LielinJiang 已提交
114 115 116 117 118
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
119 120 121
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
122 123
      .SetDefault({})
      .SupportTensor();
Y
Yibing Liu 已提交
124 125 126 127
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution "
               "transpose operator. ")
      .SetDefault(1);
C
chengduoZH 已提交
128 129 130 131 132
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
C
chengduoZH 已提交
133 134
  AddAttr<std::vector<int>>(
      "strides",
C
chengduoZH 已提交
135
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
136
      "convolution transpose operator.")
C
chengduoZH 已提交
137
      .SetDefault({1, 1});
C
chengduoZH 已提交
138 139
  AddAttr<std::vector<int>>(
      "paddings",
C
chengduoZH 已提交
140
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
C
chengduoZH 已提交
141
      "transpose operator.")
C
chengduoZH 已提交
142
      .SetDefault({0, 0});
143 144 145 146
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
147 148 149 150 151 152 153 154 155
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
C
chengduoZH 已提交
156
  AddComment(R"DOC(
C
chengduoZH 已提交
157 158
Convolution2D Transpose Operator.

C
chengduoZH 已提交
159
The convolution transpose operation calculates the output based on the input, filter
C
chengduoZH 已提交
160
and dilations, strides, paddings, groups parameters. The size of each dimension of the
C
chengduoZH 已提交
161
parameters is checked in the infer-shape.
162
Input(Input) and output(Output) are in NCHW or NHWC format. Where N is batchsize, C is the
C
chengduoZH 已提交
163 164 165 166 167 168
number of channels, H is the height of the feature, and W is the width of the feature.
Filter(Input) is in MCHW format. Where M is the number of input feature channels,
C is the number of output feature channels, H is the height of the filter,
and W is the width of the filter.
Parameters(strides, paddings) are two elements. These two elements represent height
and width, respectively.
C
chengduoZH 已提交
169
The input(X) size and output(Out) size may be different.
C
chengduoZH 已提交
170

Y
update  
yi.wu 已提交
171
For an example:
C
chengduoZH 已提交
172
  Input:
C
chengduoZH 已提交
173 174
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
C
chengduoZH 已提交
175
  Output:
C
chengduoZH 已提交
176 177 178
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
179 180
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom  + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left  - pad_width_right + dilations[1] * (W_f - 1) + 1
C
chengduoZH 已提交
181
  $$
C
chengduoZH 已提交
182 183 184
)DOC");
}

Y
Yu Yang 已提交
185
void Conv3DTransposeOpMaker::Make() {
186 187 188 189 190 191
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution transpose operator."
      "The format of input tensor is NCDHW or NDHWC. Where N is batch "
      "size, C is the number of channels, D is the depth of the feature, "
      "H is the height of the feature, and W is the width of the feature.");
C
chengduoZH 已提交
192 193
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution transpose operator."
C
chengduoZH 已提交
194 195 196
           "The format of the filter tensor is MCDHW, where M is the number of "
           "input feature channels, C is the number of "
           "output feature channels, D "
C
chengduoZH 已提交
197 198
           "is the depth of the filter, H is the height of the filter, and "
           "W is the width of the filter."
C
chengduoZH 已提交
199
           "We enforce groups number == 1 and padding == 0 in "
C
chengduoZH 已提交
200
           "the convolution3d transpose scenario.");
C
chengduoZH 已提交
201 202
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator."
203
            "The format of output tensor is the same as input tensor."
C
chengduoZH 已提交
204
            "Where N is batch size, C is "
C
chengduoZH 已提交
205 206
            "the number of channels, D is the depth of the feature, H is the "
            "height of the feature, and W is the width of the feature.");
L
LielinJiang 已提交
207 208 209 210 211
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
212 213 214 215
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
C
chengduoZH 已提交
216 217 218 219 220 221
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation,h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
C
chengduoZH 已提交
222
  AddAttr<std::vector<int>>("strides",
C
chengduoZH 已提交
223
                            "(vector<int> default:{1, 1, 1}), the "
224
                            "strides{d_stride, h_stride, w_stride} of "
C
chengduoZH 已提交
225
                            "convolution transpose operator.")
C
chengduoZH 已提交
226
      .SetDefault({1, 1, 1});
C
chengduoZH 已提交
227
  AddAttr<std::vector<int>>("paddings",
C
chengduoZH 已提交
228
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
C
chengduoZH 已提交
229
                            "h_pad, w_pad) of convolution transpose operator.")
C
chengduoZH 已提交
230
      .SetDefault({0, 0, 0});
231 232 233 234
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution3d "
               "transpose operator. ")
      .SetDefault(1);
235 236 237 238
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
239 240 241 242 243 244 245 246 247
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
C
chengduoZH 已提交
248
  AddComment(R"DOC(
C
chengduoZH 已提交
249 250
Convolution3D Transpose Operator.

C
chengduoZH 已提交
251
The convolution transpose operation calculates the output based on the input, filter
C
chengduoZH 已提交
252
and dilations, strides, paddings, groups parameters. The size of each dimension of the
C
chengduoZH 已提交
253
parameters is checked in the infer-shape.
254
Input(Input) and output(Output) are in NCDHW or NDHWC format. Where N is batch size, C is the
C
chengduoZH 已提交
255 256 257 258 259 260 261
number of channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature.
Filter(Input) is in MCDHW format. Where M is the number of input feature channels,
C is the number of output feature channels, D is the depth of the filter,H is the
height of the filter, and W is the width of the filter.
Parameters(strides, paddings) are three elements. These three elements represent
depth, height and width, respectively.
C
chengduoZH 已提交
262
The input(X) size and output(Out) size may be different.
C
chengduoZH 已提交
263

264
Example:
C
chengduoZH 已提交
265
  Input:
C
chengduoZH 已提交
266 267
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
C
chengduoZH 已提交
268
  Output:
C
chengduoZH 已提交
269 270 271
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
272 273 274
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top  - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
C
chengduoZH 已提交
275
  $$
C
chengduoZH 已提交
276 277 278
)DOC");
}

279 280
framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
281 282
  bool use_cudnn =
      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
283
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
284
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
C
chengduoZH 已提交
285
  if (platform::is_gpu_place(ctx.GetPlace())) {
L
Leo Chen 已提交
286
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
C
chengduoZH 已提交
287 288 289
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
290 291 292 293 294 295 296
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

297
  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
298
  return framework::OpKernelType(
299 300 301 302
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
      ctx.GetPlace(),
      layout_,
      library_);
303 304
}

H
hong 已提交
305 306
template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
S
sneaxiy 已提交
307
 public:
H
hong 已提交
308
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
S
sneaxiy 已提交
309 310

 protected:
311
  void Apply(GradOpPtr<T> op) const override {
H
hong 已提交
312 313 314 315 316 317 318 319
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
S
sneaxiy 已提交
320
    }
H
hong 已提交
321 322
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
S
sneaxiy 已提交
323 324 325
  }
};

326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354
/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 */
template <typename T>
class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    // I, W, dO, ddI, ddW
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetInput("DOutput", this->Input(framework::GradVarName("Output")));
    op->SetInput("DDInput", this->OutputGrad(framework::GradVarName("Input")));
    op->SetInput("DDFilter",
                 this->OutputGrad(framework::GradVarName("Filter")));

    // ddO, dI, dW
    // Unlike grad op, double grad op does not use name@GRAD@GRAD
    // as key of ops' inputs and outputs.
    auto ddx = this->OutputGrad(framework::GradVarName("Input"));
    auto ddw = this->OutputGrad(framework::GradVarName("Filter"));

    op->SetOutput("DDOutput",
                  ddx.empty()
                      ? this->EmptyInputGrad()
                      : this->InputGrad(framework::GradVarName("Output")));
355 356 357 358 359 360
    op->SetOutput(
        "DFilter",
        ddx.empty() ? this->EmptyInputGrad() : this->InputGrad("Filter"));
    op->SetOutput(
        "DInput",
        ddw.empty() ? this->EmptyInputGrad() : this->InputGrad("Input"));
361 362 363 364 365 366 367

    op->SetAttrMap(this->Attrs());
  }
};

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  // Same selection policy as ConvTransposeOpGrad: cuDNN only when requested,
  // on a GPU place, and with a cuDNN handle available.
  bool with_cudnn =
      ctx.HasAttr("use_cudnn") && ctx.Attr<bool>("use_cudnn") &&
      platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    with_cudnn = with_cudnn && (dev_ctx.cudnn_handle() != nullptr);
  }
#endif
  const auto library = with_cudnn ? framework::LibraryType::kCUDNN
                                  : framework::LibraryType::kPlain;

  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
      ctx.GetPlace(),
      framework::DataLayout::kAnyLayout,
      library);
}

C
chengduoZH 已提交
392 393 394 395
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
C
chengduoZH 已提交
396

397
// conv2d_transpose
398 399
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
                            Conv2dTranposeInferShapeFunctor,
400
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
F
From00 已提交
401 402
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
                            Conv2dTranposeGradInferShapeFunctor,
403
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
F
From00 已提交
404
DECLARE_INFER_SHAPE_FUNCTOR(
405 406
    conv2d_transpose_grad_grad,
    Conv2dTranposeDoubleGradInferShapeFunctor,
F
From00 已提交
407 408
    PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta));

409 410
REGISTER_OPERATOR(conv2d_transpose,
                  ops::ConvTransposeOp,
Y
Yang Yang 已提交
411
                  ops::Conv2DTransposeOpMaker,
H
hong 已提交
412
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
F
From00 已提交
413 414
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeInferShapeFunctor);
415 416
REGISTER_OPERATOR(conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
F
From00 已提交
417 418 419
                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeGradInferShapeFunctor);
420 421
REGISTER_OPERATOR(conv2d_transpose_grad_grad,
                  ops::ConvTransposeOpDoubleGrad,
F
From00 已提交
422
                  Conv2dTranposeDoubleGradInferShapeFunctor);
C
chengduoZH 已提交
423

// conv3d_transpose: uses the generic (rank-agnostic) conv transpose
// infer-meta functions; no double-grad op is registered for 3D.
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose,
                            Conv3dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad,
                            Conv3dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeGradInferMeta));

REGISTER_OPERATOR(conv3d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv3dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv3d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  Conv3dTranposeGradInferShapeFunctor);
441 442

// depthwise conv2d_transpose
F
From00 已提交
443 444
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
                            DepthWiseConv2dTranposeInferShapeFunctor,
445
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
F
From00 已提交
446 447
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
                            DepthWiseConv2dTranposeGradInferShapeFunctor,
448
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
F
From00 已提交
449

450 451
REGISTER_OPERATOR(depthwise_conv2d_transpose,
                  ops::ConvTransposeOp,
452
                  ops::Conv2DTransposeOpMaker,
H
hong 已提交
453
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
F
From00 已提交
454 455
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  DepthWiseConv2dTranposeInferShapeFunctor);
456 457
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
F
From00 已提交
458
                  DepthWiseConv2dTranposeGradInferShapeFunctor);
459 460 461 462 463 464 465 466 467 468

REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade convtranspose add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
469
            std::vector<int>{}));
470 471 472 473 474 475 476 477 478 479

REGISTER_OP_VERSION(conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
480 481 482 483 484 485 486 487 488
            std::vector<int>{}))
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add a new attributes [force_fp32_output, mkldnn_data_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("force_fp32_output",
                     "Force BF16 kernel output FP32, only used in MKL-DNN BF16",
                     false)
489 490
            .NewAttr("mkldnn_data_type",
                     "Data type of mkldnn kernel",
491
                     "float32"));
492 493 494 495 496 497 498 499 500 501

REGISTER_OP_VERSION(conv3d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv3d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
502
            std::vector<int>{}));
503 504 505 506 507 508 509 510 511 512

REGISTER_OP_VERSION(depthwise_conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
513
            std::vector<int>{}));