/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_transpose_op.h"

#include <string>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#endif

namespace paddle {
namespace operators {

using DataLayout = phi::DataLayout;

framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    return framework::OpKernelType(data_type,
                                   ctx.GetPlace(),
                                   phi::DataLayout::kAnyLayout,
                                   framework::LibraryType::kCUDNN);
  }
#endif
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and
  // bias keep their shapes in NCHW order.
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = phi::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value).
    if (dl != phi::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}
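
// Illustrative note (a restatement of the branch above, not new behavior):
// when a MKL-DNN kernel is selected (expected layout kMKLDNN) but the
// incoming "Input" tensor is still in a plain layout, the data_format
// attribute (e.g. "NHWC") is reported as that tensor's layout, so the
// subsequent layout transform interprets the tensor's dimensions correctly.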

void Conv2DTransposeOpMaker::Make() {
  AddInput("Input",
           "(Tensor) The input tensor of convolution transpose operator. "
           "The format of input tensor is NCHW or NHWC. Where N is batch size, "
           "C is the number of input channels, H is the height of the feature, "
           "and W is the width of the feature.");
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of the convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of output feature channels, "
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of the filter "
           "application. The bias is a one-dimensional tensor with size "
           "equal to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable()
      .AsExtra();
  AddOutput("Output",
            "(Tensor) The output tensor of the convolution transpose "
            "operator. The format of the output tensor is the same as the "
            "input tensor.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({})
      .SupportTensor();
  AddAttr<int>("groups",
               "(int default: 1), the number of groups of the convolution "
               "transpose operator.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
      "convolution transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
      "transpose operator.")
      .SetDefault({0, 0});
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\", "
      "\"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding, or to "
      "\"SAME\" or \"VALID\" to compute the padding automatically.")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution2D Transpose Operator.

The convolution transpose operation calculates the output based on the input,
filter, and the dilations, strides, paddings, and groups parameters.
The size of each dimension of the parameters is checked during shape inference.
Input(Input) and output(Output) are in NCHW or NHWC format, where N is the
batch size, C is the number of channels, H is the height of the feature,
and W is the width of the feature.
Filter(Input) is in MCHW format, where M is the number of input feature
channels, C is the number of output feature channels, H is the height of the
filter, and W is the width of the filter.
The parameters strides and paddings each have two elements, representing the
height and width dimensions, respectively.
The input(X) size and output(Out) size may be different.

For example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1
  $$
)DOC");
}
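
// Illustrative sketch only (not part of the original operator code): a
// constexpr restatement of the per-dimension output-size formula documented
// above, with a compile-time check of one worked example. The helper name is
// hypothetical.
namespace {
constexpr int ConvTransposeOutSize(
    int in, int stride, int pad_a, int pad_b, int dilation, int filter) {
  return (in - 1) * stride - pad_a - pad_b + dilation * (filter - 1) + 1;
}
// E.g. H_in = 4, stride = 2, pads = 1/1, dilation = 1, H_f = 3 -> H_out = 7.
static_assert(ConvTransposeOutSize(4, 2, 1, 1, 1, 3) == 7,
              "worked example of the conv2d_transpose output-size formula");
}  // namespace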

void Conv3DTransposeOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of the convolution transpose operator. "
      "The format of the input tensor is NCDHW or NDHWC, where N is the "
      "batch size, C is the number of channels, D is the depth of the "
      "feature, H is the height of the feature, and W is the width of "
      "the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of the convolution transpose "
           "operator. The format of the filter tensor is MCDHW, where M is "
           "the number of input feature channels, C is the number of output "
           "feature channels, D is the depth of the filter, H is the height "
           "of the filter, and W is the width of the filter. "
           "We enforce groups number == 1 and padding == 0 in "
           "the convolution3d transpose scenario.");
  AddOutput("Output",
            "(Tensor) The output tensor of the convolution transpose "
            "operator. The format of the output tensor is the same as the "
            "input tensor, where N is the batch size, C is the number of "
            "channels, D is the depth of the feature, H is the height of "
            "the feature, and W is the width of the feature.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation, h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
                            "h_pad, w_pad) of convolution transpose operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>("groups",
               "(int default: 1), the number of groups of the convolution3d "
               "transpose operator.")
      .SetDefault(1);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\", "
      "\"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding, or to "
      "\"SAME\" or \"VALID\" to compute the padding automatically.")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution3D Transpose Operator.

The convolution transpose operation calculates the output based on the input,
filter, and the dilations, strides, paddings, and groups parameters.
The size of each dimension of the parameters is checked during shape inference.
Input(Input) and output(Output) are in NCDHW or NDHWC format, where N is the
batch size, C is the number of channels, D is the depth of the feature, H is
the height of the feature, and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of input feature
channels, C is the number of output feature channels, D is the depth of the
filter, H is the height of the filter, and W is the width of the filter.
The parameters strides and paddings each have three elements, representing the
depth, height, and width dimensions, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
  $$
)DOC");
}
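
// The ConvTransposeOutSize sketch above applies per spatial dimension here
// as well; e.g. D_in = 5, stride = 1, pads = 0/0, dilation = 2, D_f = 2
// gives D_out = (5 - 1) * 1 - 0 - 0 + 2 * (2 - 1) + 1 = 7.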

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    return framework::OpKernelType(data_type,
                                   ctx.GetPlace(),
                                   phi::DataLayout::kAnyLayout,
                                   framework::LibraryType::kCUDNN);
  }
#endif
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
  }
};

/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 */
template <typename T>
class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    // I, W, dO, ddI, ddW
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetInput("DOutput", this->Input(framework::GradVarName("Output")));
    op->SetInput("DDInput", this->OutputGrad(framework::GradVarName("Input")));
    op->SetInput("DDFilter",
                 this->OutputGrad(framework::GradVarName("Filter")));

    // ddO, dI, dW
    // Unlike the grad op, the double grad op does not use name@GRAD@GRAD
    // as the keys of its inputs and outputs.
    auto ddx = this->OutputGrad(framework::GradVarName("Input"));
    auto ddw = this->OutputGrad(framework::GradVarName("Filter"));

    op->SetOutput("DDOutput",
                  ddx.empty()
                      ? this->EmptyInputGrad()
                      : this->InputGrad(framework::GradVarName("Output")));
    op->SetOutput(
        "DFilter",
        ddx.empty() ? this->EmptyInputGrad() : this->InputGrad("Filter"));
    op->SetOutput(
        "DInput",
        ddw.empty() ? this->EmptyInputGrad() : this->InputGrad("Input"));

    op->SetAttrMap(this->Attrs());
  }
};
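
// For reference (following the maker above, not new behavior): for
// conv2d_transpose, the double-grad op is registered below as
// "conv2d_transpose_grad_grad", consuming {Input, Filter, DOutput, DDInput,
// DDFilter} and producing {DDOutput, DInput, DFilter}.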

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    return framework::OpKernelType(data_type,
                                   ctx.GetPlace(),
                                   phi::DataLayout::kAnyLayout,
                                   framework::LibraryType::kCUDNN);
  }
#endif
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
                            Conv2dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
                            Conv2dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
    conv2d_transpose_grad_grad,
    Conv2dTranposeDoubleGradInferShapeFunctor,
    PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta));

REGISTER_OPERATOR(conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeGradInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad_grad,
                  ops::ConvTransposeOpDoubleGrad,
                  Conv2dTranposeDoubleGradInferShapeFunctor);

// conv3d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose,
                            Conv3dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad,
                            Conv3dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeGradInferMeta));

REGISTER_OPERATOR(conv3d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv3dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv3d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  Conv3dTranposeGradInferShapeFunctor);

// depthwise conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
                            DepthWiseConv2dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
                            DepthWiseConv2dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));

REGISTER_OPERATOR(depthwise_conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  DepthWiseConv2dTranposeInferShapeFunctor);
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  DepthWiseConv2dTranposeGradInferShapeFunctor);

REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}));

REGISTER_OP_VERSION(conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}))
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add new attributes [force_fp32_output, mkldnn_data_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("force_fp32_output",
                     "Force the BF16 kernel to output FP32, only used in "
                     "MKL-DNN BF16",
                     false)
            .NewAttr("mkldnn_data_type",
                     "Data type of mkldnn kernel",
                     "float32"));

REGISTER_OP_VERSION(conv3d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv3d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}));

REGISTER_OP_VERSION(depthwise_conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}));