/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_transpose_op.h"

#include <string>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using DataLayout = phi::DataLayout;

framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and bias
  // always have their shape in NCHW order.
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = phi::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value).
    if (dl != phi::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}
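
// Illustrative note (an assumption about the surrounding framework, not
// enforced in this file): the layout returned above feeds Paddle's data
// transform step, e.g. an NHWC "Input" entering an MKL-DNN kernel is tagged
// with its true NHWC layout so it can be reordered into MKL-DNN's internal
// format instead of being misread as NCHW.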

void Conv2DTransposeOpMaker::Make() {
  AddInput("Input",
           "(Tensor) The input tensor of the convolution transpose operator. "
           "The format of the input tensor is NCHW or NHWC, where N is the "
           "batch size, C is the number of input channels, H is the height "
           "of the feature, and W is the width of the feature.");
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of the convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of output feature channels, "
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of the filter "
           "application. The bias is one-dimensional, with size equal to "
           "the number of output channels. Only used with MKL-DNN.")
      .AsDispensable()
      .AsExtra();
  AddOutput("Output",
            "(Tensor) The output tensor of the convolution transpose "
            "operator. The format of the output tensor is the same as that "
            "of the input tensor.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), additional size added "
                            "to one side of each dimension in the output "
                            "shape.")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor.")
      .SetDefault({})
      .SupportTensor();
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution "
               "transpose operator.")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
      "convolution transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of "
      "convolution transpose operator.")
      .SetDefault({0, 0});
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\", "
      "\"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding; set to "
      "\"SAME\" or \"VALID\" to compute the padding automatically.")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution2D Transpose Operator.

The convolution transpose operation calculates the output based on the input,
filter, and the dilations, strides, paddings and groups parameters. The size
of each dimension of these parameters is checked during shape inference.
Input(Input) and output(Output) are in NCHW or NHWC format, where N is the
batch size, C is the number of channels, H is the height of the feature, and
W is the width of the feature.
Filter(Input) is in MCHW format, where M is the number of input feature
channels, C is the number of output feature channels, H is the height of the
filter, and W is the width of the filter.
Parameters(strides, paddings) each contain two elements, which represent
height and width, respectively.
The input(X) size and output(Out) size may be different.

For example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1
  $$
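
A concrete case (illustrative numbers only): with $H_{in} = W_{in} = 8$,
strides = {2, 2}, paddings = {1, 1} (applied symmetrically on both sides of
each dimension), dilations = {1, 1} and a 3x3 filter ($H_f = W_f = 3$):
  $$
       H_{out} = (8 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 15 \\
       W_{out} = (8 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 15
  $$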
)DOC");
}

void Conv3DTransposeOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of the convolution transpose operator. "
      "The format of the input tensor is NCDHW or NDHWC, where N is the "
      "batch size, C is the number of channels, D is the depth of the "
      "feature, H is the height of the feature, and W is the width of the "
      "feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of the convolution transpose "
           "operator. The format of the filter tensor is MCDHW, where M is "
           "the number of input feature channels, C is the number of output "
           "feature channels, D is the depth of the filter, H is the height "
           "of the filter, and W is the width of the filter. "
           "We enforce groups number == 1 and padding == 0 in "
           "the convolution3d transpose scenario.");
  AddOutput("Output",
            "(Tensor) The output tensor of the convolution transpose "
            "operator. The format of the output tensor is the same as that "
            "of the input tensor, where N is the batch size, C is the "
            "number of channels, D is the depth of the feature, H is the "
            "height of the feature, and W is the width of the feature.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), additional size added "
                            "to one side of each dimension in the output "
                            "shape.")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor.")
      .SetDefault({});
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation, h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
                            "h_pad, w_pad) of convolution transpose operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution3d "
               "transpose operator.")
      .SetDefault(1);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\", "
      "\"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding; set to "
      "\"SAME\" or \"VALID\" to compute the padding automatically.")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution3D Transpose Operator.

The convolution transpose operation calculates the output based on the input,
filter, and the dilations, strides, paddings and groups parameters. The size
of each dimension of these parameters is checked during shape inference.
Input(Input) and output(Output) are in NCDHW or NDHWC format, where N is the
batch size, C is the number of channels, D is the depth of the feature, H is
the height of the feature, and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of input feature
channels, C is the number of output feature channels, D is the depth of the
filter, H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings) each contain three elements, which represent
depth, height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
  $$
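
A concrete case (illustrative numbers only): with $D_{in} = 4$,
$H_{in} = W_{in} = 8$, strides = {2, 2, 2}, paddings = {1, 1, 1} (applied
symmetrically on both sides of each dimension), dilations = {1, 1, 1} and a
3x3x3 filter:
  $$
       D_{out} = (4 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 7 \\
       H_{out} = (8 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 15 \\
       W_{out} = (8 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 15
  $$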
)DOC");
}

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
  }
};
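
// For illustration: for conv2d_transpose, the Apply() above generates a grad
// op wired as follows (names use the @GRAD suffix that
// framework::GradVarName produces; Bias entries appear only when present):
//   conv2d_transpose_grad
//     inputs : Input, Filter, [Bias], Output@GRAD
//     outputs: Input@GRAD, Filter@GRAD, [Bias@GRAD]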

/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 */
template <typename T>
class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    // I, W, dO, ddI, ddW
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetInput("DOutput", this->Input(framework::GradVarName("Output")));
    op->SetInput("DDInput", this->OutputGrad(framework::GradVarName("Input")));
    op->SetInput("DDFilter",
                 this->OutputGrad(framework::GradVarName("Filter")));

    // ddO, dI, dW
    // Unlike the grad op, the double grad op does not use name@GRAD@GRAD
    // as the key of its inputs and outputs.
    auto ddx = this->OutputGrad(framework::GradVarName("Input"));
    auto ddw = this->OutputGrad(framework::GradVarName("Filter"));

    op->SetOutput("DDOutput",
                  ddx.empty()
                      ? this->EmptyInputGrad()
                      : this->InputGrad(framework::GradVarName("Output")));
    op->SetOutput(
        "DFilter",
        ddx.empty() ? this->EmptyInputGrad() : this->InputGrad("Filter"));
    op->SetOutput(
        "DInput",
        ddw.empty() ? this->EmptyInputGrad() : this->InputGrad("Input"));

    op->SetAttrMap(this->Attrs());
  }
};
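
// Note: if ddx (the second-order grad of Input) is absent, DDOutput and
// DFilter above are wired to an empty grad var; likewise, if ddw is absent,
// DInput is. This lets the double grad kernel skip those computations.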

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
                            Conv2dTransposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
                            Conv2dTransposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
    conv2d_transpose_grad_grad,
    Conv2dTransposeDoubleGradInferShapeFunctor,
    PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta));

REGISTER_OPERATOR(conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv2dTransposeInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>,
                  Conv2dTransposeGradInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad_grad,
                  ops::ConvTransposeOpDoubleGrad,
                  Conv2dTransposeDoubleGradInferShapeFunctor);

// conv3d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose,
                            Conv3dTransposeInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad,
                            Conv3dTransposeGradInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeGradInferMeta));

REGISTER_OPERATOR(conv3d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv3dTransposeInferShapeFunctor);
REGISTER_OPERATOR(conv3d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  Conv3dTransposeGradInferShapeFunctor);

// depthwise conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
                            DepthWiseConv2dTransposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
                            DepthWiseConv2dTransposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));

REGISTER_OPERATOR(depthwise_conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  DepthWiseConv2dTransposeInferShapeFunctor);
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  DepthWiseConv2dTransposeGradInferShapeFunctor);

REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}));

REGISTER_OP_VERSION(conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}))
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d_transpose to add new attributes [force_fp32_output, mkldnn_data_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("force_fp32_output",
                     "Force the BF16 kernel to output FP32; only used in "
                     "MKL-DNN BF16",
                     false)
            .NewAttr("mkldnn_data_type",
                     "Data type of the MKL-DNN kernel",
                     "float32"));

REGISTER_OP_VERSION(conv3d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv3d_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}));

REGISTER_OP_VERSION(depthwise_conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade depthwise_conv2d_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension in the "
            "output shape",
            std::vector<int>{}));