/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/conv_transpose_op.h"

#include <string>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using DataLayout = phi::DataLayout;

framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    if (ctx.HasAttr("use_cudnn") && ctx.Attr<bool>("use_cudnn") &&
        dev_ctx.cudnn_handle() != nullptr) {
      return framework::OpKernelType(data_type,
                                     ctx.GetPlace(),
                                     phi::DataLayout::kAnyLayout,
                                     framework::LibraryType::kCUDNN);
    }
  }
#endif
  return framework::OpKernelType(data_type, ctx.GetPlace());
}

framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and
  // bias already have their shapes in NCHW order
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = phi::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value)
    if (dl != phi::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

void Conv2DTransposeOpMaker::Make() {
  AddInput("Input",
           "(Tensor) The input tensor of convolution transpose operator. "
           "The format of input tensor is NCHW or NHWC. Where N is batch size, "
           "C is the number of input channels, H is the height of the feature, "
           "and W is the width of the feature.");
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of "
      "output feature channels, "
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application. "
           "The format of output tensor is X (one-dimensional), of size equal "
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable()
      .AsExtra();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({})
      .SupportTensor();
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
      "convolution transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
      "transpose operator.")
      .SetDefault({0, 0});
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\", "
      "\"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" to have the padding computed automatically.")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution2D Transpose Operator.

The convolution transpose operation calculates the output based on the input, the filter,
and the dilations, strides, paddings and groups parameters. The size of each dimension of the
parameters is checked during shape inference.
Input(Input) and output(Output) are in NCHW or NHWC format, where N is the batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Filter(Input) is in MCHW format, where M is the number of input feature channels,
C is the number of output feature channels, H is the height of the filter,
and W is the width of the filter.
The strides and paddings parameters each contain two elements, representing height
and width, respectively.
The input(Input) size and output(Output) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1
  $$
)DOC");
}
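
// A worked instance of the 2-D output-size formula documented above (an
// illustrative sketch; the concrete numbers are invented for this comment,
// not taken from the op): with H_in = W_in = 4, strides = {2, 2},
// paddings = {1, 1} (so each of pad_height_top/bottom and
// pad_width_left/right is 1), dilations = {1, 1} and a 3x3 filter,
//   H_out = (4 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 7,
// and likewise W_out = 7, so an input of shape (N, C_in, 4, 4) yields an
// output of shape (N, C_out, 7, 7).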

void Conv3DTransposeOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution transpose operator. "
      "The format of input tensor is NCDHW or NDHWC, where N is the batch "
      "size, C is the number of channels, D is the depth of the feature, "
      "H is the height of the feature, and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution transpose operator. "
           "The format of the filter tensor is MCDHW, where M is the number of "
           "input feature channels, C is the number of "
           "output feature channels, D "
           "is the depth of the filter, H is the height of the filter, and "
           "W is the width of the filter. "
           "We enforce groups number == 1 and padding == 0 in "
           "the convolution3d transpose scenario.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor, "
            "where N is the batch size, C is "
            "the number of channels, D is the depth of the feature, H is the "
            "height of the feature, and W is the width of the feature.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation, h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
                            "h_pad, w_pad) of convolution transpose operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution3d "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\", "
      "\"SAME\", \"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" to have the padding computed automatically.")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution3D Transpose Operator.

The convolution transpose operation calculates the output based on the input, the filter,
and the dilations, strides, paddings and groups parameters. The size of each dimension of the
parameters is checked during shape inference.
Input(Input) and output(Output) are in NCDHW or NDHWC format, where N is the batch size, C is the
number of channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of input feature channels,
C is the number of output feature channels, D is the depth of the filter, H is the
height of the filter, and W is the width of the filter.
The strides and paddings parameters each contain three elements, representing
depth, height and width, respectively.
The input(Input) size and output(Output) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
  $$
)DOC");
}
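
// The same arithmetic in 3-D (again an illustrative sketch with invented
// values): with D_in = H_in = W_in = 4, strides = {2, 2, 2}, paddings =
// {1, 1, 1}, dilations = {1, 1, 1} and a 3x3x3 filter, each spatial
// dimension becomes (4 - 1) * 2 - 1 - 1 + 1 * (3 - 1) + 1 = 7, so an input
// of shape (N, C_in, 4, 4, 4) yields an output of shape (N, C_out, 7, 7, 7).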

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn =
      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  phi::DataLayout layout_ = phi::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
      ctx.GetPlace(),
      layout_,
      library_);
}

template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
  }
};

/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 */
template <typename T>
class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    // I, W, dO, ddI, ddW
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetInput("DOutput", this->Input(framework::GradVarName("Output")));
    op->SetInput("DDInput", this->OutputGrad(framework::GradVarName("Input")));
    op->SetInput("DDFilter",
                 this->OutputGrad(framework::GradVarName("Filter")));

    // ddO, dI, dW
    // Unlike the grad op, the double grad op does not use name@GRAD@GRAD
    // as the key of its inputs and outputs.
    auto ddx = this->OutputGrad(framework::GradVarName("Input"));
    auto ddw = this->OutputGrad(framework::GradVarName("Filter"));

    op->SetOutput("DDOutput",
                  ddx.empty()
                      ? this->EmptyInputGrad()
                      : this->InputGrad(framework::GradVarName("Output")));
    op->SetOutput(
        "DFilter",
        ddx.empty() ? this->EmptyInputGrad() : this->InputGrad("Filter"));
    op->SetOutput(
        "DInput",
        ddw.empty() ? this->EmptyInputGrad() : this->InputGrad("Input"));

    op->SetAttrMap(this->Attrs());
  }
};

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn =
      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  phi::DataLayout layout_ = phi::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
      ctx.GetPlace(),
      layout_,
      library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
                            Conv2dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
                            Conv2dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
    conv2d_transpose_grad_grad,
    Conv2dTranposeDoubleGradInferShapeFunctor,
    PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta));

REGISTER_OPERATOR(conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeGradInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad_grad,
                  ops::ConvTransposeOpDoubleGrad,
                  Conv2dTranposeDoubleGradInferShapeFunctor);

// conv3d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose,
                            Conv3dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad,
                            Conv3dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeGradInferMeta));

REGISTER_OPERATOR(conv3d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv3dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv3d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  Conv3dTranposeGradInferShapeFunctor);

// depthwise conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
                            DepthWiseConv2dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
                            DepthWiseConv2dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));

REGISTER_OPERATOR(depthwise_conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  DepthWiseConv2dTranposeInferShapeFunctor);
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  DepthWiseConv2dTranposeGradInferShapeFunctor);

REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension "
            "in the output shape",
            std::vector<int>{}));

REGISTER_OP_VERSION(conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension "
            "in the output shape",
            std::vector<int>{}))
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add new attributes [force_fp32_output, mkldnn_data_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("force_fp32_output",
                     "Force the BF16 kernel to output FP32; only used in "
                     "MKL-DNN BF16",
                     false)
            .NewAttr("mkldnn_data_type",
                     "Data type of mkldnn kernel",
                     "float32"));

REGISTER_OP_VERSION(conv3d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv3d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension "
            "in the output shape",
            std::vector<int>{}));

REGISTER_OP_VERSION(depthwise_conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "Additional size added to one side of each dimension "
            "in the output shape",
            std::vector<int>{}));