/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/conv_transpose_op.h"

#include <string>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using DataLayout = framework::DataLayout;
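
// Kernel selection for the forward op: prefer cuDNN when built with GPU
// support, "use_cudnn" is set, and a cuDNN handle is available; otherwise
// try the oneDNN (MKL-DNN) kernel; fall back to the plain native kernel.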
framework::OpKernelType ConvTransposeOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "Input");
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    if (ctx.HasAttr("use_cudnn") && ctx.Attr<bool>("use_cudnn") &&
        dev_ctx.cudnn_handle() != nullptr) {
      return framework::OpKernelType(data_type,
                                     ctx.GetPlace(),
                                     framework::DataLayout::kAnyLayout,
                                     framework::LibraryType::kCUDNN);
    }
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (this->CanMKLDNNBeUsed(ctx, data_type)) {
    return framework::OpKernelType(data_type,
                                   ctx.GetPlace(),
                                   framework::DataLayout::kMKLDNN,
                                   framework::LibraryType::kMKLDNN);
  }
#endif

  return framework::OpKernelType(data_type, ctx.GetPlace());
}
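
// Per-variable kernel type: decides which layout a variable must be
// transformed to before the kernel runs. Only "Input" may need the oneDNN
// layout conversion; Filter and Bias keep their NCHW-order shapes.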
framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and
  // bias already have their shapes in NCHW order.
  if ((var_name == "Input") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = framework::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value).
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

void Conv2DTransposeOpMaker::Make() {
  AddInput("Input",
           "(Tensor) The input tensor of convolution transpose operator. "
           "The format of input tensor is NCHW or NHWC. Where N is batch size, "
           "C is the number of input channels, H is the height of the feature, "
           "and W is the width of the feature.");
  AddInput(
      "Filter",
      "(Tensor) The filter tensor of convolution transpose operator. "
      "The format of the filter tensor is MCHW, where M is the number of "
      "input feature channels, C is the number of "
      "output feature channels,"
      "H is the height of the filter, and W is the width of the filter. "
      "We enforce groups number == 1 in the convolution transpose scenario.");
  AddInput("Bias",
           "(Tensor) Bias to be added to each output of filter application."
           "The format of output tensor is X (one-dimensional) of size equal"
           "to the number of output channels. Only used with MKL-DNN.")
      .AsDispensable()
      .AsExtra();
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({})
      .SupportTensor();
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<std::vector<int>>("dilations",
                            "(vector<int> default:{1, 1}), the "
                            "dilations(h_dilation, w_dilation) of convolution "
                            "transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int> default:{1, 1}), the strides(h_stride, w_stride) of "
      "convolution transpose operator.")
      .SetDefault({1, 1});
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int> default:{0, 0}), the paddings(h_pad, w_pad) of convolution "
      "transpose operator.")
      .SetDefault({0, 0});
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
146 147 148 149 150 151 152 153 154
      "Specify that the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution2D Transpose Operator.

The convolution transpose operation calculates the output based on the input,
filter, and the dilations, strides, paddings, and groups parameters. The size
of each dimension of the parameters is checked during infer-shape.
Input(Input) and output(Output) are in NCHW or NHWC format, where N is the
batch size, C is the number of channels, H is the height of the feature, and
W is the width of the feature.
Filter(Input) is in MCHW format, where M is the number of input feature
channels, C is the number of output feature channels, H is the height of the
filter, and W is the width of the filter.
Parameters(strides, paddings) have two elements, which represent height and
width, respectively.
The input(X) size and output(Out) size may be different.

For example:
  Input:
       Input shape: $(N, C_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, H_{out}, W_{out})$
  Where
  $$
       H_{out} = (H_{in} - 1) * strides[0] - pad_height_top - pad_height_bottom + dilations[0] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[1] - pad_width_left - pad_width_right + dilations[1] * (W_f - 1) + 1
  $$
)DOC");
}
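
// A worked instance of the H_out formula in the doc above, with values
// chosen only for illustration: H_in = 8, strides[0] = 2,
// pad_height_top = pad_height_bottom = 1, dilations[0] = 1, H_f = 4 gives
//   H_out = (8 - 1) * 2 - 1 - 1 + 1 * (4 - 1) + 1 = 16.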

void Conv3DTransposeOpMaker::Make() {
  AddInput(
      "Input",
      "(Tensor) The input tensor of convolution transpose operator. "
      "The format of input tensor is NCDHW or NDHWC, where N is batch "
      "size, C is the number of channels, D is the depth of the feature, "
      "H is the height of the feature, and W is the width of the feature.");
  AddInput("Filter",
           "(Tensor) The filter tensor of convolution transpose operator. "
           "The format of the filter tensor is MCDHW, where M is the number "
           "of input feature channels, C is the number of output feature "
           "channels, D is the depth of the filter, H is the height of the "
           "filter, and W is the width of the filter. "
           "We enforce groups number == 1 and padding == 0 in "
           "the convolution3d transpose scenario.");
  AddOutput("Output",
            "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is the same as input tensor, "
            "where N is batch size, C is "
            "the number of channels, D is the depth of the feature, H is the "
            "height of the feature, and W is the width of the feature.");
  AddAttr<std::vector<int>>("output_padding",
                            "(vector<int> default: []), Additional size added "
                            "to one side of each dimension in the output "
                            "shape")
      .SetDefault({});
  AddAttr<std::vector<int>>("output_size",
                            "(vector<int> default: []), the "
                            "size of the output tensor")
      .SetDefault({});
  AddAttr<std::vector<int>>(
      "dilations",
      "(vector<int> default:{1, 1, 1}), the "
      "dilations(d_dilation, h_dilation, w_dilation) of convolution "
      "transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("strides",
                            "(vector<int> default:{1, 1, 1}), the "
                            "strides(d_stride, h_stride, w_stride) of "
                            "convolution transpose operator.")
      .SetDefault({1, 1, 1});
  AddAttr<std::vector<int>>("paddings",
                            "(vector<int> default:{0, 0, 0}), paddings(d_pad, "
                            "h_pad, w_pad) of convolution transpose operator.")
      .SetDefault({0, 0, 0});
  AddAttr<int>("groups",
               "(int default:1), the groups number of the convolution3d "
               "transpose operator. ")
      .SetDefault(1);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Specify whether the data format of the input and output data is "
      "channel_first or channel_last.")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddComment(R"DOC(
Convolution3D Transpose Operator.

The convolution transpose operation calculates the output based on the input,
filter, and the dilations, strides, paddings, and groups parameters. The size
of each dimension of the parameters is checked during infer-shape.
Input(Input) and output(Output) are in NCDHW or NDHWC format, where N is the
batch size, C is the number of channels, D is the depth of the feature, H is
the height of the feature, and W is the width of the feature.
Filter(Input) is in MCDHW format, where M is the number of input feature
channels, C is the number of output feature channels, D is the depth of the
filter, H is the height of the filter, and W is the width of the filter.
Parameters(strides, paddings) have three elements, which represent depth,
height, and width, respectively.
The input(X) size and output(Out) size may be different.

Example:
  Input:
       Input shape: $(N, C_{in}, D_{in}, H_{in}, W_{in})$
       Filter shape: $(C_{in}, C_{out}, D_f, H_f, W_f)$
  Output:
       Output shape: $(N, C_{out}, D_{out}, H_{out}, W_{out})$
  Where
  $$
       D_{out} = (D_{in} - 1) * strides[0] - pad_depth_front - pad_depth_back + dilations[0] * (D_f - 1) + 1 \\
       H_{out} = (H_{in} - 1) * strides[1] - pad_height_top - pad_height_bottom + dilations[1] * (H_f - 1) + 1 \\
       W_{out} = (W_{in} - 1) * strides[2] - pad_width_left - pad_width_right + dilations[2] * (W_f - 1) + 1
  $$
)DOC");
}
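
// A worked instance of the D_out formula in the doc above, with
// illustrative values: D_in = 4, strides[0] = 2,
// pad_depth_front = pad_depth_back = 0, dilations[0] = 1, D_f = 3 gives
//   D_out = (4 - 1) * 2 - 0 - 0 + 1 * (3 - 1) + 1 = 9.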

framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn =
      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
      ctx.GetPlace(),
      layout_,
      library_);
}
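
// Generates the grad op desc: Input, Filter (and Bias when present) plus
// Output@GRAD are forwarded to "<forward op>_grad", which produces
// Input@GRAD, Filter@GRAD and, when Bias is used, Bias@GRAD.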

template <typename T>
class ConvTransposeGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
    op->SetOutput(framework::GradVarName("Filter"), this->InputGrad("Filter"));
    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }
    op->SetInput(framework::GradVarName("Output"), this->OutputGrad("Output"));
    op->SetAttrMap(this->Attrs());
  }
};

/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 */
template <typename T>
class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    // I, W, dO, ddI, ddW
    op->SetInput("Input", this->Input("Input"));
    op->SetInput("Filter", this->Input("Filter"));
    op->SetInput("DOutput", this->Input(framework::GradVarName("Output")));
    op->SetInput("DDInput", this->OutputGrad(framework::GradVarName("Input")));
    op->SetInput("DDFilter",
                 this->OutputGrad(framework::GradVarName("Filter")));

    // ddO, dI, dW
    // Unlike grad op, double grad op does not use name@GRAD@GRAD
    // as key of ops' inputs and outputs.
    auto ddx = this->OutputGrad(framework::GradVarName("Input"));
    auto ddw = this->OutputGrad(framework::GradVarName("Filter"));

    op->SetOutput("DDOutput",
                  ddx.empty()
                      ? this->EmptyInputGrad()
                      : this->InputGrad(framework::GradVarName("Output")));
    op->SetOutput(
        "DFilter",
        ddx.empty() ? this->EmptyInputGrad() : this->InputGrad("Filter"));
    op->SetOutput(
        "DInput",
        ddw.empty() ? this->EmptyInputGrad() : this->InputGrad("Input"));

    op->SetAttrMap(this->Attrs());
  }
};

framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  bool use_cudnn =
      ctx.HasAttr("use_cudnn") ? ctx.Attr<bool>("use_cudnn") : false;
  use_cudnn &= platform::is_gpu_place(ctx.GetPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::is_gpu_place(ctx.GetPlace())) {
    auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  framework::LibraryType library_;
  if (use_cudnn) {
    library_ = framework::LibraryType::kCUDNN;
  } else {
    library_ = framework::LibraryType::kPlain;
  }

  framework::DataLayout layout_ = framework::DataLayout::kAnyLayout;
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "Input"),
      ctx.GetPlace(),
      layout_,
      library_);
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

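// Shape inference is delegated to the phi InferMeta functions; each
// DECLARE_INFER_SHAPE_FUNCTOR below wraps one into a functor that the
// REGISTER_OPERATOR calls reference.
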
// conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
                            Conv2dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
                            Conv2dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
    conv2d_transpose_grad_grad,
    Conv2dTranposeDoubleGradInferShapeFunctor,
    PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta));

REGISTER_OPERATOR(conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  ops::ConvTransposeDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeDoubleGradMaker<paddle::imperative::OpBase>,
                  Conv2dTranposeGradInferShapeFunctor);
REGISTER_OPERATOR(conv2d_transpose_grad_grad,
                  ops::ConvTransposeOpDoubleGrad,
                  Conv2dTranposeDoubleGradInferShapeFunctor);

// conv3d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose,
                            Conv3dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad,
                            Conv3dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::ConvTransposeGradInferMeta));

REGISTER_OPERATOR(conv3d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv3DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  Conv3dTranposeInferShapeFunctor);
REGISTER_OPERATOR(conv3d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  Conv3dTranposeGradInferShapeFunctor);

// depthwise conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
                            DepthWiseConv2dTranposeInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
                            DepthWiseConv2dTranposeGradInferShapeFunctor,
                            PD_INFER_META(phi::Conv2dTransposeGradInferMeta));

REGISTER_OPERATOR(depthwise_conv2d_transpose,
                  ops::ConvTransposeOp,
                  ops::Conv2DTransposeOpMaker,
                  ops::ConvTransposeGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConvTransposeGradOpMaker<paddle::imperative::OpBase>,
                  DepthWiseConv2dTranposeInferShapeFunctor);
REGISTER_OPERATOR(depthwise_conv2d_transpose_grad,
                  ops::ConvTransposeOpGrad,
                  DepthWiseConv2dTranposeGradInferShapeFunctor);

REGISTER_OP_VERSION(conv_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv_transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
468
            std::vector<int>{}));

REGISTER_OP_VERSION(conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}))
    .AddCheckpoint(
        R"ROC(
      Upgrade conv2d transpose to add new attributes [force_fp32_output, mkldnn_data_type].
    )ROC",
        paddle::framework::compatible::OpVersionDesc()
            .NewAttr("force_fp32_output",
                     "Force BF16 kernel output FP32, only used in MKL-DNN BF16",
                     false)
            .NewAttr("mkldnn_data_type",
                     "Data type of mkldnn kernel",
                     "float32"));

REGISTER_OP_VERSION(conv3d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade conv3d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}));

REGISTER_OP_VERSION(depthwise_conv2d_transpose)
    .AddCheckpoint(
        R"ROC(
      Upgrade depthwise conv2d transpose to add a new attribute [output_padding].
    )ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "output_padding",
            "In order to add additional size to one side of each dimension "
            "in the output",
            std::vector<int>{}));