/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

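// DataTranspose permutes `input` into `output` on the GPU according to `axis`.
// When `flag` is 0 the output dims are the input dims permuted by `axis`;
// otherwise the input dims are used as-is. The kernels below use it for
// NHWC <-> NCHW layout conversions.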
template <typename T, int D>
static void DataTranspose(const framework::ExecutionContext& ctx,
                          const Tensor* input, Tensor* output,
                          const std::vector<int>& axis, int flag = 0) {
  auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
  phi::funcs::Transpose<platform::CUDADeviceContext, T, D> transpose;
  auto in_dims = input->dims();
  std::vector<int64_t> input_transpose_vec;
  for (size_t i = 0; i < axis.size(); ++i) {
    if (flag == 0)
      input_transpose_vec.push_back(in_dims[axis[i]]);
    else
      input_transpose_vec.push_back(in_dims[i]);
  }
  framework::DDim input_transpose_dims(phi::make_ddim(input_transpose_vec));
  output->mutable_data<T>(input_transpose_dims, ctx.GetPlace());
  transpose(dev_ctx, *input, output, axis);
}

template <typename T>
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
    auto* input = ctx.Input<Tensor>("Input");
    auto* filter = ctx.Input<Tensor>("Filter");
    auto* output = ctx.Output<Tensor>("Output");

    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");

    // cudnn v5 does not support dilations
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    const T* filter_data = filter->data<T>();
    const std::string data_layout_str = ctx.Attr<std::string>("data_format");
    const paddle::platform::DataLayout data_layout =
        (data_layout_str != "NHWC" ? platform::DataLayout::kNCHW
                                   : platform::DataLayout::kNHWC);

    // if channel_last, transpose to channel_first
    Tensor input_transpose;
    std::vector<int> input_vec = phi::vectorize<int>(input->dims());
    std::vector<int> output_vec = phi::vectorize<int>(output->dims());
    if (data_layout == platform::DataLayout::kNHWC) {
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 3, 1, 2};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output->dims()[axis[i]];
        }
        DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 4, 1, 2, 3};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output->dims()[axis[i]];
        }
        DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
      }
    } else {
      input_transpose = *input;
    }

    // update padding and dilation
    auto in_dims = input_transpose.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);

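    // cuDNN only supports symmetric padding. For asymmetric paddings, the
    // extra rows/columns are folded into the input tensor itself (padded up
    // front with pad_value), cuDNN then runs with the symmetric
    // padding_common, and the oversized output is sliced back to the
    // requested shape further below.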
    std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
    Tensor transformed_input;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = input_transpose.dims()[0];
      new_input_shape_vec[1] = input_transpose.dims()[1];

      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            input_transpose.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();

      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      const int rank = input_transpose.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              dev_ctx, input_pad, input_transpose, pad_value,
              &transformed_input);
        } break;
        case 5: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              dev_ctx, input_pad, input_transpose, pad_value,
              &transformed_input);
        } break;
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
      }
    } else {
      transformed_input = input_transpose;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

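    // Slice parameters used after the cuDNN call to crop the enlarged output
    // back to the requested shape when the padding was asymmetric.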
    std::vector<int64_t> starts(data_dim, 0);
    std::vector<int64_t> ends(data_dim, 0);
    std::vector<int64_t> axes(data_dim, 0);
    for (size_t i = 0; i < data_dim; ++i) {
      starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
      ends[i] = starts[i] + output_vec[i + 2];
      axes[i] = i + 2;
    }

    const T* input_data = transformed_input.data<T>();
    input_vec = phi::vectorize<int>(transformed_input.dims());

    std::vector<int> transformed_output_vec = output_vec;
    for (size_t i = 0; i < data_dim; ++i) {
      transformed_output_vec[i + 2] =
          output_vec[i + 2] +
          (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
          2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
    }

    Tensor transformed_output;
    if (!is_sys_pad) {
      DDim transformed_output_shape(phi::make_ddim(transformed_output_vec));
      transformed_output.mutable_data<T>(transformed_output_shape,
                                         ctx.GetPlace());
    } else {
      output->mutable_data<T>(ctx.GetPlace());
      transformed_output.ShareDataWith(*output);
      transformed_output.Resize(phi::make_ddim(transformed_output_vec));
    }
    T* transformed_output_data = transformed_output.data<T>();

    platform::DataLayout layout;

    int iwo_groups = groups;
    int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
    iwo_groups = 1;
    c_groups = groups;
    groups = 1;
#endif

    if (strides.size() == 2U) {
      layout = platform::DataLayout::kNCHW;
    } else {
      layout = platform::DataLayout::kNCDHW;
    }

    size_t workspace_size = 0;
#ifdef PADDLE_WITH_HIP
    miopenConvBwdDataAlgorithm_t algo{};
#else
    cudnnConvolutionBwdDataAlgo_t algo{};
#endif
    // ------------------- cudnn conv algorithm ---------------------
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();
    auto layout_tensor = GetCudnnTensorFormat(layout);
    bool deterministic = FLAGS_cudnn_deterministic;

    auto dtype = platform::CudnnDataType<T>::type;
    // ------------------- cudnn descriptors ---------------------
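    // Note the role reversal for conv transpose: the cuDNN "input" descriptor
    // is bound to this op's output and the "output" descriptor to this op's
    // input, matching the backward-data call below.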
    ConvArgs args{&transformed_output,
                  filter,
                  &transformed_input,
                  strides,
                  padding_common,
                  dilations,
                  dtype};
    args.handle = handle;
    args.idesc.set(transformed_output, iwo_groups);
    args.wdesc.set(*filter, layout_tensor, iwo_groups);
    args.odesc.set(transformed_input, iwo_groups);
    args.cdesc.set(dtype, padding_common, strides, dilations,
                   platform::AllowTF32Cudnn(), c_groups);

#ifdef PADDLE_WITH_HIP
    using search = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
    workspace_size = std::max(workspace_size, search::GetWorkspaceSize(args));
    algo = search::Find<T>(
        args, false, deterministic, workspace_size,
        ctx.template device_context<platform::CUDADeviceContext>());
#else
    using search = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
    algo = search::Find<T>(
        args, false, deterministic,
        ctx.template device_context<platform::CUDADeviceContext>());
    workspace_size =
        std::max(workspace_size, search::GetWorkspaceSize(args, algo));
#endif

    // ------------------- cudnn conv transpose forward ---------------------
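    // The forward pass of conv transpose is the backward-data pass of a
    // regular convolution, executed once per group with the offsets below.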
    int input_offset =
        transformed_input.numel() / transformed_input.dims()[0] / groups;
    int output_offset =
        transformed_output.numel() / transformed_output.dims()[0] / groups;
    int filter_offset = filter->numel() / groups;
    ScalingParamType<T> alpha = 1.0f;
    ScalingParamType<T> beta = 0.0f;
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            platform::dynload::miopenConvolutionBackwardData(
                handle, &alpha, args.odesc.desc(),
                input_data + input_offset * g, args.wdesc.desc(),
                filter_data + filter_offset * g, args.cdesc.desc(), algo, &beta,
                args.idesc.desc(), transformed_output_data + output_offset * g,
                cudnn_workspace, workspace_size));
      };
#else   // PADDLE_WITH_HIP
      auto cudnn_func = [&](void* cudnn_workspace) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            platform::dynload::cudnnConvolutionBackwardData(
                handle, &alpha, args.wdesc.desc(),
                filter_data + filter_offset * g, args.odesc.desc(),
                input_data + input_offset * g, args.cdesc.desc(), algo,
                cudnn_workspace, workspace_size, &beta, args.idesc.desc(),
                transformed_output_data + output_offset * g));
      };
#endif  // PADDLE_WITH_HIP
      workspace_handle.RunFunc(cudnn_func, workspace_size);
    }
    if (!is_sys_pad && strides.size() == 2U) {
      Slice<paddle::platform::CUDADeviceContext, T, 4>(
          ctx, &transformed_output, output, starts, ends, axes);
    } else if (!is_sys_pad && strides.size() == 3U) {
      Slice<paddle::platform::CUDADeviceContext, T, 5>(
          ctx, &transformed_output, output, starts, ends, axes);
    }

    if (data_layout == platform::DataLayout::kNHWC) {
      Tensor output_transpose;
      Tensor output_nchw;
      output_nchw.ShareDataWith(*output);
      output_nchw.Resize(phi::make_ddim(output_vec));
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 2, 3, 1};
        DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis);
        *output = output_transpose;
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 2, 3, 4, 1};
        DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis);
        *output = output_transpose;
      }
    }
  }
};

template <typename T>
class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
    auto input = ctx.Input<Tensor>("Input");
    auto filter = ctx.Input<Tensor>("Filter");
    auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
    auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
    auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
    const T* filter_data = filter->data<T>();

    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    // cudnn v5 does not support dilations
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
    const std::string data_layout_str = ctx.Attr<std::string>("data_format");
    const paddle::platform::DataLayout data_layout =
        (data_layout_str != "NHWC" ? platform::DataLayout::kNCHW
                                   : platform::DataLayout::kNHWC);

    // if channel_last, transpose to channel_first
    Tensor input_transpose;
    Tensor output_grad_transpose;
    std::vector<int> input_vec = phi::vectorize<int>(input->dims());
    std::vector<int> output_vec = phi::vectorize<int>(output_grad->dims());
    if (data_layout == platform::DataLayout::kNHWC) {
      if (strides.size() == 2U) {
        std::vector<int> axis = {0, 3, 1, 2};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output_grad->dims()[axis[i]];
        }
        DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
        DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
      } else if (strides.size() == 3U) {
        std::vector<int> axis = {0, 4, 1, 2, 3};
        for (size_t i = 0; i < axis.size(); ++i) {
          input_vec[i] = input->dims()[axis[i]];
          output_vec[i] = output_grad->dims()[axis[i]];
        }
        DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
        DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
      }
    } else {
      input_transpose = *input;
      output_grad_transpose = *output_grad;
    }

    // update padding and dilation
    auto in_dims = input_transpose.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);

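    // As in the forward kernel, asymmetric padding is folded into the tensor
    // itself (here the output gradient) so that cuDNN sees only the symmetric
    // padding_common.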
    std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
    Tensor transformed_output_grad;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_output_grad_shape_vec(data_dim + 2);
      new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
      new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];

      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_output_grad_shape_vec[i + 2] =
            output_grad_transpose.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_output_grad_shape(
          phi::make_ddim(new_output_grad_shape_vec));
      transformed_output_grad.Resize(new_output_grad_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();

      transformed_output_grad =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_output_grad_shape, dev_ctx);
      const int rank = input_transpose.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              dev_ctx, input_pad, output_grad_transpose, pad_value,
              &transformed_output_grad);
        } break;
        case 5: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              dev_ctx, input_pad, output_grad_transpose, pad_value,
              &transformed_output_grad);
        } break;
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Op(ConvTranspose) only supports 4-D or 5-D input Tensor."));
      }
    } else {
      transformed_output_grad = output_grad_transpose;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

    const T* input_data = input_transpose.data<T>();
    const T* output_grad_data = transformed_output_grad.data<T>();
    output_vec = phi::vectorize<int>(transformed_output_grad.dims());

    // ------------------- cudnn descriptors ---------------------
    platform::DataLayout layout;

    if (strides.size() == 2U) {
      layout = platform::DataLayout::kNCHW;
    } else {
      layout = platform::DataLayout::kNCDHW;
    }

    int iwo_groups = groups;
    int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
    iwo_groups = 1;
    c_groups = groups;
    groups = 1;
#endif

    auto dtype = platform::CudnnDataType<T>::type;

    ConvArgs args1{&transformed_output_grad,
                   filter,
                   &input_transpose,
                   strides,
                   padding_common,
                   dilations,
                   dtype};
    ConvArgs args2{&transformed_output_grad,
                   filter,
                   &input_transpose,
                   strides,
                   padding_common,
                   dilations,
                   dtype};

#ifdef PADDLE_WITH_HIP
    miopenConvFwdAlgorithm_t data_algo{};
    miopenConvBwdWeightsAlgorithm_t filter_algo{};
#else
    cudnnConvolutionFwdAlgo_t data_algo{};
    cudnnConvolutionBwdFilterAlgo_t filter_algo{};
#endif

    auto layout_tensor = GetCudnnTensorFormat(layout);
    size_t workspace_size = 0;
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    auto handle = dev_ctx.cudnn_handle();
    bool deterministic = FLAGS_cudnn_deterministic;
    T* input_grad_data = nullptr;
    T* filter_grad_data = nullptr;

    if (input_grad) {
      input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
      args1.handle = handle;
      args1.idesc.set(transformed_output_grad, iwo_groups);
      args1.wdesc.set(*filter, layout_tensor, iwo_groups);
      args1.odesc.set(input_transpose, iwo_groups);
      args1.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
      using search1 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search1::GetWorkspaceSize(args1));
      data_algo = search1::Find<T>(
          args1, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
      data_algo = search1::Find<T>(
          args1, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size =
          std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
#endif
    }

    if (filter_grad) {
      filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
      args2.handle = handle;
      args2.idesc.set(transformed_output_grad, iwo_groups);
      args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
      args2.odesc.set(input_transpose, iwo_groups);
      args2.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
      using search2 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search2::GetWorkspaceSize(args2));
      filter_algo = search2::Find<T>(
          args2, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo = search2::Find<T>(
          args2, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size = std::max(workspace_size,
                                search2::GetWorkspaceSize(args2, filter_algo));
#endif
    }

    // ------------------- cudnn conv backward data ---------------------
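    // For a transposed convolution, d(Input) is an ordinary convolution
    // forward pass and d(Filter) is the backward-filter pass (next section).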
    // FIXME(typhoonzero): template type T may not be the same as cudnn call.
    int input_offset = input->numel() / input->dims()[0] / groups;
    int output_grad_offset = transformed_output_grad.numel() /
                             transformed_output_grad.dims()[0] / groups;
    int filter_offset = filter->numel() / groups;
    ScalingParamType<T> alpha = 1.0f;
    ScalingParamType<T> beta = 0.0f;
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    if (input_grad) {
      // Because beta is zero, it is unnecessary to reset input_grad.
      for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::miopenConvolutionForward(
                  handle, &alpha, args1.idesc.desc(),
                  output_grad_data + output_grad_offset * g, args1.wdesc.desc(),
                  filter_data + filter_offset * g, args1.cdesc.desc(),
                  data_algo, &beta, args1.odesc.desc(),
                  input_grad_data + input_offset * g, cudnn_workspace,
                  workspace_size));
        };
#else   // PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnConvolutionForward(
              handle, &alpha, args1.idesc.desc(),
              output_grad_data + output_grad_offset * g, args1.wdesc.desc(),
              filter_data + filter_offset * g, args1.cdesc.desc(), data_algo,
              cudnn_workspace, workspace_size, &beta, args1.odesc.desc(),
              input_grad_data + input_offset * g));
        };
#endif  // PADDLE_WITH_HIP
        workspace_handle.RunFunc(cudnn_func, workspace_size);
      }

      if (data_layout == platform::DataLayout::kNHWC) {
        Tensor input_grad_transpose;
        Tensor input_grad_nchw;
        input_grad_nchw.ShareDataWith(*input_grad);
        input_grad_nchw.Resize(phi::make_ddim(input_vec));
        if (strides.size() == 2U) {
          std::vector<int> axis = {0, 2, 3, 1};
          DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose,
                              axis);
          *input_grad = input_grad_transpose;
        } else if (strides.size() == 3U) {
          std::vector<int> axis = {0, 2, 3, 4, 1};
          DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose,
                              axis);
          *input_grad = input_grad_transpose;
        }
      }
    }

    // ------------------- cudnn conv backward filter ---------------------
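    // dFilter = conv_bp_filter(output_grad, input): the op's output gradient
    // plays the role of the convolution input in the call below.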
    if (filter_grad) {
      // Because beta is zero, it is unnecessary to reset filter_grad.
      // Gradient with respect to the filter
      for (int g = 0; g < groups; g++) {
#ifdef PADDLE_WITH_HIP
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::miopenConvolutionBackwardWeights(
                  handle, &alpha, args2.odesc.desc(),
                  input_data + input_offset * g, args2.idesc.desc(),
                  output_grad_data + output_grad_offset * g, args2.cdesc.desc(),
                  filter_algo, &beta, args2.wdesc.desc(),
                  filter_grad_data + filter_offset * g, cudnn_workspace,
                  workspace_size));
        };
#else   // PADDLE_WITH_HIP
618
        auto cudnn_func = [&](void* cudnn_workspace) {
          PADDLE_ENFORCE_GPU_SUCCESS(
              platform::dynload::cudnnConvolutionBackwardFilter(
                  handle, &alpha, args2.idesc.desc(),
                  output_grad_data + output_grad_offset * g, args2.odesc.desc(),
                  input_data + input_offset * g, args2.cdesc.desc(),
                  filter_algo, cudnn_workspace, workspace_size, &beta,
                  args2.wdesc.desc(), filter_grad_data + filter_offset * g));
        };
#endif  // PADDLE_WITH_HIP
        workspace_handle.RunFunc(cudnn_func, workspace_size);
      }
    }
  }
};

/*
 * Inputs:  I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 * ddo = conv_bp_data(W, ddI) + conv_bp_data(ddW, I)
 * dW = conv_bp_filter(dO, ddI)
 * dI = conv(dO, ddW)
 */
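// Here conv denotes a regular convolution forward pass, and conv_bp_data /
// conv_bp_filter its backward-data / backward-filter passes; the cuDNN calls
// below map onto these three primitives.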
template <typename T>
class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(
        platform::is_gpu_place(ctx.GetPlace()), true,
        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
    auto X = ctx.Input<Tensor>("Input");
    auto W = ctx.Input<Tensor>("Filter");
    auto dO = ctx.Input<Tensor>("DOutput");
    auto ddX = ctx.Input<Tensor>("DDInput");
    auto ddW = ctx.Input<Tensor>("DDFilter");

    auto ddO = ctx.Output<Tensor>("DDOutput");
    auto dW = ctx.Output<Tensor>("DFilter");
    auto dX = ctx.Output<Tensor>("DInput");

    if (ddO) {
      ddO->mutable_data<T>(ctx.GetPlace());
      phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
      set_zero(dev_ctx, ddO, static_cast<T>(0));
    }
    if (dW) {
      dW->mutable_data<T>(ctx.GetPlace());
    }
    if (dX) {
      dX->mutable_data<T>(ctx.GetPlace());
    }

    const T* dy = dO->data<T>();
    const T* w = W->data<T>();

    const T* ddx = nullptr;
    const T* ddw = nullptr;
    T *dw, *dx, *ddy;
    dw = dx = ddy = nullptr;
    T* transformed_dx = nullptr;
    const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");

    bool deterministic = FLAGS_cudnn_deterministic;

    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");

    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");

    // transform Tensors to channel first-----------
    Tensor transformed_X_channel(X->type());
    Tensor transformed_dO_channel(dO->type());
    Tensor transformed_ddX_channel(X->type());

    Tensor transformed_ddO_channel(dO->type());
    Tensor transformed_dX_channel(X->type());

    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);

      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);

      if (ddX) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddX, &transformed_ddX_channel);
        TransToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddX, &transformed_ddX_channel);
      }

      if (ddO) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddO, &transformed_ddO_channel);
      }
      if (dX) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, dX, &transformed_dX_channel);
        transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
      }

    } else {
      transformed_X_channel = *X;
      transformed_dO_channel = *dO;
      if (ddX) {
        transformed_ddX_channel = *ddX;
      }
      if (dX) {
        transformed_dX_channel = *dX;
      }
    }
    std::vector<int> output_vec =
        phi::vectorize<int>(transformed_dO_channel.dims());

    auto in_dims = transformed_X_channel.dims();
    auto filter_dims = W->dims();
    framework::DDim in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        phi::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);

    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_X(X->type());
    Tensor transformed_ddX(X->type());

    Tensor transformed_dO(dO->type());

    std::vector<int> padding_common(data_dim, 0);
    std::vector<int> input_pad(X->dims().size() * 2, 0);

    if (!is_sys_pad) {
      // get pad
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      std::vector<int> new_output_grad_shape_vec(data_dim + 2);

      new_input_shape_vec[0] = transformed_X_channel.dims()[0];
      new_input_shape_vec[1] = transformed_X_channel.dims()[1];

      new_output_grad_shape_vec[0] = transformed_dO_channel.dims()[0];
      new_output_grad_shape_vec[1] = transformed_dO_channel.dims()[1];

      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_X_channel.dims()[i + 2] + padding_diff[i];

        new_output_grad_shape_vec[i + 2] =
            transformed_dO_channel.dims()[i + 2] + padding_diff[i];

        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec));
      transformed_X.Resize(new_input_shape);
      transformed_ddX.Resize(new_input_shape);

      framework::DDim new_output_grad_shape(
          phi::make_ddim(new_output_grad_shape_vec));
      transformed_dO.Resize(new_output_grad_shape);

      transformed_dO =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_output_grad_shape, dev_ctx);

      transformed_X =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      if (ddX) {
        transformed_ddX =
            ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
                new_input_shape, dev_ctx);
      }

      // pad for input
      const int rank = X->dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              dev_ctx, input_pad, transformed_X_channel, pad_value,
              &transformed_X);
          if (dO) {
            phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
                dev_ctx, input_pad, transformed_dO_channel, pad_value,
                &transformed_dO);
          }

          if (ddX) {
            phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
                dev_ctx, input_pad, transformed_ddX_channel, pad_value,
                &transformed_ddX);
          }
        } break;
        case 5: {
          phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              dev_ctx, input_pad, transformed_X_channel, pad_value,
              &transformed_X);
          if (ddX) {
            phi::funcs::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
                dev_ctx, input_pad, transformed_ddX_channel, pad_value,
                &transformed_ddX);
          }
        } break;
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "ConvOp only supports tensors with 4 or 5 dimensions."));
      }

    } else {
      transformed_X = transformed_X_channel;
      transformed_dO = transformed_dO_channel;
      if (ddX) {
        transformed_ddX = transformed_ddX_channel;
      }

      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }

    std::vector<int64_t> starts(data_dim, 0);
    std::vector<int64_t> ends(data_dim, 0);
    std::vector<int64_t> axes(data_dim, 0);
    for (size_t i = 0; i < data_dim; ++i) {
      starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
      ends[i] = starts[i] + output_vec[i + 2];
      axes[i] = i + 2;
    }

    std::vector<int> transformed_output_vec = output_vec;
    for (size_t i = 0; i < data_dim; ++i) {
      transformed_output_vec[i + 2] =
          output_vec[i + 2] +
          (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
          2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
    }

    if (!is_sys_pad) {
      DDim transformed_output_shape(phi::make_ddim(transformed_output_vec));
      transformed_ddO_channel.mutable_data<T>(transformed_output_shape,
                                              ctx.GetPlace());
    } else {
      ddO->mutable_data<T>(ctx.GetPlace());
      transformed_ddO_channel = *ddO;
      transformed_ddO_channel.Resize(phi::make_ddim(transformed_output_vec));
    }

    const T* x = transformed_X.data<T>();

    int iwo_group = groups;
    int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
    iwo_group = 1;
    c_group = groups;
    groups = 1;
#endif
    auto dtype = platform::CudnnDataType<T>::type;

    auto handle = dev_ctx.cudnn_handle();

    ConvArgs args1{&transformed_ddO_channel,
                   W,
                   &transformed_ddX,
                   strides,
                   padding_common,
                   dilations,
                   dtype};
    ConvArgs args2{&transformed_ddO_channel, ddW,       &transformed_X, strides,
                   padding_common,           dilations, dtype};

    ConvArgs args3{&transformed_dO,
                   dW,
                   &transformed_ddX_channel,
                   strides,
                   padding_common,
                   dilations,
                   dtype};
    ConvArgs args4{
        &transformed_dO, ddW,  &transformed_dX_channel, strides, padding_common,
        dilations,       dtype};
#ifdef PADDLE_WITH_HIP
    miopenConvBwdDataAlgorithm_t bwd_algo1 =
        static_cast<miopenConvBwdDataAlgorithm_t>(0);
    miopenConvBwdDataAlgorithm_t bwd_algo2 =
        static_cast<miopenConvBwdDataAlgorithm_t>(0);
    miopenConvFwdAlgorithm_t data_algo =
        static_cast<miopenConvFwdAlgorithm_t>(0);
    miopenConvBwdWeightsAlgorithm_t filter_algo =
        static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
    cudnnConvolutionBwdDataAlgo_t bwd_algo1 =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionBwdDataAlgo_t bwd_algo2 =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionFwdAlgo_t data_algo =
        static_cast<cudnnConvolutionFwdAlgo_t>(0);
    cudnnConvolutionBwdFilterAlgo_t filter_algo =
        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif

    auto layout = GetCudnnTensorFormat(platform::DataLayout::kNCHW);

    // ddo = conv(ddI, W) + conv(I, ddW)
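    // Both terms are computed with the backward-data kernel; the second term
    // is accumulated into ddO (beta set to alpha on CUDA, explicit
    // miopenOpTensor add on MIOPEN).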
    size_t workspace_size = 0;

    T* transformed_ddy_channel = nullptr;

    if (ddO) {
      ddy = ddO->data<T>();
      transformed_ddy_channel = transformed_ddO_channel.data<T>();
      if (ddX) {
        args1.handle = handle;
        args1.idesc.set(transformed_ddO_channel, iwo_group);
        args1.wdesc.set(*W, layout, iwo_group);
        args1.odesc.set(transformed_ddX, iwo_group);
        args1.cdesc.set(dtype, padding_common, strides, dilations,
                        platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
        using search1 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
        workspace_size = search1::GetWorkspaceSize(args1);
        bwd_algo1 = search1::Find<T>(
            args1, false, deterministic, workspace_size,
            ctx.template device_context<platform::CUDADeviceContext>());
#else
        using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
        bwd_algo1 = search1::Find<T>(
            args1, false, deterministic,
            ctx.template device_context<platform::CUDADeviceContext>());
        workspace_size = search1::GetWorkspaceSize(args1, bwd_algo1);
#endif
      }

      if (ddW) {
        ddw = ddW->data<T>();
        args2.handle = handle;
        args2.idesc.set(transformed_ddO_channel, iwo_group);
        args2.wdesc.set(*ddW, layout, iwo_group);
        args2.odesc.set(transformed_X, iwo_group);
        args2.cdesc.set(dtype, padding_common, strides, dilations,
                        platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
        using search2 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
        workspace_size =
            std::max(workspace_size, search2::GetWorkspaceSize(args2));
        bwd_algo2 = search2::Find<T>(
            args2, false, deterministic, workspace_size,
            ctx.template device_context<platform::CUDADeviceContext>());
#else
        using search2 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
        bwd_algo2 = search2::Find<T>(
            args2, false, deterministic,
            ctx.template device_context<platform::CUDADeviceContext>());
        workspace_size = std::max(workspace_size,
                                  search2::GetWorkspaceSize(args2, bwd_algo2));
#endif
      }
    }

    if (dW && ddX) {
      dw = dW->data<T>();
      args3.handle = handle;
      args3.idesc.set(transformed_dO, iwo_group);
      args3.wdesc.set(*dW, layout, iwo_group);

      args3.odesc.set(transformed_ddX_channel, iwo_group);

      args3.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
      using search3 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search3::GetWorkspaceSize(args3));
      filter_algo = search3::Find<T>(
          args3, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo = search3::Find<T>(
          args3, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size = std::max(workspace_size,
                                search3::GetWorkspaceSize(args3, filter_algo));
#endif
    }

    if (ddW && dX) {
      transformed_dx = transformed_dX_channel.data<T>();

      args4.handle = handle;
      args4.idesc.set(transformed_dO, iwo_group);
      args4.wdesc.set(*ddW, layout, iwo_group);
      args4.odesc.set(transformed_dX_channel, iwo_group);
      args4.cdesc.set(dtype, padding_common, strides, dilations,
                      platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
      using search4 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
      workspace_size =
          std::max(workspace_size, search4::GetWorkspaceSize(args4));
      data_algo = search4::Find<T>(
          args4, false, deterministic, workspace_size,
          ctx.template device_context<platform::CUDADeviceContext>());
#else
      using search4 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
      data_algo = search4::Find<T>(
          args4, false, deterministic,
          ctx.template device_context<platform::CUDADeviceContext>());
      workspace_size =
          std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
#endif
    }

    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_X.dims(), platform::DataLayout::kNCHW, &i_n, &i_c,
             &i_d, &i_h, &i_w);

    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_dO.dims(), platform::DataLayout::kNCHW, &o_n, &o_c,
             &o_d, &o_h, &o_w);

    int group_offset_in =
        transformed_X.numel() / transformed_X.dims()[0] / groups;
    int group_offset_out =
        transformed_dO.numel() / transformed_dO.dims()[0] / groups;
    int group_offset_filter = W->numel() / groups;

    ScalingParamType<T> alpha = 1.0f;
    ScalingParamType<T> beta = 0.0f;

    auto wkspace_handle = dev_ctx.cudnn_workspace_handle();

    if (ddO) {
      if (ddX) {
        ddx = transformed_ddX.data<T>();
        for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::miopenConvolutionBackwardData(
                        handle, &alpha, args1.odesc.desc(),
                        ddx + i * group_offset_in, args1.wdesc.desc(),
                        w + i * group_offset_filter, args1.cdesc.desc(),
                        bwd_algo1, &beta, args1.idesc.desc(),
                        transformed_ddy_channel + i * group_offset_out,
                        workspace_ptr, workspace_size));
              },
              workspace_size);
#else   // PADDLE_WITH_HIP
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::cudnnConvolutionBackwardData(
                        handle, &alpha, args1.wdesc.desc(),
                        w + i * group_offset_filter, args1.odesc.desc(),
                        ddx + i * group_offset_in, args1.cdesc.desc(),
                        bwd_algo1, workspace_ptr, workspace_size, &beta,
                        args1.idesc.desc(),
                        transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
#endif  // PADDLE_WITH_HIP
        }
      }
      if (ddW) {
        for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
          // MIOPEN only supports beta == 0.0f, so compute conv(x, ddw) into a
          // temporary tensor and add it to ddy with miopenOpTensor below.
          Tensor conv_x_ddw(dO->type());
          conv_x_ddw.Resize(transformed_ddO_channel.dims());
          T* conv_x_ddw_data = conv_x_ddw.mutable_data<T>(ctx.GetPlace());
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::miopenConvolutionBackwardData(
                        handle, &alpha, args2.odesc.desc(),
                        x + i * group_offset_in, args2.wdesc.desc(),
                        ddw + i * group_offset_filter, args2.cdesc.desc(),
                        bwd_algo2, &beta, args2.idesc.desc(),
                        conv_x_ddw_data + i * group_offset_out, workspace_ptr,
                        workspace_size));
              },
              workspace_size);
          PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenOpTensor(
              handle, miopenTensorOpAdd, &alpha, args2.idesc.desc(),
              transformed_ddy_channel + i * group_offset_out, &alpha,
              args2.idesc.desc(), conv_x_ddw_data + i * group_offset_out, &beta,
              args2.idesc.desc(),
              transformed_ddy_channel + i * group_offset_out));
#else   // PADDLE_WITH_HIP
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                PADDLE_ENFORCE_GPU_SUCCESS(
                    platform::dynload::cudnnConvolutionBackwardData(
                        handle, &alpha, args2.wdesc.desc(),
                        ddw + i * group_offset_filter, args2.odesc.desc(),
                        x + i * group_offset_in, args2.cdesc.desc(), bwd_algo2,
                        workspace_ptr, workspace_size, &alpha,
                        args2.idesc.desc(),
                        transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
#endif  // PADDLE_WITH_HIP
        }
      }
      if ((!is_sys_pad) && (!channel_last)) {
        if (strides.size() == 2U) {
          Slice<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, &transformed_ddO_channel, ddO, starts, ends, axes);
        } else if (!is_sys_pad && strides.size() == 3U) {
          Slice<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, &transformed_ddO_channel, ddO, starts, ends, axes);
        }
      } else if ((!is_sys_pad) && (channel_last)) {
        if (strides.size() == 2U) {
          Slice<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, &transformed_ddO_channel, &transformed_ddO_channel, starts,
              ends, axes);
        } else if (!is_sys_pad && strides.size() == 3U) {
          Slice<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, &transformed_ddO_channel, &transformed_ddO_channel, starts,
              ends, axes);
        }

        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_ddO_channel, ddO);
      }
    }

    T* transformed_dy_channel = transformed_dO.data<T>();
    if (dW && ddX) {
      ddx = transformed_ddX_channel.data<T>();
      for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::miopenConvolutionBackwardWeights(
                      handle, &alpha, args3.odesc.desc(),
                      ddx + i * group_offset_in, args3.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args3.cdesc.desc(), filter_algo, &beta,
                      args3.wdesc.desc(), dw + i * group_offset_filter,
                      workspace_ptr, workspace_size));
            },
            workspace_size);
#else   // PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::cudnnConvolutionBackwardFilter(
                      handle, &alpha, args3.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args3.odesc.desc(), ddx + i * group_offset_in,
                      args3.cdesc.desc(), filter_algo, workspace_ptr,
                      workspace_size, &beta, args3.wdesc.desc(),
                      dw + i * group_offset_filter));
            },
            workspace_size);
#endif  // PADDLE_WITH_HIP
      }
    }

    if (dX && ddW) {
      ddw = ddW->data<T>();
      for (int i = 0; i < groups; i++) {
#ifdef PADDLE_WITH_HIP
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              PADDLE_ENFORCE_GPU_SUCCESS(
                  platform::dynload::miopenConvolutionForward(
                      handle, &alpha, args4.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args4.wdesc.desc(), ddw + i * group_offset_filter,
                      args4.cdesc.desc(), data_algo, &beta, args4.odesc.desc(),
                      transformed_dx + i * group_offset_in, workspace_ptr,
                      workspace_size));
            },
            workspace_size);
#else   // PADDLE_WITH_HIP
1216 1217
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
1218
              PADDLE_ENFORCE_GPU_SUCCESS(
1219 1220 1221 1222 1223 1224 1225 1226 1227
                  platform::dynload::cudnnConvolutionForward(
                      handle, &alpha, args4.idesc.desc(),
                      transformed_dy_channel + i * group_offset_out,
                      args4.wdesc.desc(), ddw + i * group_offset_filter,
                      args4.cdesc.desc(), data_algo, workspace_ptr,
                      workspace_size, &beta, args4.odesc.desc(),
                      transformed_dx + i * group_offset_in));
            },
            workspace_size);
#endif  // PADDLE_WITH_HIP
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_dX_channel, dX);
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>);
REGISTER_OP_KERNEL(
    conv2d_transpose_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<plat::float16>);

REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>);
#else
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>,
                   ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>,
                   ops::CUDNNConvTransposeGradOpKernel<double>);
REGISTER_OP_KERNEL(
    conv2d_transpose_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvTransposeDoubleGradOpKernel<plat::float16>);

REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeOpKernel<float>,
                   ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
                   ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
                   ops::CUDNNConvTransposeGradOpKernel<float>,
                   ops::CUDNNConvTransposeGradOpKernel<double>);
#endif