// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/conv_kernel.h"

#include "glog/logging.h"

#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"

#ifdef PADDLE_WITH_HIP
#include "paddle/phi/kernels/gpudnn/conv_miopen_helper.h"
#else
#include "paddle/phi/kernels/gpudnn/conv_cudnn_v7.h"
#endif

#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"

#ifdef PADDLE_WITH_CUDNN_FRONTEND
// clang-format off
#include "paddle/phi/backends/dynload/cudnn_frontend.h"
#include "paddle/phi/kernels/autotune/cache.h"
#include "paddle/phi/kernels/gpudnn/conv_cudnn_frontend.h"
// clang-format on
#endif

namespace phi {

template <typename T, typename Context>
void ConvCudnnKernelImplV7(const DenseTensor* transformed_input,
                           const DenseTensor* transformed_filter_channel,
                           const Context& ctx,
                           const std::vector<int>& strides,
                           const std::vector<int>& padding_common,
                           const std::vector<int>& dilations,
                           phi::backends::gpu::DataLayout compute_format,
                           phi::backends::gpu::DataLayout layout,
                           bool exhaustive_search,
                           bool deterministic,
                           int groups,
                           DenseTensor* transformed_output) {
  const T* input_data = transformed_input->data<T>();
  const T* filter_data = transformed_filter_channel->data<T>();
  T* output_data = transformed_output->data<T>();

  auto handle = ctx.cudnn_handle();
  auto workspace_handle = ctx.cudnn_workspace_handle();

  auto layout_format = phi::backends::gpu::GetCudnnTensorFormat(layout);
  auto dtype = phi::backends::gpu::CudnnDataType<T>::type;

  // ------------------- cudnn descriptors ---------------------
  ConvArgs args{handle,
                transformed_input,
                transformed_filter_channel,
                transformed_output,
                strides,
                padding_common,
                dilations,
                dtype,
                groups,
                compute_format};

#ifdef PADDLE_WITH_HIP
  // MIOPEN needs the group count to be set in cdesc (see miopen_desc.h).
  args.cdesc.set(
      dtype, padding_common, strides, dilations, phi::AllowTF32Cudnn(), groups);
#else
  args.cdesc.set(
      dtype, padding_common, strides, dilations, phi::AllowTF32Cudnn());
#endif

#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1)
  // cuDNN 7 supports groups natively, so there is no need to split them
  // manually.
  // FIXME(typhoonzero): find a better way to disable groups
  // rather than setting it to 1.
  PADDLE_ENFORCE_GPU_SUCCESS(
      phi::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(), groups));
  groups = 1;
#endif
#ifdef PADDLE_WITH_HIP
  // MIOPEN: do not set groups in wdesc once they have been set in cdesc.
  groups = 1;
#endif
  args.idesc.set(*transformed_input, layout_format);
  args.wdesc.set(*transformed_filter_channel, layout_format, groups);
  args.odesc.set(*transformed_output, layout_format);
  int i_n, i_c, i_d, i_h, i_w;
  int o_n, o_c, o_d, o_h, o_w;

  if (compute_format == phi::backends::gpu::DataLayout::kNHWC) {
    GetNCDHW(transformed_input->dims(),
             phi::backends::gpu::DataLayout::kNHWC,
             &i_n,
             &i_c,
             &i_d,
             &i_h,
             &i_w);
    GetNCDHW(transformed_output->dims(),
             phi::backends::gpu::DataLayout::kNHWC,
             &o_n,
             &o_c,
             &o_d,
             &o_h,
             &o_w);
  } else {
    GetNCDHW(transformed_input->dims(),
             phi::backends::gpu::DataLayout::kNCHW,
             &i_n,
             &i_c,
             &i_d,
             &i_h,
             &i_w);
    GetNCDHW(transformed_output->dims(),
             phi::backends::gpu::DataLayout::kNCHW,
             &o_n,
             &o_c,
             &o_d,
             &o_h,
             &o_w);
  }

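  // Element offsets between consecutive groups in the input, output and
  // filter buffers, used when the convolution has to be split per group.
  // With cuDNN >= 7 (and with MIOpen) `groups` was reset to 1 above, so a
  // single library call handles all groups at once.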
  int group_offset_in = i_c / groups * i_h * i_w * i_d;
  int group_offset_out = o_c / groups * o_h * o_w * o_d;
  int group_offset_filter = transformed_filter_channel->numel() / groups;
  // ------------------- cudnn conv workspace ---------------------
  size_t workspace_size = 0;  // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
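// On ROCm the required workspace is queried before Find() picks the
// algorithm; on CUDA, Find() returns the chosen algorithm together with its
// workspace requirement.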
#ifdef PADDLE_WITH_HIP
  SearchResult<miopenConvFwdAlgorithm_t> fwd_result;
  using search = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
  workspace_size = search::GetWorkspaceSize(args);
  fwd_result.algo = search::Find<T>(
      args, exhaustive_search, deterministic, workspace_size, ctx);
#else
  SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result;
  using search = SearchAlgorithm<ConvKind::kForward>;
  fwd_result = search::Find<T>(ctx, args, exhaustive_search, deterministic);
  workspace_size = fwd_result.workspace_size;
#endif

#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1)
  // When groups > 1, SearchAlgorithm may pick
  // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, but that algorithm is
  // unstable in forward computation, so manually fall back to
  // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM (enum value 0).
  if (groups > 1) {
    fwd_result.algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
  }
#endif

  // ------------------- cudnn conv forward ---------------------
  ScalingParamType<T> alpha = 1.0f;
  ScalingParamType<T> beta = 0.0f;

  // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
  // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
  // VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto");

#ifdef PADDLE_WITH_HIP
  workspace_handle.RunFunc(
      [&](void* workspace_ptr) {
        PADDLE_ENFORCE_GPU_SUCCESS(
            phi::dynload::miopenConvolutionForward(handle,
                                                   &alpha,
                                                   args.idesc.desc(),
                                                   input_data,
                                                   args.wdesc.desc(),
                                                   filter_data,
                                                   args.cdesc.desc(),
                                                   fwd_result.algo,
                                                   &beta,
                                                   args.odesc.desc(),
                                                   output_data,
                                                   workspace_ptr,
                                                   workspace_size));
      },
      workspace_size);
#else
  ConvRunner<T, ConvKind::kForward>::Apply(ctx,
                                           args,
                                           fwd_result,
                                           input_data,
                                           filter_data,
                                           output_data,
                                           groups,
                                           group_offset_in,
                                           group_offset_filter,
                                           group_offset_out,
                                           workspace_size,
                                           &workspace_handle,
                                           false);
#endif
}

#ifdef PADDLE_WITH_CUDNN_FRONTEND
template <typename T, typename Context>
void ConvCudnnKernelImplV8(const DenseTensor* input_tensor,
                           const DenseTensor* filter_channel_tensor,
                           const Context& ctx,
                           const std::vector<int>& strides,
                           const std::vector<int>& padding_common,
                           const std::vector<int>& dilations,
                           phi::backends::gpu::DataLayout layout,
                           bool exhaustive_search,
                           bool deterministic,
                           int groups,
                           DenseTensor* output_tensor) {
  auto& plan_cache = phi::autotune::AutoTuneCache::Instance().GetConvV8(
      phi::autotune::AlgorithmType::kConvForwardV8);

  PADDLE_ENFORCE_EQ(
      groups,
      1,
      phi::errors::Unimplemented(
          "Group convolution using the cuDNN v8 API is unsupported for now"));

  T* input_data = const_cast<T*>(input_tensor->data<T>());
  T* filter_data = const_cast<T*>(filter_channel_tensor->data<T>());
  T* output_data = output_tensor->data<T>();
  cudnnHandle_t handle = const_cast<cudnnHandle_t>(ctx.cudnn_handle());
  auto workspace_handle = ctx.cudnn_workspace_handle();

  auto layout_format = phi::backends::gpu::GetCudnnTensorFormat(layout);
  auto dtype = phi::backends::gpu::CudnnDataType<T>::type;

  float alpha = 1.0f;
  float beta = 0.0f;

  using helper = CudnnFrontendConvHelper;
  auto op_graph = helper::BuildConvOperationGraph<
      CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR>(
      input_tensor,
      output_tensor,
      filter_channel_tensor,
      layout_format,
      strides,
      padding_common,
      dilations,
      dtype,
      handle,
      alpha,
      beta);

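  // Fast path: if an execution plan for this op graph was autotuned before,
  // rebuild it from the cached engine config and execute it directly. The
  // uids 'x', 'y' and 'w' bind the input, output and filter pointers in the
  // same order used when the graph was built.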
  if (plan_cache.FindPlan(op_graph)) {
    auto engine_config = plan_cache.GetConfig(op_graph, handle);
    auto cached_plan = cudnn_frontend::ExecutionPlanBuilder()
                           .setHandle(handle)
                           .setEngineConfig(engine_config, op_graph.getTag())
                           .build();
    auto workspace_size = cached_plan.getWorkspaceSize();
    VLOG(4) << "Cached execution plan found." << cached_plan.getTag()
            << "; Require workspace: " << workspace_size;
    workspace_handle.RunFunc(
        [&](void* workspace_ptr) {
          void* data_ptrs[] = {input_data, output_data, filter_data};
          int64_t uids[] = {'x', 'y', 'w'};
          auto variant_pack = cudnn_frontend::VariantPackBuilder()
                                  .setWorkspacePointer(workspace_ptr)
                                  .setDataPointers(3, data_ptrs)
                                  .setUids(3, uids)
                                  .build();
          PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnBackendExecute(
              handle, cached_plan.get_raw_desc(), variant_pack.get_raw_desc()));
        },
        workspace_size);
    return;
  }

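  // Cache miss: enumerate candidate execution plans (heuristic, or
  // benchmarked when exhaustive_search is set) and run the first one that
  // executes successfully, caching it once it is considered stable.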
  auto plans = helper::FindExecutionPlans(&op_graph,
                                          exhaustive_search,
                                          deterministic,
                                          input_data,
                                          output_data,
                                          filter_data,
                                          handle,
                                          &workspace_handle);

  for (auto& plan : plans) {
    try {
      int64_t workspace_size = plan.getWorkspaceSize();
      workspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            void* data_ptrs[] = {input_data, output_data, filter_data};
            int64_t uids[] = {'x', 'y', 'w'};
            auto variant_pack = cudnn_frontend::VariantPackBuilder()
                                    .setWorkspacePointer(workspace_ptr)
                                    .setDataPointers(3, data_ptrs)
                                    .setUids(3, uids)
                                    .build();
            PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnBackendExecute(
                handle, plan.get_raw_desc(), variant_pack.get_raw_desc()));
          },
          workspace_size);
      if (!exhaustive_search || plan_cache.IsStable(op_graph, plan.getTag())) {
        plan_cache.InsertPlan(op_graph, plan);
      }
      return;
    } catch (cudnn_frontend::cudnnException& e) {
      VLOG(4) << "Plan " << plan.describe()
              << " failed to execute. Trying next plan.";
    } catch (phi::enforce::EnforceNotMet& e) {
      VLOG(4) << "Plan " << plan.describe()
              << " failed to execute. Trying next plan.";
    }
  }
  PADDLE_THROW(
      phi::errors::InvalidArgument("[CUDNN Frontend API] No valid plan could "
                                   "be found to execute conv."));
}
#endif

template <typename T, typename Context>
void ConvCudnnKernel(const Context& ctx,
                     const DenseTensor& input,
                     const DenseTensor& filter,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings_t,
                     const std::string& padding_algorithm,
                     const std::vector<int>& dilations_t,
                     int groups,
                     const std::string& data_format,
                     DenseTensor* output) {
  ctx.template Alloc<T>(output);
  std::vector<int> paddings = paddings_t;
  std::vector<int> dilations = dilations_t;

  bool has_exhaustive_search = ctx.HasDnnAttr("exhaustive_search");
  VLOG(4) << "GPUContext contains `exhaustive_search`: "
          << has_exhaustive_search;
  bool exhaustive_search_attr =
      has_exhaustive_search
          ? PADDLE_GET_CONST(bool, ctx.GetDnnAttr("exhaustive_search"))
          : false;
  bool exhaustive_search =
      FLAGS_cudnn_exhaustive_search || exhaustive_search_attr;
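  // Exhaustive search (requested via the kernel attribute or the global
  // FLAGS_cudnn_exhaustive_search flag) benchmarks all applicable algorithms;
  // it cannot be combined with deterministic mode, as enforced below.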
  bool deterministic = FLAGS_cudnn_deterministic;
  PADDLE_ENFORCE_EQ(exhaustive_search && deterministic,
                    false,
                    phi::errors::InvalidArgument(
                        "Cann't set exhaustive_search True and "
                        "FLAGS_cudnn_deterministic True at same time."));

  const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
  auto dtype = phi::backends::gpu::CudnnDataType<T>::type;

#ifdef PADDLE_WITH_HIP
  // HIP MIOpen only supports the NCHW format.
  auto compute_format = phi::backends::gpu::DataLayout::kNCHW;
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
  // Tensor Cores, introduced with Volta GPUs, support faster conv ops
  // with FP16 or BF16 in NHWC data format.
  const bool compute_in_nhwc =
      (dtype == CUDNN_DATA_HALF || dtype == CUDNN_DATA_BFLOAT16) &&
      IsVoltaOrLater(ctx);
#else
  // Tensor Cores, introduced with Volta GPUs, support faster conv ops
  // with FP16 in NHWC data format. (BF16 requires cuDNN >= 8.1.0.)
  const bool compute_in_nhwc = dtype == CUDNN_DATA_HALF && IsVoltaOrLater(ctx);
#endif
  // We only convert the data format from NHWC to NCHW here;
  // cuDNN converts NCHW to NHWC automatically on Tensor Cores.
  auto compute_format = compute_in_nhwc && channel_last
                            ? phi::backends::gpu::DataLayout::kNHWC
                            : phi::backends::gpu::DataLayout::kNCHW;
#endif
  VLOG(3) << "Compute ConvOp with cuDNN:"
          << " data_format=" << data_format << " compute_format="
          << (compute_format == phi::backends::gpu::DataLayout::kNHWC ? "NHWC"
                                                                      : "NCHW");

  // ------------ transformed tensor -----------
  DenseTensor transformed_input_channel(input.type());
  DenseTensor transformed_output(output->type());
  DenseTensor transformed_filter_channel(filter.type());

  if (channel_last && compute_format == phi::backends::gpu::DataLayout::kNCHW) {
    VLOG(3) << "Transform input tensor from NHWC to NCHW.";
    ResizeToChannelFirst<Context, T>(ctx, &input, &transformed_input_channel);
    TransToChannelFirst<Context, T>(ctx, &input, &transformed_input_channel);

    ResizeToChannelFirst<Context, T>(ctx, output, &transformed_output);

  } else {
    transformed_input_channel.ShareDataWith(input);
    transformed_output.ShareDataWith(*output);
  }
  if (compute_format == phi::backends::gpu::DataLayout::kNHWC) {
    VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
    ResizeToChannelLast<Context, T>(ctx, &filter, &transformed_filter_channel);
    TransToChannelLast<Context, T>(ctx, &filter, &transformed_filter_channel);
  } else {
    transformed_filter_channel.ShareDataWith(filter);
  }

  // update padding and dilation
  auto in_dims = transformed_input_channel.dims();
  auto filter_dims = transformed_filter_channel.dims();
  DDim in_data_dims;
  DDim filter_data_dims;

  if (compute_format == phi::backends::gpu::DataLayout::kNCHW) {
    in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
    filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
  } else {
    in_data_dims = slice_ddim(in_dims, 1, in_dims.size() - 1);
    filter_data_dims = slice_ddim(filter_dims, 1, filter_dims.size() - 1);
  }

  std::vector<int> ksize = vectorize<int>(filter_data_dims);
  UpdatePaddingAndDilation(
      &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);

  int data_dim = strides.size();  // 2d or 3d
  bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);

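  // cuDNN accepts a single padding value per spatial dimension, applied
  // symmetrically. For asymmetric paddings the input is padded explicitly by
  // the per-side difference below, and only the common (minimum) part is
  // passed to cuDNN via padding_common.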
  DenseTensor transformed_input;
  std::vector<int> padding_common(data_dim, 0);
  if (!is_sys_pad) {
    std::vector<int> padding_diff(data_dim);
    std::vector<int> new_input_shape_vec(data_dim + 2);
    new_input_shape_vec[0] = transformed_input_channel.dims()[0];

    if (compute_format == phi::backends::gpu::DataLayout::kNCHW) {
      new_input_shape_vec[1] = transformed_input_channel.dims()[1];
    } else {
      new_input_shape_vec[data_dim + 1] =
          transformed_input_channel.dims()[data_dim + 1];
    }

    std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
    for (size_t i = 0; i < data_dim; ++i) {
      padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
      padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
      if (compute_format == phi::backends::gpu::DataLayout::kNCHW) {
        new_input_shape_vec[i + 2] =
            transformed_input_channel.dims()[i + 2] + padding_diff[i];
      } else {
        new_input_shape_vec[i + 1] =
            transformed_input_channel.dims()[i + 1] + padding_diff[i];
      }
      if (compute_format == phi::backends::gpu::DataLayout::kNCHW) {
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      } else {
        input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
    }
    DDim new_input_shape(make_ddim(new_input_shape_vec));
    transformed_input.Resize(new_input_shape);
    ctx.template Alloc<T>(&transformed_input);

    const int rank = transformed_input_channel.dims().size();
    T pad_value(0.0);
    switch (rank) {
      case 4: {
        funcs::PadFunction<Context, T, 4>(ctx,
                                          input_pad,
                                          transformed_input_channel,
                                          pad_value,
                                          &transformed_input);
      } break;
      case 5: {
        funcs::PadFunction<Context, T, 5>(ctx,
                                          input_pad,
                                          transformed_input_channel,
                                          pad_value,
                                          &transformed_input);
      } break;
      default:
        PADDLE_THROW(phi::errors::InvalidArgument(
            "ConvOp only support tensors with 4 or 5 dimensions."));
    }

  } else {
    transformed_input.ShareDataWith(transformed_input_channel);
    if (paddings.size() == data_dim) {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings[i];
      }
    } else {
      for (size_t i = 0; i < data_dim; ++i) {
        padding_common[i] = paddings[2 * i];
      }
    }
  }

  phi::backends::gpu::DataLayout layout =
      compute_format == phi::backends::gpu::DataLayout::kNHWC
          ? phi::backends::gpu::DataLayout::kNHWC
          : phi::backends::gpu::DataLayout::kNCHW;
  if (transformed_input.dims().size() == 5) {
    layout = compute_format == phi::backends::gpu::DataLayout::kNHWC
                 ? phi::backends::gpu::DataLayout::kNDHWC
                 : phi::backends::gpu::DataLayout::kNCDHW;
  }

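  // Prefer the cuDNN frontend (v8) path when it is compiled in and enabled
  // at runtime; it currently supports only groups == 1, so grouped
  // convolutions fall back to the v7 path.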
#ifdef PADDLE_WITH_CUDNN_FRONTEND
  if (dynload::IsCudnnFrontendEnabled() && (groups == 1))
    ConvCudnnKernelImplV8<T>(&transformed_input,
                             &transformed_filter_channel,
                             ctx,
                             strides,
                             padding_common,
                             dilations,
                             layout,
                             exhaustive_search,
                             deterministic,
                             groups,
                             &transformed_output);
  else
    ConvCudnnKernelImplV7<T>(&transformed_input,
                             &transformed_filter_channel,
                             ctx,
                             strides,
                             padding_common,
                             dilations,
                             compute_format,
                             layout,
                             exhaustive_search,
                             deterministic,
                             groups,
                             &transformed_output);
#else
  ConvCudnnKernelImplV7<T>(&transformed_input,
                           &transformed_filter_channel,
                           ctx,
                           strides,
                           padding_common,
                           dilations,
                           compute_format,
                           layout,
                           exhaustive_search,
                           deterministic,
                           groups,
                           &transformed_output);
#endif

  if (channel_last && compute_format == phi::backends::gpu::DataLayout::kNCHW) {
    TransToChannelLast<Context, T>(ctx, &transformed_output, output);
  }
}

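// Conv3DCudnnKernel and DepthwiseConvCudnnKernel below simply forward to
// ConvCudnnKernel, which handles both 4-D and 5-D tensors; the depthwise
// variant is passed through with its group count unchanged.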
template <typename T, typename Context>
void Conv3DCudnnKernel(const Context& dev_ctx,
                       const DenseTensor& input,
                       const DenseTensor& filter,
                       const std::vector<int>& strides,
                       const std::vector<int>& paddings,
                       const std::string& padding_algorithm,
                       int groups,
                       const std::vector<int>& dilations,
                       const std::string& data_format,
                       DenseTensor* out) {
  ConvCudnnKernel<T>(dev_ctx,
                     input,
                     filter,
                     strides,
                     paddings,
                     padding_algorithm,
                     dilations,
                     groups,
                     data_format,
                     out);
}

template <typename T, typename Context>
void DepthwiseConvCudnnKernel(const Context& dev_ctx,
                              const DenseTensor& input,
                              const DenseTensor& filter,
                              const std::vector<int>& strides,
                              const std::vector<int>& paddings,
                              const std::string& padding_algorithm,
                              int groups,
                              const std::vector<int>& dilations,
                              const std::string& data_format,
                              DenseTensor* out) {
  ConvCudnnKernel<T>(dev_ctx,
                     input,
                     filter,
                     strides,
                     paddings,
                     padding_algorithm,
                     dilations,
                     groups,
                     data_format,
                     out);
}

}  // namespace phi

#ifdef PADDLE_WITH_HIP
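// The MIOpen build registers only float and float16 kernels; the CUDA builds
// below additionally register double, plus bfloat16 when cuDNN >= 8.1.0 is
// available.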
PD_REGISTER_KERNEL(conv2d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::ConvCudnnKernel,
                   float,
                   phi::dtype::float16) {}

PD_REGISTER_KERNEL(conv3d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3DCudnnKernel,
                   float,
                   phi::dtype::float16) {}

PD_REGISTER_KERNEL(depthwise_conv2d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::DepthwiseConvCudnnKernel,
                   float,
                   phi::dtype::float16) {}

#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_REGISTER_KERNEL(conv2d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::ConvCudnnKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(conv3d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3DCudnnKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
#else
PD_REGISTER_KERNEL(conv2d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::ConvCudnnKernel,
                   float,
                   double,
                   phi::dtype::float16) {}

PD_REGISTER_KERNEL(conv3d,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3DCudnnKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
#endif

#endif

// TODO: register bfloat16