/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/platform/stream/cuda_stream.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace platform {
namespace stream {

23 24 25
#ifdef PADDLE_WITH_HIP
constexpr unsigned int kDefaultFlag = hipStreamDefault;
#else
26
constexpr unsigned int kDefaultFlag = cudaStreamDefault;
27
#endif
28

29
bool CUDAStream::Init(const Place& place, const Priority& priority) {
30 31 32 33
  PADDLE_ENFORCE_EQ(is_gpu_place(place), true,
                    platform::errors::InvalidArgument(
                        "Cuda stream must be created using cuda place."));
  place_ = place;
34
  CUDADeviceGuard guard(BOOST_GET_CONST(CUDAPlace, place_).device);
35
  if (priority == Priority::kHigh) {
36 37 38 39
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(
        hipStreamCreateWithPriority(&stream_, kDefaultFlag, -1));
#else
40
    PADDLE_ENFORCE_CUDA_SUCCESS(
41
        cudaStreamCreateWithPriority(&stream_, kDefaultFlag, -1));
42
#endif
43
  } else if (priority == Priority::kNormal) {
44 45 46 47
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(
        hipStreamCreateWithPriority(&stream_, kDefaultFlag, 0));
#else
48
    PADDLE_ENFORCE_CUDA_SUCCESS(
49
        cudaStreamCreateWithPriority(&stream_, kDefaultFlag, 0));
50
#endif
51
  }
52 53
  callback_manager_.reset(new StreamCallbackManager<gpuStream_t>(stream_));
  VLOG(3) << "GPUStream Init stream: " << stream_
54 55 56 57 58
          << ", priority: " << static_cast<int>(priority);
  return true;
}

void CUDAStream::Destroy() {
59
  CUDADeviceGuard guard(BOOST_GET_CONST(CUDAPlace, place_).device);
60 61 62
  Wait();
  WaitCallback();
  if (stream_) {
63 64 65
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamDestroy(stream_));
#else
66
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_));
67
#endif
68 69 70 71 72
  }
  stream_ = nullptr;
}

void CUDAStream::Wait() const {
73 74 75 76 77 78 79 80 81 82 83
#ifdef PADDLE_WITH_HIP
  hipError_t e_sync = hipSuccess;
#if !defined(_WIN32)
  e_sync = hipStreamSynchronize(stream_);
#else
  while (e_sync = hipStreamQuery(stream_)) {
    if (e_sync == hipErrorNotReady) continue;
    break;
  }
#endif
#else
84 85 86 87 88 89 90 91 92
  cudaError_t e_sync = cudaSuccess;
#if !defined(_WIN32)
  e_sync = cudaStreamSynchronize(stream_);
#else
  while (e_sync = cudaStreamQuery(stream_)) {
    if (e_sync == cudaErrorNotReady) continue;
    break;
  }
#endif
93
#endif  // PADDLE_WITH_HIP
94

95
  PADDLE_ENFORCE_CUDA_SUCCESS(e_sync);
96 97 98 99 100
}

}  // namespace stream
}  // namespace platform
}  // namespace paddle