/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <cstdint>
#include <memory>
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/stream_callback_manager.h"

namespace paddle {
namespace platform {
namespace stream {

29
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
30 31 32 33 34 35

enum class Priority : uint8_t {
  kNull = 0x0,
  kHigh = 0x1,
  kNormal = 0x2,
};
36
#endif
37
class CUDAStream final {
38
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
39 40
 public:
  CUDAStream() = default;
41 42
  explicit CUDAStream(const Place& place,
                      const Priority& priority = Priority::kNormal) {
43 44 45 46
    Init(place, priority);
  }
  virtual ~CUDAStream() { Destroy(); }

47
  bool Init(const Place& place, const Priority& priority = Priority::kNormal);
48 49 50 51 52 53 54

  template <typename Callback>
  void AddCallback(Callback&& callback) const {
    callback_manager_->AddCallback(callback);
  }

  template <typename Callback>
55 56 57 58 59 60
#ifdef PADDLE_WITH_HIP
  void RecordEvent(hipEvent_t ev, Callback callback) const {
    callback();
    PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(ev, stream_));
  }
#else
61 62
  void RecordEvent(cudaEvent_t ev, Callback callback) const {
    callback();
63
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(ev, stream_));
64
  }
65
#endif
66

67 68 69 70 71
#ifdef PADDLE_WITH_HIP
  void RecordEvent(hipEvent_t ev) const {
    PADDLE_ENFORCE_CUDA_SUCCESS(hipEventRecord(ev, stream_));
  }
#else
72
  void RecordEvent(cudaEvent_t ev) const {
73
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventRecord(ev, stream_));
74
  }
75
#endif
76

77 78 79 80 81
#ifdef PADDLE_WITH_HIP
  void WaitEvent(hipEvent_t ev) const {
    PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamWaitEvent(stream_, ev, 0));
  }
#else
82
  void WaitEvent(cudaEvent_t ev) const {
83
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(stream_, ev, 0));
84
  }
85
#endif
86 87 88 89

  void Wait() const;
  void WaitCallback() const { callback_manager_->Wait(); }

90 91 92
#ifdef PADDLE_WITH_HIP
  const hipStream_t& raw_stream() const { return stream_; }
#else
93
  const cudaStream_t& raw_stream() const { return stream_; }
94
#endif
95 96
  void Destroy();

97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
  bool Query() const {
#ifdef PADDLE_WITH_HIP
    hipError_t err = hipStreamQuery(stream_);
    if (err == hipSuccess) {
      return true;
    }
    if (err == hipErrorNotReady) {
      return false;
    }
#else
    cudaError_t err = cudaStreamQuery(stream_);
    if (err == cudaSuccess) {
      return true;
    }
    if (err == cudaErrorNotReady) {
      return false;
    }
#endif

    PADDLE_ENFORCE_CUDA_SUCCESS(err);
    return false;
  }

  void Synchronize() const {
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_CUDA_SUCCESS(hipStreamSynchronize(stream_));
#else
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_));
#endif
  }

128 129
 private:
  Place place_;
130 131 132
#ifdef PADDLE_WITH_HIP
  hipStream_t stream_{nullptr};
#else
133
  cudaStream_t stream_{nullptr};
134
#endif
135
  Priority priority_{Priority::kNormal};
136
  std::unique_ptr<StreamCallbackManager<gpuStream_t>> callback_manager_;
137
#endif
138 139 140
  DISABLE_COPY_AND_ASSIGN(CUDAStream);
};

CUDAStream* get_current_stream(int deviceId);

}  // namespace stream
}  // namespace platform
}  // namespace paddle