/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <functional>
#include <map>
#include <string>
#include <utility>

#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>
#endif
#ifdef PADDLE_WITH_HIP
#include <hip/hip_runtime.h>
#endif

#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

// Kind of profiler record: a single instant marker, or the begin/end
// edge of a nested time range.
enum class EventType {
  kMark,       // instantaneous marker
  kPushRange,  // start of a range
  kPopRange,   // end of a range
};

// How an event's time should be attributed in profiler reports.
enum class EventRole {
  kOrdinary,  // only record op time with op type key
  kInnerOp,   // record op detail time with op type key
  kUniqueOp,  // record op detail time with op unique name key
  kSpecial,   // record event such as PE which is outer of thread local
};

41 42 43 44
class Event {
 public:
  // The DeviceContext is used to get the cuda stream.
  // If CPU profiling mode, can pass nullptr.
45 46 47 48 49
  Event(EventType type,
        std::string name,
        uint32_t thread_id,
        EventRole role = EventRole::kOrdinary,
        std::string attr = "none");
50

L
liutiexing 已提交
51 52 53
  const EventType &type() const;
  Event *parent() const { return parent_; }
  void set_parent(Event *parent) { parent_ = parent; }
54
  std::string name() const { return name_; }
55
  EventRole role() const { return role_; }
56
  uint64_t thread_id() const { return thread_id_; }
57
  void set_name(std::string name) { name_ = name; }
58
  void set_role(EventRole role) { role_ = role; }
Y
Yuang Liu 已提交
59
  std::string attr() const { return attr_; }
60
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
61
#ifndef PADDLE_WITH_CUPTI
62
  gpuEvent_t event() const { return event_; }
63 64 65 66
  int device() const { return device_; }
#endif
#endif

L
liutiexing 已提交
67 68
  double CpuElapsedMs(const Event &e) const;
  double CudaElapsedMs(const Event &e) const;
69 70 71

 private:
  EventType type_;
72
  std::string name_{};
L
liutiexing 已提交
73
  Event *parent_{nullptr};
74
  uint64_t thread_id_;
75
  EventRole role_{};
76
  int64_t cpu_ns_;
77
  bool visited_status_{false};
Y
Yuang Liu 已提交
78
  std::string attr_;
79
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
80 81 82 83 84 85 86 87 88 89
#ifdef PADDLE_WITH_CUPTI
  int64_t gpu_ns_ = 0;

 public:
  void AddCudaElapsedTime(int64_t start_ns, int64_t end_ns) {
    gpu_ns_ += end_ns - start_ns;
  }

 private:
#else
90
  gpuEvent_t event_ = nullptr;
91 92 93 94
  int device_ = -1;
#endif
#endif
};
C
chengduo 已提交
95

L
liutiexing 已提交
96
using EventWithStartNs = std::pair<Event *, uint64_t>;
97 98
using ThreadEvents = std::map<uint64_t, EventWithStartNs>;

C
chengduo 已提交
99 100
class MemEvent {
 public:
101 102 103 104 105 106 107
  MemEvent(EventType type,
           uint64_t start_ns,
           uint64_t end_ns,
           size_t bytes,
           Place place,
           int64_t thread_id,
           const std::string &annotation)
C
chengduo 已提交
108 109 110 111 112 113 114 115
      : type_(type),
        start_ns_(start_ns),
        end_ns_(end_ns),
        bytes_(bytes),
        place_(place),
        thread_id_(thread_id),
        annotation_(annotation) {}

L
liutiexing 已提交
116
  const EventType &type() const { return type_; }
C
chengduo 已提交
117 118 119 120
  uint64_t start_ns() const { return start_ns_; }
  uint64_t end_ns() const { return end_ns_; }
  size_t bytes() const { return bytes_; }
  Place place() const { return place_; }
121
  uint64_t thread_id() const { return thread_id_; }
L
liutiexing 已提交
122
  const std::string &annotation() const { return annotation_; }
C
chengduo 已提交
123 124 125 126 127 128 129

 private:
  EventType type_;
  uint64_t start_ns_ = 0;
  uint64_t end_ns_ = 0;
  size_t bytes_;
  Place place_;
130
  uint64_t thread_id_;
C
chengduo 已提交
131 132 133
  std::string annotation_;
};

// Thin wrapper over a cuda/hip event: creates the handle on construction
// and destroys it in the destructor. Compiles to an empty class in
// CPU-only builds.
// NOTE(review): copy/move operations are not deleted, so copying a
// CudaEvent would double-destroy the handle — callers must avoid copies.
class CudaEvent {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 public:
  CudaEvent() {
#ifdef PADDLE_WITH_HIP
    hipEventCreateWithFlags(&event_, flags_);
#else
    cudaEventCreateWithFlags(&event_, flags_);
#endif
    VLOG(4) << "CudaEvent " << event_;
  }

  explicit CudaEvent(unsigned int flags) : flags_(flags) {
#ifdef PADDLE_WITH_HIP
    hipEventCreateWithFlags(&event_, flags_);
#else
    cudaEventCreateWithFlags(&event_, flags_);
#endif
    VLOG(4) << "CudaEvent " << event_;
  }

  ~CudaEvent() {
#ifdef PADDLE_WITH_HIP
    hipEventDestroy(event_);
#else
    cudaEventDestroy(event_);
#endif
  }

  // Records the event in `stream`; raises via PADDLE_ENFORCE on failure.
  void Record(gpuStream_t stream) {
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_GPU_SUCCESS(hipEventRecord(event_, stream));
#else
    PADDLE_ENFORCE_GPU_SUCCESS(cudaEventRecord(event_, stream));
#endif
  }

  // Returns true when the recorded work has completed, false when it is
  // still pending; any other driver error is raised via PADDLE_ENFORCE.
  bool Query() {
#ifdef PADDLE_WITH_HIP
    gpuError_t err = hipEventQuery(event_);
    if (err == hipSuccess) {
      return true;
    }
    if (err == hipErrorNotReady) {
      return false;
    }
#else
    gpuError_t err = cudaEventQuery(event_);
    if (err == cudaSuccess) {
      return true;
    }
    if (err == cudaErrorNotReady) {
      return false;
    }
#endif
    PADDLE_ENFORCE_GPU_SUCCESS(err);
    return false;
  }

  // Blocks the calling thread until the event completes.
  void Synchronize() {
#ifdef PADDLE_WITH_HIP
    PADDLE_ENFORCE_GPU_SUCCESS(hipEventSynchronize(event_));
#else
    PADDLE_ENFORCE_GPU_SUCCESS(cudaEventSynchronize(event_));
#endif
  }
  gpuEvent_t GetRawCudaEvent() { return event_; }

 private:
#ifdef PADDLE_WITH_HIP
  unsigned int flags_ = hipEventDefault;
#else
  unsigned int flags_ = cudaEventDefault;
#endif
  gpuEvent_t event_;
#endif
};

}  // namespace platform
}  // namespace paddle