/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>
#endif
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace platform {

// Kind of profiler record: an instantaneous marker (kMark) or the
// begin/end edge of a nested profiling range (kPushRange / kPopRange).
enum EventType { kMark, kPushRange, kPopRange };

// One profiler record: a named event bound to a thread, timestamped on
// CPU and — when built with CUDA — optionally on GPU. Events form a
// tree via parent() to represent nested push/pop profiling ranges.
class Event {
 public:
  // The DeviceContext is used to get the cuda stream.
  // If CPU profiling mode, can pass nullptr.
  Event(EventType type, std::string name, uint32_t thread_id);

  const EventType& type() const;

  // Enclosing event in the push/pop nesting; nullptr at the top level.
  Event* parent() const { return parent_; }
  void set_parent(Event* parent) { parent_ = parent; }

  std::string name() const { return name_; }
  uint32_t thread_id() const { return thread_id_; }

  // Take the parameter by move: avoids one string copy for rvalue callers.
  void set_name(std::string name) { name_ = std::move(name); }

#ifdef PADDLE_WITH_CUDA
#ifndef PADDLE_WITH_CUPTI
  // Raw CUDA event handle and device index (non-CUPTI timing path).
  cudaEvent_t event() const { return event_; }
  int device() const { return device_; }
#endif
#endif

  // Elapsed time between this event and `e`, in milliseconds.
  double CpuElapsedMs(const Event& e) const;
  double CudaElapsedMs(const Event& e) const;

 private:
  EventType type_;
  std::string name_{};
  Event* parent_{nullptr};
  uint32_t thread_id_;
  int64_t cpu_ns_;
  bool visited_status_{false};
#ifdef PADDLE_WITH_CUDA
#ifdef PADDLE_WITH_CUPTI
  // GPU time accumulated from CUPTI activity records, in nanoseconds.
  int64_t gpu_ns_ = 0;

 public:
  // Folds one CUPTI-reported [start_ns, end_ns) interval into gpu_ns_.
  void AddCudaElapsedTime(int64_t start_ns, int64_t end_ns) {
    gpu_ns_ += end_ns - start_ns;
  }

 private:
#else
  cudaEvent_t event_ = nullptr;
  int device_ = -1;
#endif
#endif
};
// Immutable record of one memory event (allocation or free) observed by
// the profiler: its kind, time window [start_ns, end_ns) in nanoseconds,
// size in bytes, device placement, originating thread, and a free-form
// annotation describing the call site.
class MemEvent {
 public:
  MemEvent(EventType type, uint64_t start_ns, uint64_t end_ns, size_t bytes,
           Place place, int64_t thread_id, const std::string& annotation)
      : type_(type),
        start_ns_(start_ns),
        end_ns_(end_ns),
        bytes_(bytes),
        place_(place),
        thread_id_(thread_id),
        annotation_(annotation) {}

  // Read-only accessors; a MemEvent is never mutated after construction.
  const EventType& type() const { return type_; }
  uint64_t start_ns() const { return start_ns_; }
  uint64_t end_ns() const { return end_ns_; }
  size_t bytes() const { return bytes_; }
  Place place() const { return place_; }
  int64_t thread_id() const { return thread_id_; }
  const std::string& annotation() const { return annotation_; }

 private:
  // Brace-initialize every member so none can be read uninitialized
  // (previously only start_ns_/end_ns_ carried default initializers).
  EventType type_;
  uint64_t start_ns_ = 0;
  uint64_t end_ns_ = 0;
  size_t bytes_ = 0;
  Place place_;
  int64_t thread_id_ = 0;
  std::string annotation_;
};

}  // namespace platform
}  // namespace paddle