/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/platform/device_context.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#include "paddle/fluid/platform/dynload/nvrtc.h"
#endif
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/platform/dynload/hiprtc.h"
#include "paddle/fluid/platform/dynload/rocm_driver.h"
#endif

namespace paddle {
namespace platform {

class DeviceCode {
 public:
  virtual ~DeviceCode() {}
39
  virtual bool Compile(bool include_path = false) = 0;
40 41
  virtual void Launch(const size_t n, std::vector<void*>* args) const = 0;

42 43 44
  Place GetPlace() const { return place_; }
  std::string GetName() const { return name_; }

45 46 47 48 49 50
 protected:
  Place place_;
  std::string name_;
  std::string kernel_;
};

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
class CUDADeviceCode : public DeviceCode {
 public:
  explicit CUDADeviceCode(const Place& place, const std::string& name,
                          const std::string& kernel);
56
  bool Compile(bool include_path = false) override;
57 58 59 60 61 62 63
  void Launch(const size_t n, std::vector<void*>* args) const override;

  void SetNumThreads(int num_threads) { num_threads_ = num_threads; }
  void SetWorkloadPerThread(int workload_per_thread) {
    workload_per_thread_ = workload_per_thread;
  }

64 65 66
  static void CheckAvailableStatus();
  static bool IsAvailable() { return available_; }

67
 private:
68 69 70
#ifdef PADDLE_WITH_HIP
  bool CheckNVRTCResult(hiprtcResult result, std::string function);
#else
71
  bool CheckNVRTCResult(nvrtcResult result, std::string function);
72
#endif
73 74

  static bool available_;
75 76

  bool is_compiled_{false};
77 78 79 80
  int max_threads_{0};
  int num_threads_{1024};
  int workload_per_thread_{1};
  std::vector<char> ptx_;
81 82 83 84
#ifdef PADDLE_WITH_HIP
  hipModule_t module_;
  hipFunction_t function_;
#else
85 86
  CUmodule module_;
  CUfunction function_;
87
#endif
88 89 90
};
#endif

// Process-wide registry of compiled DeviceCode objects, keyed first by
// device Place and then by kernel name. Created once via Init() and
// accessed afterwards through Instance().
class DeviceCodePool {
 public:
  using DeviceCodeMap =
      std::unordered_map<std::string, std::unique_ptr<DeviceCode>>;

  explicit DeviceCodePool(const std::vector<platform::Place>& places);

  // Returns the singleton; aborts via enforce if Init() was never called.
  static DeviceCodePool& Instance() {
    PADDLE_ENFORCE_NOT_NULL(
        pool,
        errors::NotFound("Need to create DeviceCodePool first, by calling "
                         "DeviceCodePool::Init(places)!"));
    return *pool;
  }

  // Lazily constructs the singleton for the given places; subsequent
  // calls return the already-created pool unchanged.
  static DeviceCodePool& Init(const std::vector<platform::Place>& places) {
    if (pool == nullptr) pool = new DeviceCodePool(places);
    return *pool;
  }

  // Registers a compiled code object, taking ownership of it.
  void Set(std::unique_ptr<DeviceCode>&& code);

  // Looks up a registered code object by place and kernel name.
  platform::DeviceCode* Get(const platform::Place& place,
                            const std::string& name);

  // Number of kernels registered for the given place (0 if none).
  size_t size(const platform::Place& place) const {
    auto it = device_codes_.find(place);
    return (it == device_codes_.end()) ? 0 : it->second.size();
  }

 private:
  static DeviceCodePool* pool;                   // the lazily-built singleton
  std::map<Place, DeviceCodeMap> device_codes_;  // per-place kernel registry
  DISABLE_COPY_AND_ASSIGN(DeviceCodePool);
};

}  // namespace platform
}  // namespace paddle