device_code.h
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/platform/device_context.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#include "paddle/fluid/platform/dynload/nvrtc.h"
#endif

namespace paddle {
namespace platform {

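// Abstract handle for a device kernel that is compiled at runtime: it stores
// the target place, the kernel name and the kernel source, and concrete
// subclasses implement Compile() and Launch() for a specific device.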
class DeviceCode {
 public:
  virtual ~DeviceCode() {}
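  // Compiles the stored kernel source for place_; returns true on success.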
  virtual bool Compile() = 0;
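  // Launches the compiled kernel; n is the problem size used to choose the
  // launch dimensions, and args holds pointers to the kernel arguments.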
  virtual void Launch(const size_t n, std::vector<void*>* args) const = 0;

  Place GetPlace() const { return place_; }
  std::string GetName() const { return name_; }

 protected:
  Place place_;
  std::string name_;
  std::string kernel_;
};

#ifdef PADDLE_WITH_CUDA
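// CUDA implementation: compiles the kernel source to PTX with NVRTC and
// loads/launches it through the CUDA driver API.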
class CUDADeviceCode : public DeviceCode {
 public:
  explicit CUDADeviceCode(const Place& place, const std::string& name,
                          const std::string& kernel);
  bool Compile() override;
  void Launch(const size_t n, std::vector<void*>* args) const override;

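  // Tuning knobs for Launch(): threads per block and work handled per thread.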
  void SetNumThreads(int num_threads) { num_threads_ = num_threads; }
  void SetWorkloadPerThread(int workload_per_thread) {
    workload_per_thread_ = workload_per_thread;
  }

 private:
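  // Verify the result of an NVRTC / CUDA driver call; `function` names the
  // call for error reporting.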
  bool CheckNVRTCResult(nvrtcResult result, std::string function);
  bool CheckCUDADriverResult(CUresult result, std::string function);

  bool is_compiled_{false};
  int max_threads_{0};
  int num_threads_{1024};
  int workload_per_thread_{1};
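  // Compilation artifacts: the generated PTX image and the driver-API handles
  // of the loaded module and kernel function, filled in by Compile().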
  std::vector<char> ptx_;
  CUmodule module_;
  CUfunction function_;
};
#endif

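// Global registry of compiled DeviceCode objects, organized per Place and
// looked up by kernel name. Typical usage (a sketch with illustrative names;
// the exact call sites depend on the code that builds the kernels):
//
//   DeviceCodePool& pool = DeviceCodePool::Init({place});
//   pool.Set(std::move(code));  // register a compiled kernel
//   DeviceCode* dev_code = DeviceCodePool::Instance().Get(place, "kernel_name");
//   dev_code->Launch(n, &args);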
class DeviceCodePool {
 public:
  using DeviceCodeMap =
      std::unordered_map<std::string, std::unique_ptr<DeviceCode>>;

  explicit DeviceCodePool(const std::vector<platform::Place>& places);

  static DeviceCodePool& Instance() {
    PADDLE_ENFORCE_NOT_NULL(
        pool,
        errors::NotFound("Need to create DeviceCodePool first, by calling "
                         "DeviceCodePool::Init(places)!"));
    return *pool;
  }

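  // Creates the global pool for the given places on first call; later calls
  // return the existing instance unchanged.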
  static DeviceCodePool& Init(const std::vector<platform::Place>& places) {
    if (pool == nullptr) {
      pool = new DeviceCodePool(places);
    }
    return *pool;
  }

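  // Registers a compiled DeviceCode, keyed by the code's own place and name.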
  void Set(std::unique_ptr<DeviceCode>&& code);

  platform::DeviceCode* Get(const platform::Place& place,
                            const std::string& name);

  size_t size(const platform::Place& place) const {
    auto iter = device_codes_.find(place);
    if (iter == device_codes_.end()) {
      return 0;
    }
    return iter->second.size();
  }

 private:
  static DeviceCodePool* pool;
  std::map<Place, DeviceCodeMap> device_codes_;
  DISABLE_COPY_AND_ASSIGN(DeviceCodePool);
};

}  // namespace platform
}  // namespace paddle