Commit c33e59c7, authored by 刘琦

Merge branch 'redefine_GPUContext_life_cycle' into 'master'

redefine GPUContext life cycle

See merge request !841
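In essence, the merge replaces raw-pointer hand-offs backed by std::unique_ptr with std::shared_ptr, so a GPUContext no longer has to outlive every MaceEngine that borrowed its OpenCL storages and tuner. A minimal sketch of the ownership change, using placeholder names (Resource, OldStyleContext and NewStyleContext are illustrative, not MACE types):

#include <memory>

struct Resource { int value = 0; };

// Before this change: the context uniquely owns the resource and lends out a
// raw pointer, so every borrower has to be gone before the context is.
struct OldStyleContext {
  std::unique_ptr<Resource> res_{new Resource};
  Resource *resource() { return res_.get(); }
};

// After this change: the context shares ownership, so a borrower that copies
// the shared_ptr keeps the resource alive even if the context dies first.
struct NewStyleContext {
  std::shared_ptr<Resource> res_{new Resource};
  std::shared_ptr<Resource> resource() { return res_; }
};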
@@ -58,16 +58,16 @@ GPUContext::GPUContext(const std::string &storage_path,
 GPUContext::~GPUContext() = default;
-KVStorage *GPUContext::opencl_binary_storage() {
-  return opencl_binary_storage_.get();
+std::shared_ptr<KVStorage> GPUContext::opencl_binary_storage() {
+  return opencl_binary_storage_;
 }
-KVStorage *GPUContext::opencl_cache_storage() {
-  return opencl_cache_storage_.get();
+std::shared_ptr<KVStorage> GPUContext::opencl_cache_storage() {
+  return opencl_cache_storage_;
 }
-Tuner<uint32_t> *GPUContext::opencl_tuner() {
-  return opencl_tuner_.get();
+std::shared_ptr<Tuner<uint32_t>> GPUContext::opencl_tuner() {
+  return opencl_tuner_;
 }
 } // namespace mace
@@ -32,15 +32,15 @@ class GPUContext {
              const std::string &opencl_parameter_path = "");
   ~GPUContext();
-  KVStorage *opencl_binary_storage();
-  KVStorage *opencl_cache_storage();
-  Tuner<uint32_t> *opencl_tuner();
+  std::shared_ptr<KVStorage> opencl_binary_storage();
+  std::shared_ptr<KVStorage> opencl_cache_storage();
+  std::shared_ptr<Tuner<uint32_t>> opencl_tuner();
  private:
   std::unique_ptr<KVStorageFactory> storage_factory_;
-  std::unique_ptr<Tuner<uint32_t>> opencl_tuner_;
-  std::unique_ptr<KVStorage> opencl_binary_storage_;
-  std::unique_ptr<KVStorage> opencl_cache_storage_;
+  std::shared_ptr<Tuner<uint32_t>> opencl_tuner_;
+  std::shared_ptr<KVStorage> opencl_binary_storage_;
+  std::shared_ptr<KVStorage> opencl_cache_storage_;
 };
 } // namespace mace
......
@@ -32,7 +32,7 @@ class FileStorageFactory::Impl {
  public:
   explicit Impl(const std::string &path);
-  std::unique_ptr<KVStorage> CreateStorage(const std::string &name);
+  std::shared_ptr<KVStorage> CreateStorage(const std::string &name);
  private:
   std::string path_;
@@ -40,10 +40,9 @@ class FileStorageFactory::Impl {
 FileStorageFactory::Impl::Impl(const std::string &path): path_(path) {}
-std::unique_ptr<KVStorage> FileStorageFactory::Impl::CreateStorage(
+std::shared_ptr<KVStorage> FileStorageFactory::Impl::CreateStorage(
     const std::string &name) {
-  return std::move(std::unique_ptr<KVStorage>(
-      new FileStorage(path_ + "/" + name)));
+  return std::shared_ptr<KVStorage>(new FileStorage(path_ + "/" + name));
 }
 FileStorageFactory::FileStorageFactory(const std::string &path):
@@ -51,7 +50,7 @@ FileStorageFactory::FileStorageFactory(const std::string &path):
 FileStorageFactory::~FileStorageFactory() = default;
-std::unique_ptr<KVStorage> FileStorageFactory::CreateStorage(
+std::shared_ptr<KVStorage> FileStorageFactory::CreateStorage(
     const std::string &name) {
   return impl_->CreateStorage(name);
 }
......
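As a side note, the new CreateStorage body could also be written with std::make_shared, which folds the FileStorage object and the shared_ptr control block into one allocation. This is only a sketch and assumes FileStorage publicly derives from KVStorage and its constructor is accessible here, as the line in the hunk above suggests:

std::shared_ptr<KVStorage> FileStorageFactory::Impl::CreateStorage(
    const std::string &name) {
  // One allocation for both the object and the control block; otherwise the
  // behavior matches the shared_ptr construction shown in the diff above.
  return std::make_shared<FileStorage>(path_ + "/" + name);
}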
@@ -41,7 +41,7 @@ class KVStorage {
 class KVStorageFactory {
  public:
-  virtual std::unique_ptr<KVStorage> CreateStorage(const std::string &name) = 0;
+  virtual std::shared_ptr<KVStorage> CreateStorage(const std::string &name) = 0;
   virtual ~KVStorageFactory() {}
 };
@@ -53,7 +53,7 @@ class FileStorageFactory : public KVStorageFactory {
   ~FileStorageFactory();
-  std::unique_ptr<KVStorage> CreateStorage(const std::string &name) override;
+  std::shared_ptr<KVStorage> CreateStorage(const std::string &name) override;
  private:
   class Impl;
......
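Because KVStorageFactory::CreateStorage now returns a std::shared_ptr, a factory implementation may retain co-ownership of the storages it creates, for example to hand the same storage to several callers. A hypothetical sketch under that assumption; CachingStorageFactory and its cache member are illustrative and not part of MACE, and it assumes FileStorage is constructible here:

#include <map>
#include <memory>
#include <string>

class CachingStorageFactory : public KVStorageFactory {
 public:
  explicit CachingStorageFactory(const std::string &path) : path_(path) {}

  std::shared_ptr<KVStorage> CreateStorage(const std::string &name) override {
    std::shared_ptr<KVStorage> &entry = cache_[name];
    if (!entry) {
      // The factory and every caller now share ownership of the storage.
      entry = std::shared_ptr<KVStorage>(new FileStorage(path_ + "/" + name));
    }
    return entry;
  }

 private:
  std::string path_;
  std::map<std::string, std::shared_ptr<KVStorage>> cache_;
};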
@@ -18,11 +18,11 @@
 namespace mace {
-GPUDevice::GPUDevice(Tuner<uint32_t> *tuner,
-                     KVStorage *opencl_cache_storage,
+GPUDevice::GPUDevice(std::shared_ptr<Tuner<uint32_t>> tuner,
+                     std::shared_ptr<KVStorage> opencl_cache_storage,
                      const GPUPriorityHint priority,
                      const GPUPerfHint perf,
-                     KVStorage *opencl_binary_storage,
+                     std::shared_ptr<KVStorage> opencl_binary_storage,
                      const int num_threads,
                      CPUAffinityPolicy cpu_affinity_policy,
                      bool use_gemmlowp) :
......
@@ -25,11 +25,11 @@ namespace mace {
 class GPUDevice : public CPUDevice {
  public:
-  GPUDevice(Tuner<uint32_t> *tuner,
-            KVStorage *opencl_cache_storage = nullptr,
+  GPUDevice(std::shared_ptr<Tuner<uint32_t>> tuner,
+            std::shared_ptr<KVStorage> opencl_cache_storage = nullptr,
             const GPUPriorityHint priority = GPUPriorityHint::PRIORITY_LOW,
             const GPUPerfHint perf = GPUPerfHint::PERF_NORMAL,
-            KVStorage *opencl_binary_storage = nullptr,
+            std::shared_ptr<KVStorage> opencl_binary_storage = nullptr,
             const int num_threads = -1,
             CPUAffinityPolicy cpu_affinity_policy = AFFINITY_NONE,
             bool use_gemmlowp = false);
......
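Given these signatures, a GPUDevice can be wired straight to the shared storages and tuner exposed by a GPUContext, and it will keep them alive for as long as the device itself lives. A rough sketch, assuming gpu_context is a std::shared_ptr<GPUContext> that has already been created; the hint values are just the defaults shown above:

// Parameter order follows the GPUDevice declaration in the hunk above.
GPUDevice gpu_device(gpu_context->opencl_tuner(),
                     gpu_context->opencl_cache_storage(),
                     GPUPriorityHint::PRIORITY_LOW,
                     GPUPerfHint::PERF_NORMAL,
                     gpu_context->opencl_binary_storage());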
@@ -273,11 +273,11 @@ void OpenCLProfilingTimer::ClearTiming() {
 }
 OpenCLRuntime::OpenCLRuntime(
-    KVStorage *cache_storage,
+    std::shared_ptr<KVStorage> cache_storage,
     const GPUPriorityHint priority_hint,
     const GPUPerfHint perf_hint,
-    KVStorage *precompiled_binary_storage,
-    Tuner<uint32_t> *tuner):
+    std::shared_ptr<KVStorage> precompiled_binary_storage,
+    std::shared_ptr<Tuner<uint32_t>> tuner):
     cache_storage_(cache_storage),
     precompiled_binary_storage_(precompiled_binary_storage),
     tuner_(tuner),
@@ -460,7 +460,7 @@ cl::Device &OpenCLRuntime::device() { return *device_; }
 cl::CommandQueue &OpenCLRuntime::command_queue() { return *command_queue_; }
-Tuner<uint32_t> *OpenCLRuntime::tuner() { return tuner_; }
+Tuner<uint32_t> *OpenCLRuntime::tuner() { return tuner_.get(); }
 uint64_t OpenCLRuntime::device_global_mem_cache_size() const {
   return device_gloabl_mem_cache_size_;
......
@@ -65,11 +65,11 @@ const std::string OpenCLErrorToString(cl_int error);
 class OpenCLRuntime {
  public:
   OpenCLRuntime(
-      KVStorage *cache_storage = nullptr,
+      std::shared_ptr<KVStorage> cache_storage = nullptr,
       const GPUPriorityHint priority_hint = GPUPriorityHint::PRIORITY_NORMAL,
       const GPUPerfHint perf_hint = GPUPerfHint::PERF_NORMAL,
-      KVStorage *precompiled_binary_storage = nullptr,
-      Tuner<uint32_t> *tuner = nullptr);
+      std::shared_ptr<KVStorage> precompiled_binary_storage = nullptr,
+      std::shared_ptr<Tuner<uint32_t>> tuner = nullptr);
   ~OpenCLRuntime();
   OpenCLRuntime(const OpenCLRuntime &) = delete;
   OpenCLRuntime &operator=(const OpenCLRuntime &) = delete;
@@ -126,9 +126,9 @@ class OpenCLRuntime {
   OpenCLVersion ParseDeviceVersion(const std::string &device_version);
  private:
-  KVStorage *cache_storage_;
-  KVStorage *precompiled_binary_storage_;
-  Tuner<uint32_t> *tuner_;
+  std::shared_ptr<KVStorage> cache_storage_;
+  std::shared_ptr<KVStorage> precompiled_binary_storage_;
+  std::shared_ptr<Tuner<uint32_t>> tuner_;
   bool is_opencl_avaliable_;
   bool is_profiling_enabled_;
   OpenCLVersion opencl_version_;
......
@@ -67,7 +67,6 @@ mace::DeviceType ParseDeviceType(const std::string &device) {
 }
 MaceContext& GetMaceContext() {
-  // stay for the app's life time, only initialize once
   static auto *mace_context = new MaceContext;
   return *mace_context;
......
@@ -99,10 +99,8 @@ enum MaceStatus {
 /// \brief GPU context contain the status used for GPU device.
 ///
-/// The life cycle of GPUContext object is the same as MaceEngines use it.
-/// Just use one GPUContext for all MaceEngines, which will speed up the
-/// initialization procedure. There are some data in common between different
-/// MaceEngines using GPU, use one GPUContext could avoid duplication.
+/// There are some data in common between different MaceEngines using GPU,
+/// use one GPUContext could avoid duplication.
 class GPUContext;
 /// \brief GPUContext builder.
......
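The trimmed comment relies on shared ownership: every engine configured with the same std::shared_ptr<GPUContext> co-owns it, so the application can drop its own reference once the engines have been set up. A hedged sketch of that usage; GPUContextBuilder is declared just above, but the exact setter names, the storage path, and the engine-config calls are assumptions here rather than facts taken from this revision:

// Build one context and share it across every GPU engine in the process.
GPUContextBuilder builder;
builder.SetStoragePath("/data/local/tmp/mace");                // assumed setter and path
std::shared_ptr<GPUContext> gpu_context = builder.Finalize();  // assumed to return shared_ptr

config_a.SetGPUContext(gpu_context);  // assumed engine-config API
config_b.SetGPUContext(gpu_context);  // both engines co-own the same context
gpu_context.reset();                  // safe: the engines keep the context alive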