From 87cbeb31bf332ceb4b8cb8df79f5df9aebb318cf Mon Sep 17 00:00:00 2001
From: liuqi
Date: Fri, 8 Jun 2018 16:03:08 +0800
Subject: [PATCH] Refactor OpenCL platform info write logic and update docs.

---
 docs/getting_started/create_a_model_deployment.rst | 10 +++++-----
 docs/getting_started/models/demo_app_models.yaml   |  4 ++--
 mace/core/file_storage.cc                          |  6 ++++--
 mace/core/runtime/opencl/opencl_runtime.cc         |  9 +++++----
 tools/converter.py                                 |  9 +++++----
 5 files changed, 21 insertions(+), 17 deletions(-)

diff --git a/docs/getting_started/create_a_model_deployment.rst b/docs/getting_started/create_a_model_deployment.rst
index 918c84a2..46be54ad 100644
--- a/docs/getting_started/create_a_model_deployment.rst
+++ b/docs/getting_started/create_a_model_deployment.rst
@@ -35,7 +35,7 @@ Configurations
     * - target_abis
       - The target ABI to build, can be one or more of 'host', 'armeabi-v7a' or 'arm64-v8a'
     * - target_socs
-      - build for specified socs if you just want use the model for that socs.
+      - [optional] Build for the specified SoCs if you only want to use the model on those SoCs.
     * - embed_model_data
       - Whether embedding model weights as the code, default to 0
     * - build_type
@@ -50,9 +50,9 @@ Configurations
     * - model_sha256_checksum
       - The SHA256 checksum of the model file
     * - weight_file_path
-      - The path of the model weights file, used by Caffe model
+      - [optional] The path of the model weights file, used by Caffe model
     * - weight_sha256_checksum
-      - The SHA256 checksum of the weight file, used by Caffe model
+      - [optional] The SHA256 checksum of the weight file, used by Caffe model
     * - subgraphs
       - subgraphs key. ** DO NOT EDIT **
     * - input_tensors
@@ -63,6 +63,8 @@ Configurations
       - The shapes of the input tensors, in NHWC order
     * - output_shapes
       - The shapes of the output tensors, in NHWC order
+    * - validation_inputs_data
+      - [optional] Specify Numpy validation inputs. When not provided, random values in [-1, 1] will be used
     * - runtime
       - The running device, one of [cpu, gpu, dsp, cpu_gpu]. cpu_gpu contains cpu and gpu model definition so you can run the model on both cpu and gpu.
     * - data_type
@@ -75,5 +77,3 @@ Configurations
       - [optional] Whether to obfuscate the model operator name, default to 0
     * - winograd
       - [optional] Whether to enable Winograd convolution, **will increase memory consumption**
-    * - input_files
-      - [optional] Specify Numpy validation inputs. When not provided, [-1, 1] random values will be used
diff --git a/docs/getting_started/models/demo_app_models.yaml b/docs/getting_started/models/demo_app_models.yaml
index f78dc40a..216deea5 100644
--- a/docs/getting_started/models/demo_app_models.yaml
+++ b/docs/getting_started/models/demo_app_models.yaml
@@ -43,10 +43,10 @@ models: # 一个配置文件可以包含多个模型的配置信息,最终生
       output_shapes:
         - 1,256,256,2
         - 1,1,1,2
+      validation_inputs_data:
+        - path/to/input_files # support http://
       runtime: cpu
       limit_opencl_kernel_time: 1
       nnlib_graph_mode: 0
       obfuscate: 1
       winograd: 0
-      input_files:
-        - path/to/input_files # support http://
diff --git a/mace/core/file_storage.cc b/mace/core/file_storage.cc
index 4d93da51..37c2ece1 100644
--- a/mace/core/file_storage.cc
+++ b/mace/core/file_storage.cc
@@ -150,8 +150,10 @@ int FileStorage::Load() {
 bool FileStorage::Insert(const std::string &key,
                          const std::vector<unsigned char> &value) {
   utils::WriteLock lock(&data_mutex_);
-  data_.emplace(key, value);
-  data_changed_ = true;
+  auto res = data_.emplace(key, value);
+  if (res.second) {
+    data_changed_ = true;
+  }
   return true;
 }

diff --git a/mace/core/runtime/opencl/opencl_runtime.cc b/mace/core/runtime/opencl/opencl_runtime.cc
index 0fad7131..5235479d 100644
--- a/mace/core/runtime/opencl/opencl_runtime.cc
+++ b/mace/core/runtime/opencl/opencl_runtime.cc
@@ -604,6 +604,11 @@ void OpenCLRuntime::BuildProgramFromSource(

   if (this->cache_storage_ != nullptr) {
     this->cache_storage_->Insert(built_program_key, content);
+    // update platform info
+    this->cache_storage_->Insert(
+        kOpenCLPlatformInfoKey,
+        std::vector<unsigned char>(platform_info_.begin(),
+                                   platform_info_.end()));
   }

   VLOG(3) << "Program from source: " << built_program_key;
@@ -656,10 +661,6 @@ cl::Kernel OpenCLRuntime::BuildKernel(

 void OpenCLRuntime::SaveBuiltCLProgram() {
   if (cache_storage_ != nullptr) {
-    // update platform info
-    cache_storage_->Insert(kOpenCLPlatformInfoKey,
-                           std::vector<unsigned char>(platform_info_.begin(),
-                                                      platform_info_.end()));
     if (cache_storage_->Flush() != 0) {
       LOG(FATAL) << "Store OPENCL compiled kernel to file failed. "
                  << "Please make sure the storage directory exist "
diff --git a/tools/converter.py b/tools/converter.py
index 6d9df68d..e1083d30 100644
--- a/tools/converter.py
+++ b/tools/converter.py
@@ -435,10 +435,11 @@ def get_build_model_dirs(library_name, model_name, target_abi, target_soc,


 def get_opencl_binary_output_path(library_name):
-    return '%s/%s/%s/%s' % (BUILD_OUTPUT_DIR,
-                            library_name,
-                            OUTPUT_OPENCL_BINARY_DIR_NAME,
-                            OUTPUT_OPENCL_BINARY_FILE_NAME)
+    return '%s/%s/%s/%s' % \
+        (BUILD_OUTPUT_DIR,
+         library_name,
+         OUTPUT_OPENCL_BINARY_DIR_NAME,
+         library_name + '_' + OUTPUT_OPENCL_BINARY_FILE_NAME)


 ################################
--
GitLab
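
Note on the FileStorage::Insert change above: std::map::emplace (and
std::unordered_map::emplace) returns a std::pair whose .second member is true
only when a brand-new key was inserted; an existing key keeps its old value and
.second comes back false. The patched Insert uses this to raise data_changed_
only for genuinely new entries. Below is a minimal, self-contained sketch of
that behavior; it is not MACE code, and the container, key, and flag names are
illustrative only.

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // Stand-ins for FileStorage's key/value store and its dirty flag.
      std::map<std::string, std::vector<unsigned char>> data;
      bool data_changed = false;

      // First emplace: the key is new, so .second is true and the flag is set.
      auto res = data.emplace("program_key", std::vector<unsigned char>{1, 2});
      if (res.second) {
        data_changed = true;
      }

      // Second emplace with the same key: the stored value is NOT overwritten
      // and .second is false, so the flag would not be touched again.
      auto res2 = data.emplace("program_key", std::vector<unsigned char>{3, 4, 5});

      std::cout << std::boolalpha
                << "first insert new: " << res.second          // true
                << ", second insert new: " << res2.second      // false
                << ", stored size: " << res2.first->second.size()  // 2
                << ", changed: " << data_changed << std::endl;     // true
      return 0;
    }

A consequence of these emplace semantics, as written in the hunk above, is that
calling Insert with a key that already exists (for example, re-writing the
OpenCL platform info under the same key) leaves the previously stored value in
place and does not mark the storage as changed.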