diff --git a/.gitignore b/.gitignore index b3a51dd83dbcba3ce393e112b6df5fb02310ba4a..c78b060c4a06e8597917b39f67d90fb53f466a29 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,6 @@ cmake-build-debug/ mace/codegen/models/ mace/codegen/opencl/ mace/codegen/opencl_bin/ +mace/codegen/tuning/ mace/codegen/version/ build/ diff --git a/mace/codegen/BUILD b/mace/codegen/BUILD index b715869984d414234af5961a21cf4867bdb2601d..bc92a7bf13b871954b5f18de9e218fad7722b459 100644 --- a/mace/codegen/BUILD +++ b/mace/codegen/BUILD @@ -17,14 +17,8 @@ cc_library( ) cc_library( - name = "generated_opencl_dev", - srcs = ["opencl/opencl_encrypt_program.cc"], - linkstatic = 1, -) - -cc_library( - name = "generated_opencl_prod", - srcs = ["opencl/opencl_compiled_program.cc"], + name = "generated_opencl", + srcs = glob(["opencl/*.cc"]), linkstatic = 1, ) diff --git a/mace/codegen/opencl/opencl_compiled_program.cc b/mace/codegen/opencl/opencl_compiled_program.cc deleted file mode 100644 index 62fbad33022b92d4d72a7f407cca18c5e4073fe3..0000000000000000000000000000000000000000 --- a/mace/codegen/opencl/opencl_compiled_program.cc +++ /dev/null @@ -1,16 +0,0 @@ -// -// Copyright (c) 2017 XiaoMi All rights reserved. -// - -// This is a generated file, DO NOT EDIT - -#include -#include -#include - -namespace mace { - -extern const std::map> - kCompiledProgramMap = {}; - -} // namespace mace diff --git a/mace/codegen/tuning/tuning_params.cc b/mace/codegen/tuning/tuning_params.cc deleted file mode 100644 index 8a8b7132a26a150d14ff9504b13aa668fc109285..0000000000000000000000000000000000000000 --- a/mace/codegen/tuning/tuning_params.cc +++ /dev/null @@ -1,16 +0,0 @@ -// -// Copyright (c) 2017 XiaoMi All rights reserved. -// - -// This is a generated file, DO NOT EDIT - -#include -#include -#include - -namespace mace { - -extern const std::map> - kTuningParamsData = {}; - -} // namespace mace diff --git a/mace/core/BUILD b/mace/core/BUILD index 9a957ad1d4becc788d554aa34b58d911428d60ea..1b4cdaa8d51c054a9f8b55d5c7e4266273724f5d 100644 --- a/mace/core/BUILD +++ b/mace/core/BUILD @@ -27,8 +27,6 @@ cc_library( "runtime/hexagon/*.cc", ], exclude = [ - "runtime/opencl/opencl_production.cc", - "runtime/opencl/opencl_development.cc", "*_test.cc", "runtime/hexagon/hexagon_controller_dummy.cc", ], @@ -50,15 +48,15 @@ cc_library( ]), deps = [ ":opencl_headers", - "//mace/utils", + "//mace/codegen:generated_opencl", "//mace/codegen:generated_version", + "//mace/utils", "@half//:half", ] + if_production_mode([ + "//mace/codegen:generated_tuning_params", "//mace/utils:utils_prod", - "//mace/core:opencl_prod", ]) + if_not_production_mode([ "//mace/utils:utils_dev", - "//mace/core:opencl_dev", ]), ) @@ -90,26 +88,3 @@ cc_library( ], alwayslink = 1, ) - -cc_library( - name = "opencl_dev", - srcs = ["runtime/opencl/opencl_development.cc"], - linkstatic = 1, - deps = [ - ":opencl_headers", - "//mace/codegen:generated_opencl_dev", - "//mace/utils", - ], -) - -cc_library( - name = "opencl_prod", - srcs = ["runtime/opencl/opencl_production.cc"], - linkstatic = 1, - deps = [ - ":opencl_headers", - "//mace/codegen:generated_opencl_prod", - "//mace/codegen:generated_tuning_params", - "//mace/utils", - ], -) diff --git a/mace/core/file_storage.cc b/mace/core/file_storage.cc new file mode 100644 index 0000000000000000000000000000000000000000..de9a67fa73a197d8bfe73ec27652fff2f0afd85f --- /dev/null +++ b/mace/core/file_storage.cc @@ -0,0 +1,216 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#include "mace/core/file_storage.h" + +#include +#include +#include +#include +#include + +#include +#include +#include + + +#include "mace/utils/logging.h" + +namespace mace { + +class FileStorageFactory::Impl { + public: + explicit Impl(const std::string &path); + + std::unique_ptr CreateStorage(const std::string &name); + + private: + std::string path_; +}; + +FileStorageFactory::Impl::Impl(const std::string &path): path_(path) {} +std::unique_ptr FileStorageFactory::Impl::CreateStorage( + const std::string &name) { + return std::move(std::unique_ptr( + new FileStorage(path_ + "/" + name))); +} + +FileStorageFactory::FileStorageFactory(const std::string &path): + impl_(new FileStorageFactory::Impl(path)) {} + +FileStorageFactory::~FileStorageFactory() = default; + +std::unique_ptr FileStorageFactory::CreateStorage( + const std::string &name) { + return impl_->CreateStorage(name); +} + +FileStorage::FileStorage(const std::string &file_path): + file_path_(file_path) {} + +int FileStorage::Load() { + struct stat st; + if (stat(file_path_.c_str(), &st) == -1) { + if (errno == ENOENT) { + LOG(INFO) << "File " << file_path_ + << " does not exist"; + return 0; + } else { + LOG(WARNING) << "Stat file " << file_path_ + << " failed, error code: " << errno; + return -1; + } + } + int fd = open(file_path_.c_str(), O_RDONLY); + if (fd < 0) { + if (errno == ENOENT) { + LOG(INFO) << "File " << file_path_ + << " does not exist"; + return 0; + } else { + LOG(WARNING) << "open file " << file_path_ + << " failed, error code: " << errno; + return -1; + } + } + size_t file_size = st.st_size; + unsigned char *file_data = + static_cast(mmap(nullptr, file_size, PROT_READ, + MAP_PRIVATE, fd, 0)); + int res = 0; + if (file_data == MAP_FAILED) { + LOG(WARNING) << "mmap file " << file_path_ + << " failed, error code: " << errno; + + res = close(fd); + if (res != 0) { + LOG(WARNING) << "close file " << file_path_ + << " failed, error code: " << errno; + } + return -1; + } + unsigned char *file_data_ptr = file_data; + + const size_t int_size = sizeof(int32_t); + + int64_t data_size = 0; + memcpy(&data_size, file_data_ptr, sizeof(int64_t)); + file_data_ptr += sizeof(int64_t); + int32_t key_size = 0; + int32_t value_size = 0; + for (int i = 0; i < data_size; ++i) { + memcpy(&key_size, file_data_ptr, int_size); + file_data_ptr += int_size; + std::unique_ptr key(new char[key_size+1]); + memcpy(&key[0], file_data_ptr, key_size); + file_data_ptr += key_size; + key[key_size] = '\0'; + + memcpy(&value_size, file_data_ptr, int_size); + file_data_ptr += int_size; + std::vector value(value_size); + memcpy(value.data(), file_data_ptr, value_size); + file_data_ptr += value_size; + + data_.emplace(std::string(&key[0]), value); + } + + res = munmap(file_data, file_size); + if (res != 0) { + LOG(WARNING) << "munmap file " << file_path_ + << " failed, error code: " << errno; + res = close(fd); + if (res != 0) { + LOG(WARNING) << "close file " << file_path_ + << " failed, error code: " << errno; + } + return -1; + } + res = close(fd); + if (res != 0) { + LOG(WARNING) << "close file " << file_path_ + << " failed, error code: " << errno; + return -1; + } + return 0; +} + +bool FileStorage::Insert(const std::string &key, + const std::vector &value) { + data_.emplace(key, value); + return true; +} + +const std::vector *FileStorage::Find(const std::string &key) { + auto iter = data_.find(key); + if (iter == data_.end()) return nullptr; + + return &(iter->second); +} + +int FileStorage::Flush() { + int fd = open(file_path_.c_str(), 
O_WRONLY | O_CREAT, 0600); + if (fd < 0) { + LOG(WARNING) << "open file " << file_path_ + << " failed, error code:" << errno; + return -1; + } + + const size_t int_size = sizeof(int32_t); + + int64_t data_size = sizeof(int64_t); + for (auto &kv : data_) { + data_size += 2 * int_size + kv.first.size() + kv.second.size(); + } + std::unique_ptr buffer(new unsigned char[data_size]); + unsigned char *buffer_ptr = &buffer[0]; + + int64_t num_of_data = data_.size(); + memcpy(buffer_ptr, &num_of_data, sizeof(int64_t)); + buffer_ptr += sizeof(int64_t); + for (auto &kv : data_) { + int32_t key_size = kv.first.size(); + memcpy(buffer_ptr, &key_size, int_size); + buffer_ptr += int_size; + + memcpy(buffer_ptr, kv.first.c_str(), kv.first.size()); + buffer_ptr += kv.first.size(); + + int32_t value_size = kv.second.size(); + memcpy(buffer_ptr, &value_size, int_size); + buffer_ptr += int_size; + + memcpy(buffer_ptr, kv.second.data(), kv.second.size()); + buffer_ptr += kv.second.size(); + } + int res = 0; + buffer_ptr = &buffer[0]; + int64_t remain_size = data_size; + while (remain_size > 0) { + size_t buffer_size = std::min(remain_size, SSIZE_MAX); + res = write(fd, buffer_ptr, buffer_size); + if (res == -1) { + LOG(WARNING) << "write file " << file_path_ + << " failed, error code: " << errno; + res = close(fd); + if (res != 0) { + LOG(WARNING) << "close file " << file_path_ + << " failed, error code: " << errno; + } + return -1; + } + remain_size -= buffer_size; + buffer_ptr += buffer_size; + } + + res = close(fd); + if (res != 0) { + LOG(WARNING) << "close file " << file_path_ + << " failed, error code: " << errno; + return -1; + } + return 0; +} + +}; // namespace mace diff --git a/mace/core/file_storage.h b/mace/core/file_storage.h new file mode 100644 index 0000000000000000000000000000000000000000..c29628cdd657d7a16d494128cfee908dfdc1f18f --- /dev/null +++ b/mace/core/file_storage.h @@ -0,0 +1,34 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +#ifndef MACE_CORE_FILE_STORAGE_H_ +#define MACE_CORE_FILE_STORAGE_H_ + +#include +#include +#include + +#include "mace/public/mace_runtime.h" + +namespace mace { + +class FileStorage : public KVStorage { + public: + explicit FileStorage(const std::string &file_path); + + public: + int Load() override; + bool Insert(const std::string &key, + const std::vector &value) override; + const std::vector *Find(const std::string &key) override; + int Flush() override; + + private: + std::string file_path_; + std::map> data_; +}; + +} // namespace mace + +#endif // MACE_CORE_FILE_STORAGE_H_ diff --git a/mace/core/mace.cc b/mace/core/mace.cc index 98b0190d242c2853faecaa40338630e5e7a8400e..484c9472af8e9e12124c78b054d539584e4d2801 100644 --- a/mace/core/mace.cc +++ b/mace/core/mace.cc @@ -4,8 +4,10 @@ #include +#include "mace/core/file_storage.h" #include "mace/core/net.h" #include "mace/core/runtime/hexagon/hexagon_control_wrapper.h" +#include "mace/core/runtime/opencl/opencl_runtime.h" #include "mace/core/types.h" #include "mace/public/mace.h" @@ -94,6 +96,7 @@ MaceEngine::Impl::Impl(const NetDef *net_def, net_(nullptr), hexagon_controller_(nullptr) { LOG(INFO) << "MACE version: " << MaceVersion(); + // Set storage path for internal usage for (auto input_name : input_nodes) { ws_->CreateTensor(MakeString("mace_input_node_", input_name, ":0"), GetDeviceAllocator(device_type_), DT_FLOAT); @@ -173,6 +176,7 @@ MaceStatus MaceEngine::Impl::Run( LOG(FATAL) << "Net run failed"; } } + OpenCLRuntime::Global()->SaveBuiltCLProgram(); for (auto &output : *outputs) { Tensor *output_tensor = ws_->GetTensor(MakeString("mace_output_node_", output.first + ":0")); diff --git a/mace/core/mace_runtime.cc b/mace/core/mace_runtime.cc index da70f96f72b2e0c76d704773c34eefd2a789b5b1..5f94195370990480c0003836cbde8d2a529c70c3 100644 --- a/mace/core/mace_runtime.cc +++ b/mace/core/mace_runtime.cc @@ -8,6 +8,8 @@ namespace mace { +std::shared_ptr kStorageFactory = nullptr; + void ConfigOpenCLRuntime(GPUPerfHint gpu_perf_hint, GPUPriorityHint gpu_priority_hint) { VLOG(1) << "Set GPU configurations, gpu_perf_hint: " << gpu_perf_hint @@ -15,6 +17,11 @@ void ConfigOpenCLRuntime(GPUPerfHint gpu_perf_hint, OpenCLRuntime::Configure(gpu_perf_hint, gpu_priority_hint); } +void ConfigKVStorageFactory(std::shared_ptr storage_factory) { + VLOG(1) << "Set internal KV Storage Engine"; + kStorageFactory = storage_factory; +} + void ConfigOmpThreads(int omp_num_threads) { VLOG(1) << "Config CPU omp_num_threads: " << omp_num_threads; SetOmpThreads(omp_num_threads); diff --git a/mace/core/runtime/opencl/opencl_development.cc b/mace/core/runtime/opencl/opencl_development.cc deleted file mode 100644 index 036a97ed6f19d7b5fb499d1166f5183f99d84bc4..0000000000000000000000000000000000000000 --- a/mace/core/runtime/opencl/opencl_development.cc +++ /dev/null @@ -1,39 +0,0 @@ -// -// Copyright (c) 2017 XiaoMi All rights reserved. 
-// - -#include -#include -#include -#include -#include - -#include "mace/core/runtime/opencl/cl2_header.h" -#include "mace/utils/logging.h" -#include "mace/utils/utils.h" - -namespace mace { - -bool GetSourceOrBinaryProgram(const std::string &program_name, - const std::string &binary_file_name_prefix, - const cl::Context &context, - const cl::Device &device, - cl::Program *program, - bool *is_binary) { - extern const std::map> - kEncryptedProgramMap; - *is_binary = false; - auto it_source = kEncryptedProgramMap.find(program_name); - if (it_source == kEncryptedProgramMap.end()) { - return false; - } - cl::Program::Sources sources; - std::string content(it_source->second.begin(), it_source->second.end()); - std::string kernel_source = ObfuscateString(content); - sources.push_back(kernel_source); - *program = cl::Program(context, sources); - - return true; -} - -} // namespace mace diff --git a/mace/core/runtime/opencl/opencl_production.cc b/mace/core/runtime/opencl/opencl_production.cc deleted file mode 100644 index f4df016f52911c9ed4c9a37cd4d7660de1621a43..0000000000000000000000000000000000000000 --- a/mace/core/runtime/opencl/opencl_production.cc +++ /dev/null @@ -1,32 +0,0 @@ -// -// Copyright (c) 2017 XiaoMi All rights reserved. -// - -#include -#include -#include -#include - -#include "mace/core/runtime/opencl/cl2_header.h" -#include "mace/utils/logging.h" - -namespace mace { - -bool GetSourceOrBinaryProgram(const std::string &program_name, - const std::string &binary_file_name_prefix, - const cl::Context &context, - const cl::Device &device, - cl::Program *program, - bool *is_binary) { - extern const std::map> - kCompiledProgramMap; - *is_binary = true; - auto it_binary = kCompiledProgramMap.find(binary_file_name_prefix); - if (it_binary == kCompiledProgramMap.end()) { - return false; - } - *program = cl::Program(context, {device}, {it_binary->second}); - return true; -} - -} // namespace mace diff --git a/mace/core/runtime/opencl/opencl_runtime.cc b/mace/core/runtime/opencl/opencl_runtime.cc index 0728d5f0905af0b367fb77df4b986e3f9256ff74..43f51c69ae086eb29015f92b1f26d2bcc203573f 100644 --- a/mace/core/runtime/opencl/opencl_runtime.cc +++ b/mace/core/runtime/opencl/opencl_runtime.cc @@ -10,35 +10,20 @@ #include // NOLINT(build/c++11) #include #include +#include +#include "mace/core/file_storage.h" #include "mace/core/runtime/opencl/opencl_extension.h" #include "mace/public/mace.h" #include "mace/utils/tuner.h" namespace mace { -namespace { - -bool WriteFile(const std::string &filename, - bool binary, - const std::vector &content) { - std::ios_base::openmode mode = std::ios_base::out | std::ios_base::trunc; - if (binary) { - mode |= std::ios::binary; - } - std::ofstream ofs(filename, mode); - - ofs.write(reinterpret_cast(&content[0]), - content.size() * sizeof(char)); - ofs.close(); - if (ofs.fail()) { - LOG(ERROR) << "Failed to write to file " << filename; - return false; - } - return true; -} - -} // namespace +extern const std::map> + kCompiledProgramMap; +extern const std::string kCompiledProgramPlatform; +extern const std::map> + kEncryptedProgramMap; const std::string OpenCLErrorToString(cl_int error) { switch (error) { @@ -194,19 +179,19 @@ void OpenCLProfilingTimer::ClearTiming() { accumulated_micros_ = 0; } -GPUPerfHint OpenCLRuntime::gpu_perf_hint_ = GPUPerfHint::PERF_DEFAULT; -GPUPriorityHint OpenCLRuntime::gpu_priority_hint_ = +GPUPerfHint OpenCLRuntime::kGPUPerfHint = GPUPerfHint::PERF_DEFAULT; +GPUPriorityHint OpenCLRuntime::kGPUPriorityHint = 
GPUPriorityHint::PRIORITY_DEFAULT; OpenCLRuntime *OpenCLRuntime::Global() { - static OpenCLRuntime runtime(gpu_perf_hint_, gpu_priority_hint_); + static OpenCLRuntime runtime(kGPUPerfHint, kGPUPriorityHint); return &runtime; } void OpenCLRuntime::Configure(GPUPerfHint gpu_perf_hint, GPUPriorityHint gpu_priority_hint) { - OpenCLRuntime::gpu_perf_hint_ = gpu_perf_hint; - OpenCLRuntime::gpu_priority_hint_ = gpu_priority_hint; + OpenCLRuntime::kGPUPerfHint = gpu_perf_hint; + OpenCLRuntime::kGPUPriorityHint = gpu_priority_hint; } void GetAdrenoContextProperties(std::vector *properties, @@ -250,7 +235,8 @@ void GetAdrenoContextProperties(std::vector *properties, } OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint, - GPUPriorityHint gpu_priority_hint) { + GPUPriorityHint gpu_priority_hint): + storage_(nullptr) { LoadOpenCLLibrary(); std::vector all_platforms; @@ -259,9 +245,12 @@ OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint, LOG(FATAL) << "No OpenCL platforms found"; } cl::Platform default_platform = all_platforms[0]; - VLOG(1) << "Using platform: " << default_platform.getInfo() - << ", " << default_platform.getInfo() << ", " - << default_platform.getInfo(); + std::stringstream ss; + ss << default_platform.getInfo() + << ", " << default_platform.getInfo() << ", " + << default_platform.getInfo(); + platform_info_ = ss.str(); + VLOG(1) << "Using platform: " << platform_info_; // get default device (CPUs, GPUs) of the default platform std::vector all_devices; @@ -278,10 +267,10 @@ OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint, gpu_detected = true; const std::string device_name = device.getInfo(); - gpu_type_ = ParseGPUTypeFromDeviceName(device_name); + gpu_type_ = ParseGPUType(device_name); const std::string device_version = device.getInfo(); - opencl_version_ = device_version.substr(7, 3); + opencl_version_ = ParseDeviceVersion(device_version); VLOG(1) << "Using device: " << device_name; break; @@ -320,9 +309,19 @@ OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint, &err); MACE_CHECK_CL_SUCCESS(err); - const char *kernel_path = getenv("MACE_KERNEL_PATH"); - this->kernel_path_ = - std::string(kernel_path == nullptr ? 
"" : kernel_path) + "/"; + this->program_map_changed_ = false; + + extern std::shared_ptr kStorageFactory; + if (kStorageFactory != nullptr) { + const std::string cl_compiled_file_name = "mace_cl_compiled_program.bin"; + storage_ = kStorageFactory->CreateStorage(cl_compiled_file_name); + + if (platform_info_ != kCompiledProgramPlatform) { + if (storage_->Load() != 0) { + LOG(FATAL) << "Load opencl compiled kernel file failed"; + } + } + } } OpenCLRuntime::~OpenCLRuntime() { @@ -340,45 +339,49 @@ cl::Device &OpenCLRuntime::device() { return *device_; } cl::CommandQueue &OpenCLRuntime::command_queue() { return *command_queue_; } -std::string OpenCLRuntime::GenerateCLBinaryFilenamePrefix( - const std::string &filename_msg) { - // TODO(heliangliang) This can be long and slow, fix it - std::string filename_prefix = filename_msg; - for (auto it = filename_prefix.begin(); it != filename_prefix.end(); ++it) { - if (*it == ' ' || *it == '-' || *it == '=') { - *it = '_'; +bool OpenCLRuntime::BuildProgramFromBinary( + const std::string &built_program_key, + const std::string &build_options_str, + cl::Program *program) { + // Find from binary + if (kCompiledProgramPlatform != platform_info_) return false; + auto it_binary = kCompiledProgramMap.find(built_program_key); + if (it_binary == kCompiledProgramMap.end()) return false; + + *program = cl::Program(context(), {device()}, {it_binary->second}); + cl_int ret = program->build({device()}, build_options_str.c_str()); + if (ret != CL_SUCCESS) { + if (program->getBuildInfo(device()) == + CL_BUILD_ERROR) { + std::string build_log = + program->getBuildInfo(device()); + LOG(INFO) << "Program build log: " << build_log; } + LOG(WARNING) << "Build program " + << built_program_key << " from Binary failed:" + << (ret == CL_INVALID_PROGRAM ? "CL_INVALID_PROGRAM, possible " + "cause 1: the MACE library is built from SoC 1 but is " + "used on different SoC 2, possible cause 2: the MACE " + "buffer is corrupted make sure your code has no " + "out-of-range memory writing" : MakeString(ret)); + return false; } - return MACE_OBFUSCATE_SYMBOL(filename_prefix); + VLOG(3) << "Program from Binary: " << built_program_key; + return true; } -extern bool GetSourceOrBinaryProgram(const std::string &program_name, - const std::string &binary_file_name_prefix, - const cl::Context &context, - const cl::Device &device, - cl::Program *program, - bool *is_opencl_binary); - -void OpenCLRuntime::BuildProgram(const std::string &program_name, - const std::string &built_program_key, - const std::string &build_options, - cl::Program *program) { - MACE_CHECK_NOTNULL(program); +bool OpenCLRuntime::BuildProgramFromCache( + const std::string &built_program_key, + const std::string &build_options_str, + cl::Program *program) { + // Find from binary + if (this->storage_ == nullptr) return false; + auto content = this->storage_->Find(built_program_key); + if (content == nullptr) { + return false; + } - std::string binary_file_name_prefix = - GenerateCLBinaryFilenamePrefix(built_program_key); - std::vector program_vec; - bool is_opencl_binary; - const bool found = - GetSourceOrBinaryProgram(program_name, binary_file_name_prefix, context(), - device(), program, &is_opencl_binary); - MACE_CHECK(found, "Program not found for ", - is_opencl_binary ? 
"binary: " : "source: ", built_program_key); - - // Build program - std::string build_options_str = - build_options + " -Werror -cl-mad-enable -cl-fast-relaxed-math"; - // TODO(heliangliang) -cl-unsafe-math-optimizations -cl-fast-relaxed-math + *program = cl::Program(context(), {device()}, {*content}); cl_int ret = program->build({device()}, build_options_str.c_str()); if (ret != CL_SUCCESS) { if (program->getBuildInfo(device()) == @@ -387,20 +390,43 @@ void OpenCLRuntime::BuildProgram(const std::string &program_name, program->getBuildInfo(device()); LOG(INFO) << "Program build log: " << build_log; } - LOG(FATAL) << "Build program from " - << (is_opencl_binary ? "binary: " : "source: ") - << built_program_key << " failed: " - << (ret == CL_INVALID_PROGRAM ? "CL_INVALID_PROGRAM, possible " - "cause 1: the MACE library is built from SoC 1 but is " - "used on different SoC 2, possible cause 2: the MACE " - "buffer is corrupted make sure your code has no " - "out-of-range memory writing" : MakeString(ret)); + LOG(WARNING) << "Build program " + << built_program_key << " from Cache failed:" + << MakeString(ret); + return false; } + VLOG(3) << "Program from Cache: " << built_program_key; + return true; +} + +void OpenCLRuntime::BuildProgramFromSource( + const std::string &program_name, + const std::string &built_program_key, + const std::string &build_options_str, + cl::Program *program) { + // Find from source + auto it_source = kEncryptedProgramMap.find(program_name); + if (it_source != kEncryptedProgramMap.end()) { + cl::Program::Sources sources; + std::string source(it_source->second.begin(), it_source->second.end()); + std::string kernel_source = ObfuscateString(source); + sources.push_back(kernel_source); + *program = cl::Program(context(), sources); + cl_int ret = program->build({device()}, build_options_str.c_str()); + if (ret != CL_SUCCESS) { + if (program->getBuildInfo(device()) == + CL_BUILD_ERROR) { + std::string build_log = + program->getBuildInfo(device()); + LOG(INFO) << "Program build log: " << build_log; + } + LOG(WARNING) << "Build program " + << program_name << " from source failed: " + << MakeString(ret); + return; + } - if (!is_opencl_binary) { - // Write binary if necessary - std::string binary_filename = - kernel_path_ + binary_file_name_prefix + ".bin"; + // Keep built program binary size_t device_list_size = 1; std::unique_ptr program_binary_sizes( new size_t[device_list_size]); @@ -424,7 +450,34 @@ void OpenCLRuntime::BuildProgram(const std::string &program_name, reinterpret_cast(program_binaries[0].get()) + program_binary_sizes[0]); - MACE_CHECK(WriteFile(binary_filename, true, content)); + if (this->storage_ != nullptr) { + this->storage_->Insert(built_program_key, content); + this->program_map_changed_ = true; + } + + VLOG(3) << "Program from source: " << built_program_key; + } +} + +void OpenCLRuntime::BuildProgram(const std::string &program_name, + const std::string &built_program_key, + const std::string &build_options, + cl::Program *program) { + MACE_CHECK_NOTNULL(program); + + std::string build_options_str = + build_options + " -Werror -cl-mad-enable -cl-fast-relaxed-math"; + // TODO(heliangliang) -cl-unsafe-math-optimizations -cl-fast-relaxed-math + bool ret = BuildProgramFromBinary(built_program_key, + build_options_str, program); + if (!ret) { + ret = BuildProgramFromCache(built_program_key, + build_options_str, program); + // Fallback to source. 
+ if (!ret) { + BuildProgramFromSource(program_name, built_program_key, + build_options_str, program); + } } } @@ -451,6 +504,15 @@ cl::Kernel OpenCLRuntime::BuildKernel( return cl::Kernel(program, kernel_name.c_str()); } +void OpenCLRuntime::SaveBuiltCLProgram() { + if (program_map_changed_ && storage_ != nullptr) { + if (storage_->Flush() != 0) { + LOG(FATAL) << "Store opencl compiled kernel to file failed"; + } + program_map_changed_ = false; + } +} + void OpenCLRuntime::GetCallStats(const cl::Event &event, CallStats *stats) { if (stats != nullptr) { stats->start_micros = @@ -472,7 +534,6 @@ uint64_t OpenCLRuntime::GetKernelMaxWorkGroupSize(const cl::Kernel &kernel) { return size; } -// TODO(liuqi): not compatible with mali gpu. uint64_t OpenCLRuntime::GetKernelWaveSize(const cl::Kernel &kernel) { uint64_t size = 0; kernel.getWorkGroupInfo(*device_, CL_KERNEL_WAVE_SIZE_QCOM, &size); @@ -488,7 +549,11 @@ const GPUType OpenCLRuntime::gpu_type() const { return gpu_type_; } -const GPUType OpenCLRuntime::ParseGPUTypeFromDeviceName( +const std::string OpenCLRuntime::platform_info() const { + return platform_info_; +} + +const GPUType OpenCLRuntime::ParseGPUType( const std::string &device_name) { constexpr const char *kQualcommAdrenoGPUStr = "QUALCOMM Adreno(TM)"; constexpr const char *kMaliGPUStr = "Mali"; @@ -504,5 +569,13 @@ const GPUType OpenCLRuntime::ParseGPUTypeFromDeviceName( return GPUType::UNKNOWN; } } +const std::string OpenCLRuntime::ParseDeviceVersion( + const std::string &device_version) { + // OpenCL Device version string format: + // OpenCL\ + // + auto words = Split(device_version, ' '); + return words[1]; +} } // namespace mace diff --git a/mace/core/runtime/opencl/opencl_runtime.h b/mace/core/runtime/opencl/opencl_runtime.h index 3f5261b860bf43a91867214b476edab4ff009e50..f6cf4815ce7f2264fe1859f157258b55538095bc 100644 --- a/mace/core/runtime/opencl/opencl_runtime.h +++ b/mace/core/runtime/opencl/opencl_runtime.h @@ -10,6 +10,7 @@ #include // NOLINT(build/c++11) #include #include +#include #include "mace/core/future.h" #include "mace/core/runtime/opencl/cl2_header.h" @@ -55,21 +56,26 @@ class OpenCLRuntime { public: static OpenCLRuntime *Global(); static void Configure(GPUPerfHint, GPUPriorityHint); + static void Configure(std::shared_ptr storage_engine); cl::Context &context(); cl::Device &device(); cl::CommandQueue &command_queue(); + const GPUType gpu_type() const; + const std::string platform_info() const; + + cl::Kernel BuildKernel(const std::string &program_name, + const std::string &kernel_name, + const std::set &build_options); void GetCallStats(const cl::Event &event, CallStats *stats); uint64_t GetDeviceMaxWorkGroupSize(); uint64_t GetKernelMaxWorkGroupSize(const cl::Kernel &kernel); uint64_t GetKernelWaveSize(const cl::Kernel &kernel); const bool IsNonUniformWorkgroupsSupported(); - const GPUType ParseGPUTypeFromDeviceName(const std::string &device_name); - const GPUType gpu_type() const; - cl::Kernel BuildKernel(const std::string &program_name, - const std::string &kernel_name, - const std::set &build_options); + const GPUType ParseGPUType(const std::string &device_name); + const std::string ParseDeviceVersion(const std::string &device_version); + void SaveBuiltCLProgram(); private: OpenCLRuntime(GPUPerfHint, GPUPriorityHint); @@ -81,7 +87,19 @@ class OpenCLRuntime { const std::string &binary_file_name, const std::string &build_options, cl::Program *program); - std::string GenerateCLBinaryFilenamePrefix(const std::string &filename_msg); + bool 
BuildProgramFromBinary( + const std::string &built_program_key, + const std::string &build_options_str, + cl::Program *program); + bool BuildProgramFromCache( + const std::string &built_program_key, + const std::string &build_options_str, + cl::Program *program); + void BuildProgramFromSource( + const std::string &program_name, + const std::string &built_program_key, + const std::string &build_options_str, + cl::Program *program); private: // All OpenCL object must be a pointer and manually deleted before unloading @@ -91,12 +109,14 @@ class OpenCLRuntime { std::shared_ptr command_queue_; std::map built_program_map_; std::mutex program_build_mutex_; - std::string kernel_path_; GPUType gpu_type_; std::string opencl_version_; + std::string platform_info_; + bool program_map_changed_; + std::unique_ptr storage_; - static GPUPerfHint gpu_perf_hint_; - static GPUPriorityHint gpu_priority_hint_; + static GPUPerfHint kGPUPerfHint; + static GPUPriorityHint kGPUPriorityHint; }; } // namespace mace diff --git a/mace/examples/BUILD b/mace/examples/BUILD index 7940c6bb67931aca077da743fbf51ee256e412a6..d572e11d319c3da42599055583ead80bd534e65e 100644 --- a/mace/examples/BUILD +++ b/mace/examples/BUILD @@ -2,8 +2,8 @@ load("//mace:mace.bzl", "if_openmp_enabled") cc_binary( - name = "mace_run", - srcs = ["mace_run.cc"], + name = "example", + srcs = ["example.cc"], linkopts = if_openmp_enabled(["-fopenmp"]), linkstatic = 1, deps = [ diff --git a/mace/examples/example.cc b/mace/examples/example.cc new file mode 100644 index 0000000000000000000000000000000000000000..e70025d6c171087fad4393bc3620f728517ac564 --- /dev/null +++ b/mace/examples/example.cc @@ -0,0 +1,290 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. +// + +/** + * Usage: + * mace_run --model=mobi_mace.pb \ + * --input=input_node \ + * --output=output_node \ + * --input_shape=1,224,224,3 \ + * --output_shape=1,224,224,2 \ + * --input_file=input_data \ + * --output_file=mace.out \ + * --model_data_file=model_data.data \ + * --device=OPENCL + */ +#include +#include +#include +#include +#include +#include + +#include "gflags/gflags.h" +#include "mace/public/mace.h" +#include "mace/public/mace_runtime.h" +#include "mace/utils/env_time.h" +#include "mace/utils/logging.h" + +// #include "mace/codegen/models/${MACE_MODEL_TAG}/${MACE_MODEL_TAG}.h" instead +namespace mace { +namespace MACE_MODEL_TAG { + +extern const unsigned char *LoadModelData(const char *model_data_file); + +extern void UnloadModelData(const unsigned char *model_data); + +extern NetDef CreateNet(const unsigned char *model_data); + +extern const std::string ModelChecksum(); + +} // namespace MACE_MODEL_TAG +} // namespace mace + +namespace mace { +namespace examples { + +namespace str_util { + +std::vector Split(const std::string &str, char delims) { + std::vector result; + std::string tmp = str; + while (!tmp.empty()) { + size_t next_offset = tmp.find(delims); + result.push_back(tmp.substr(0, next_offset)); + if (next_offset == std::string::npos) { + break; + } else { + tmp = tmp.substr(next_offset + 1); + } + } + return result; +} + +} // namespace str_util + +void ParseShape(const std::string &str, std::vector *shape) { + std::string tmp = str; + while (!tmp.empty()) { + int dim = atoi(tmp.data()); + shape->push_back(dim); + size_t next_offset = tmp.find(","); + if (next_offset == std::string::npos) { + break; + } else { + tmp = tmp.substr(next_offset + 1); + } + } +} + +std::string FormatName(const std::string input) { + std::string res = input; + for (size_t i = 0; i < 
input.size(); ++i) { + if (!isalnum(res[i])) res[i] = '_'; + } + return res; +} + +DeviceType ParseDeviceType(const std::string &device_str) { + if (device_str.compare("CPU") == 0) { + return DeviceType::CPU; + } else if (device_str.compare("NEON") == 0) { + return DeviceType::NEON; + } else if (device_str.compare("OPENCL") == 0) { + return DeviceType::OPENCL; + } else if (device_str.compare("HEXAGON") == 0) { + return DeviceType::HEXAGON; + } else { + return DeviceType::CPU; + } +} + + +DEFINE_string(input_node, + "input_node0,input_node1", + "input nodes, separated by comma"); +DEFINE_string(input_shape, + "1,224,224,3:1,1,1,10", + "input shapes, separated by colon and comma"); +DEFINE_string(output_node, + "output_node0,output_node1", + "output nodes, separated by comma"); +DEFINE_string(output_shape, + "1,224,224,2:1,1,1,10", + "output shapes, separated by colon and comma"); +DEFINE_string(input_file, + "", + "input file name | input file prefix for multiple inputs."); +DEFINE_string(output_file, + "", + "output file name | output file prefix for multiple outputs"); +DEFINE_string(model_data_file, + "", + "model data file name, used when EMBED_MODEL_DATA set to 0"); +DEFINE_string(device, "OPENCL", "CPU/NEON/OPENCL/HEXAGON"); +DEFINE_int32(round, 1, "round"); +DEFINE_int32(restart_round, 1, "restart round"); +DEFINE_int32(malloc_check_cycle, -1, "malloc debug check cycle, -1 to disable"); +DEFINE_int32(gpu_perf_hint, 2, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH"); +DEFINE_int32(gpu_priority_hint, 1, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH"); +DEFINE_int32(omp_num_threads, 8, "num of openmp threads"); +DEFINE_int32(cpu_power_option, + 0, + "0:DEFAULT/1:HIGH_PERFORMANCE/2:BATTERY_SAVE"); + +bool RunModel(const std::vector &input_names, + const std::vector> &input_shapes, + const std::vector &output_names, + const std::vector> &output_shapes) { + // load model + const unsigned char *model_data = + mace::MACE_MODEL_TAG::LoadModelData(FLAGS_model_data_file.c_str()); + NetDef net_def = mace::MACE_MODEL_TAG::CreateNet(model_data); + + DeviceType device_type = ParseDeviceType(FLAGS_device); + + // config runtime + mace::ConfigOmpThreads(FLAGS_omp_num_threads); + mace::ConfigCPUPowerOption( + static_cast(FLAGS_cpu_power_option)); + if (device_type == DeviceType::OPENCL) { + mace::ConfigOpenCLRuntime( + static_cast(FLAGS_gpu_perf_hint), + static_cast(FLAGS_gpu_priority_hint)); + } + + const std::string kernel_file_path = + "/data/local/tmp/mace_run/cl"; + + // Config internal kv storage factory. 
+ std::shared_ptr storage_factory( + new FileStorageFactory(kernel_file_path)); + ConfigKVStorageFactory(storage_factory); + // Init model + mace::MaceEngine engine(&net_def, device_type, input_names, + output_names); + if (device_type == DeviceType::OPENCL || device_type == DeviceType::HEXAGON) { + mace::MACE_MODEL_TAG::UnloadModelData(model_data); + } + + const size_t input_count = input_names.size(); + const size_t output_count = output_names.size(); + + std::map inputs; + std::map outputs; + for (size_t i = 0; i < input_count; ++i) { + // Allocate input and output + int64_t input_size = + std::accumulate(input_shapes[i].begin(), input_shapes[i].end(), 1, + std::multiplies()); + auto buffer_in = std::shared_ptr(new float[input_size], + std::default_delete()); + // load input + std::ifstream in_file(FLAGS_input_file + "_" + FormatName(input_names[i]), + std::ios::in | std::ios::binary); + if (in_file.is_open()) { + in_file.read(reinterpret_cast(buffer_in.get()), + input_size * sizeof(float)); + in_file.close(); + } else { + LOG(INFO) << "Open input file failed"; + return -1; + } + inputs[input_names[i]] = mace::MaceTensor(input_shapes[i], buffer_in); + } + + for (size_t i = 0; i < output_count; ++i) { + int64_t output_size = + std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1, + std::multiplies()); + auto buffer_out = std::shared_ptr(new float[output_size], + std::default_delete()); + outputs[output_names[i]] = mace::MaceTensor(output_shapes[i], buffer_out); + } + + LOG(INFO) << "Warm up run"; + engine.Run(inputs, &outputs); + + if (FLAGS_round > 0) { + LOG(INFO) << "Run model"; + for (int i = 0; i < FLAGS_round; ++i) { + engine.Run(inputs, &outputs); + } + } + + for (size_t i = 0; i < output_count; ++i) { + std::string output_name = + FLAGS_output_file + "_" + FormatName(output_names[i]); + std::ofstream out_file(output_name, std::ios::binary); + int64_t output_size = + std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1, + std::multiplies()); + out_file.write( + reinterpret_cast(outputs[output_names[i]].data().get()), + output_size * sizeof(float)); + out_file.flush(); + out_file.close(); + } + + return true; +} + +int Main(int argc, char **argv) { + gflags::SetUsageMessage("some usage message"); + gflags::ParseCommandLineFlags(&argc, &argv, true); + + LOG(INFO) << "mace version: " << MaceVersion(); + LOG(INFO) << "model checksum: " << mace::MACE_MODEL_TAG::ModelChecksum(); + LOG(INFO) << "input node: " << FLAGS_input_node; + LOG(INFO) << "input shape: " << FLAGS_input_shape; + LOG(INFO) << "output node: " << FLAGS_output_node; + LOG(INFO) << "output shape: " << FLAGS_output_shape; + LOG(INFO) << "input_file: " << FLAGS_input_file; + LOG(INFO) << "output_file: " << FLAGS_output_file; + LOG(INFO) << "model_data_file: " << FLAGS_model_data_file; + LOG(INFO) << "device: " << FLAGS_device; + LOG(INFO) << "round: " << FLAGS_round; + LOG(INFO) << "restart_round: " << FLAGS_restart_round; + LOG(INFO) << "gpu_perf_hint: " << FLAGS_gpu_perf_hint; + LOG(INFO) << "gpu_priority_hint: " << FLAGS_gpu_priority_hint; + LOG(INFO) << "omp_num_threads: " << FLAGS_omp_num_threads; + LOG(INFO) << "cpu_power_option: " << FLAGS_cpu_power_option; + + std::vector input_names = str_util::Split(FLAGS_input_node, ','); + std::vector output_names = + str_util::Split(FLAGS_output_node, ','); + std::vector input_shapes = + str_util::Split(FLAGS_input_shape, ':'); + std::vector output_shapes = + str_util::Split(FLAGS_output_shape, ':'); + + const size_t input_count = 
input_shapes.size(); + const size_t output_count = output_shapes.size(); + std::vector> input_shape_vec(input_count); + std::vector> output_shape_vec(output_count); + for (size_t i = 0; i < input_count; ++i) { + ParseShape(input_shapes[i], &input_shape_vec[i]); + } + for (size_t i = 0; i < output_count; ++i) { + ParseShape(output_shapes[i], &output_shape_vec[i]); + } + + bool ret; +#pragma omp parallel for + for (int i = 0; i < FLAGS_restart_round; ++i) { + VLOG(0) << "restart round " << i; + ret = + RunModel(input_names, input_shape_vec, output_names, output_shape_vec); + } + if (ret) { + return 0; + } else { + return -1; + } +} + +} // namespace examples +} // namespace mace + +int main(int argc, char **argv) { mace::examples::Main(argc, argv); } diff --git a/mace/public/mace_runtime.h b/mace/public/mace_runtime.h index 63ca972324a9ff538d088dc407460e5853e67a59..8acb1cace385a876e97d62a2c2563c0bd13bd497 100644 --- a/mace/public/mace_runtime.h +++ b/mace/public/mace_runtime.h @@ -8,6 +8,11 @@ #ifndef MACE_PUBLIC_MACE_RUNTIME_H_ #define MACE_PUBLIC_MACE_RUNTIME_H_ +#include +#include +#include +#include + namespace mace { enum GPUPerfHint { @@ -26,10 +31,42 @@ enum GPUPriorityHint { enum CPUPowerOption { DEFAULT = 0, HIGH_PERFORMANCE = 1, BATTERY_SAVE = 2 }; +class KVStorage { + public: + // return: 0 for success, -1 for error + virtual int Load() = 0; + virtual bool Insert(const std::string &key, + const std::vector &value) = 0; + virtual const std::vector *Find(const std::string &key) = 0; + // return: 0 for success, -1 for error + virtual int Flush() = 0; +}; + +class KVStorageFactory { + public: + virtual std::unique_ptr CreateStorage(const std::string &name) = 0; +}; + +class FileStorageFactory : public KVStorageFactory { + public: + explicit FileStorageFactory(const std::string &path); + + ~FileStorageFactory(); + + std::unique_ptr CreateStorage(const std::string &name) override; + + private: + class Impl; + std::unique_ptr impl_; +}; + +void ConfigKVStorageFactory(std::shared_ptr storage_factory); + void ConfigOpenCLRuntime(GPUPerfHint, GPUPriorityHint); void ConfigOmpThreads(int omp_num_threads); void ConfigCPUPowerOption(CPUPowerOption power_option); + } // namespace mace #endif // MACE_PUBLIC_MACE_RUNTIME_H_ diff --git a/mace/python/tools/binary_codegen.py b/mace/python/tools/binary_codegen.py index 9956624b41086a1cb91403a21fd25b79c56a2efd..3be2b086461dd70e7fa6066f086d91442601dec5 100644 --- a/mace/python/tools/binary_codegen.py +++ b/mace/python/tools/binary_codegen.py @@ -25,9 +25,9 @@ def generate_cpp_source(): with open(binary_path, "rb") as f: binary_array = np.fromfile(f, dtype=np.uint8) + print "Generate binary from", binary_path idx = 0 size, = struct.unpack("Q", binary_array[idx:idx+8]) - print size idx += 8 for _ in xrange(size): key_size, = struct.unpack("i", binary_array[idx:idx+4]) @@ -64,7 +64,7 @@ def parse_args(): parser.add_argument( "--binary_dirs", type=str, - default="cl_bin0/,cl_bin1/", + default="", help="The binaries file path.") parser.add_argument( "--binary_file_name", diff --git a/mace/python/tools/opencl_codegen.py b/mace/python/tools/opencl_codegen.py index 04bda9a82ff46d02d7cfb9a1ebc1a19ee7f7b9c2..96f4c6a6d11b27c60f01b83e35ecd47bca863e05 100644 --- a/mace/python/tools/opencl_codegen.py +++ b/mace/python/tools/opencl_codegen.py @@ -1,6 +1,7 @@ import argparse import os import sys +import struct import numpy as np @@ -14,30 +15,49 @@ FLAGS = None def generate_cpp_source(): maps = {} - cl_binary_dir_arr = FLAGS.cl_binary_dirs.split(",") - for 
cl_binary_dir in cl_binary_dir_arr: - if not os.path.exists(cl_binary_dir): - print("Input cl_binary_dir " + cl_binary_dir + " doesn't exist!") - for file_name in os.listdir(cl_binary_dir): - file_path = os.path.join(cl_binary_dir, file_name) - if file_path[-4:] == ".bin": - # read binary - f = open(file_path, "rb") - binary_array = np.fromfile(f, dtype=np.uint8) - f.close() - - maps[file_name[:-4]] = [] - for ele in binary_array: - maps[file_name[:-4]].append(hex(ele)) + platform_info = '' + binary_dirs = FLAGS.cl_binary_dirs.strip().split(",") + for binary_dir in binary_dirs: + binary_path = os.path.join(binary_dir, FLAGS.built_kernel_file_name) + if not os.path.exists(binary_path): + continue + + print 'generate opencl code from', binary_path + with open(binary_path, "rb") as f: + binary_array = np.fromfile(f, dtype=np.uint8) + + idx = 0 + size, = struct.unpack("Q", binary_array[idx:idx+8]) + idx += 8 + for _ in xrange(size): + key_size, = struct.unpack("i", binary_array[idx:idx+4]) + idx += 4 + key, = struct.unpack(str(key_size) + "s", binary_array[idx:idx+key_size]) + idx += key_size + value_size, = struct.unpack("i", binary_array[idx:idx+4]) + idx += 4 + maps[key] = [] + value = struct.unpack(str(value_size) + "B", + binary_array[idx:idx+value_size]) + idx += value_size + for ele in value: + maps[key].append(hex(ele)) + + cl_platform_info_path = os.path.join(binary_dir, FLAGS.platform_info_file_name) + with open(cl_platform_info_path, 'r') as f: + curr_platform_info = f.read() + if platform_info != "": + assert(curr_platform_info == platform_info) + platform_info = curr_platform_info env = jinja2.Environment(loader=jinja2.FileSystemLoader(sys.path[0])) - return env.get_template('str2vec_maps.cc.jinja2').render( + return env.get_template('opencl_compiled_kernel.cc.jinja2').render( maps = maps, data_type = 'unsigned char', - variable_name = 'kCompiledProgramMap' + variable_name = 'kCompiledProgramMap', + platform_info = platform_info, ) - def main(unused_args): cpp_cl_binary_source = generate_cpp_source() @@ -54,7 +74,17 @@ def parse_args(): parser.add_argument( "--cl_binary_dirs", type=str, - default="cl_bin0/,cl_bin1/,cl_bin2/", + default="", + help="The cl binaries directories.") + parser.add_argument( + "--built_kernel_file_name", + type=str, + default="", + help="The cl binaries directories.") + parser.add_argument( + "--platform_info_file_name", + type=str, + default="", help="The cl binaries directories.") parser.add_argument( "--output_path", diff --git a/mace/python/tools/opencl_compiled_kernel.cc.jinja2 b/mace/python/tools/opencl_compiled_kernel.cc.jinja2 new file mode 100644 index 0000000000000000000000000000000000000000..391b03f0cd64b9c07ad4fe3b5345a3aaa7c1cd69 --- /dev/null +++ b/mace/python/tools/opencl_compiled_kernel.cc.jinja2 @@ -0,0 +1,29 @@ +// +// Copyright (c) 2017 XiaoMi All rights reserved. 
+// + +// This is a generated file, DO NOT EDIT + +#include +#include +#include + +namespace mace { + +extern const std::map> {{variable_name}} = +{ + {% for key, value in maps.iteritems() %} + { + "{{key}}", + { + {%- for ele in value -%} + {{ele}}, + {%- endfor -%} + } + }, // {{key}} +{% endfor %} +}; + +extern const std::string kCompiledProgramPlatform = {{platform_info|tojson}}; + +} // namespace mace diff --git a/mace/tools/validation/BUILD b/mace/tools/validation/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..a4e1cb876dd508b9a1a365c2350345301b9a3af4 --- /dev/null +++ b/mace/tools/validation/BUILD @@ -0,0 +1,14 @@ +# Examples +load("//mace:mace.bzl", "if_openmp_enabled") + +cc_binary( + name = "mace_run", + srcs = ["mace_run.cc"], + linkopts = if_openmp_enabled(["-fopenmp"]), + linkstatic = 1, + deps = [ + "//external:gflags_nothreads", + "//mace/codegen:generated_models", + "//mace/core:core", + ], +) diff --git a/mace/examples/mace_run.cc b/mace/tools/validation/mace_run.cc similarity index 93% rename from mace/examples/mace_run.cc rename to mace/tools/validation/mace_run.cc index 207cfbecce19f104b82a6cd1b4f311803ecec7f0..d4fa7c29a00073b9aff74d0a107e620540fc45cb 100644 --- a/mace/examples/mace_run.cc +++ b/mace/tools/validation/mace_run.cc @@ -23,6 +23,7 @@ #include #include "gflags/gflags.h" +#include "mace/core/runtime/opencl/opencl_runtime.h" #include "mace/public/mace.h" #include "mace/public/mace_runtime.h" #include "mace/utils/env_time.h" @@ -101,6 +102,20 @@ DeviceType ParseDeviceType(const std::string &device_str) { } } +void WriteOpenCLPlatformInfo(const std::string &output_dir) { + std::string platform_info = OpenCLRuntime::Global()->platform_info(); + const std::string cl_platform_info_file_name = output_dir + + "/mace_cl_platform_info.txt"; + + std::ofstream ofs(cl_platform_info_file_name); + if (ofs.is_open()) { + ofs << platform_info; + ofs.close(); + } else { + LOG(WARNING) << "Write opencl platform info failed."; + } +} + struct mallinfo LogMallinfoChange(struct mallinfo prev) { struct mallinfo curr = mallinfo(); if (prev.arena != curr.arena) { @@ -206,8 +221,16 @@ bool RunModel(const std::vector &input_names, static_cast(FLAGS_gpu_priority_hint)); } + const char *kernel_path = getenv("MACE_CL_PROGRAM_PATH"); + const std::string kernel_file_path = + std::string(kernel_path == nullptr ? 
+ "/data/local/tmp/mace_run/cl_program" : kernel_path); + // Init model LOG(INFO) << "Run init"; + std::shared_ptr storage_factory( + new FileStorageFactory(kernel_file_path)); + ConfigKVStorageFactory(storage_factory); mace::MaceEngine engine(&net_def, device_type, input_names, output_names); if (device_type == DeviceType::OPENCL || device_type == DeviceType::HEXAGON) { mace::MACE_MODEL_TAG::UnloadModelData(model_data); @@ -285,6 +308,8 @@ bool RunModel(const std::vector &input_names, printf("time %11.3f %11.3f %11.3f %11.3f %11.3f\n", create_net_millis, mace_engine_ctor_millis, init_millis, warmup_millis, model_run_millis); + WriteOpenCLPlatformInfo(kernel_file_path); + for (size_t i = 0; i < output_count; ++i) { std::string output_name = FLAGS_output_file + "_" + FormatName(output_names[i]); diff --git a/mace/utils/utils.h b/mace/utils/utils.h index 54ab5793d229bba9453ae38fae6ec260f3c9cb6e..a9ca2a40aae252c10ce96d5968987b68af9894ea 100644 --- a/mace/utils/utils.h +++ b/mace/utils/utils.h @@ -9,6 +9,7 @@ #include #include #include +#include namespace mace { template @@ -96,5 +97,20 @@ inline std::string ObfuscateSymbol(const std::string &src) { #define MACE_OBFUSCATE_SYMBOL(str) (str) #endif +inline std::vector Split(const std::string &str, char delims) { + std::vector result; + std::string tmp = str; + while (!tmp.empty()) { + size_t next_offset = tmp.find(delims); + result.push_back(tmp.substr(0, next_offset)); + if (next_offset == std::string::npos) { + break; + } else { + tmp = tmp.substr(next_offset + 1); + } + } + return result; +} + } // namespace mace #endif // MACE_UTILS_UTILS_H_ diff --git a/tools/bazel_adb_run.py b/tools/bazel_adb_run.py index ea0fe7dcd675a3518576a6726b726da5251800d9..78f7a24a5591cc0164cedffdf370d9c9ea9fb0a5 100644 --- a/tools/bazel_adb_run.py +++ b/tools/bazel_adb_run.py @@ -94,6 +94,7 @@ def main(unused_args): # generate sources sh_commands.gen_encrypted_opencl_source() + sh_commands.gen_compiled_opencl_source() sh_commands.gen_mace_version() for target_abi in target_abis: diff --git a/tools/build_mace_run.sh b/tools/build_mace_run.sh index 658ae26745f330cd0dded36da0f24d85d1bb9361..8f22ee33966692798cbb4ee25d427dc4579ff265 100644 --- a/tools/build_mace_run.sh +++ b/tools/build_mace_run.sh @@ -30,7 +30,7 @@ if [ x"$TARGET_ABI" = x"host" ]; then --copt="-O3" \ $PRODUCTION_MODE_BUILD_FLAGS || exit 1 - bazel build --verbose_failures -c opt --strip always //mace/examples:mace_run \ + bazel build --verbose_failures -c opt --strip always //mace/tools/validation:mace_run \ --copt="-std=c++11" \ --copt="-D_GLIBCXX_USE_C99_MATH_TR1" \ --copt="-Werror=return-type" \ @@ -47,7 +47,7 @@ else NEON_ENABLE_FLAG="--define neon=true" fi - bazel build --verbose_failures -c opt --strip always //mace/examples:mace_run \ + bazel build --verbose_failures -c opt --strip always //mace/tools/validation:mace_run \ --crosstool_top=//external:android/crosstool \ --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \ --cpu=${TARGET_ABI} \ @@ -64,14 +64,13 @@ else $HEXAGON_MODE_BUILD_FLAG || exit 1 fi -if [ "$PRODUCTION_MODE" = 1 ]; then - cp $GENERATED_MODEL_LIB_PATH $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a -fi +rm -rf $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a +cp $GENERATED_MODEL_LIB_PATH $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a if [ -f "$MODEL_OUTPUT_DIR/mace_run" ]; then rm -rf $MODEL_OUTPUT_DIR/mace_run fi -cp bazel-bin/mace/examples/mace_run $MODEL_OUTPUT_DIR +cp bazel-bin/mace/tools/validation/mace_run $MODEL_OUTPUT_DIR if [ "$EMBED_MODEL_DATA" = 0 ]; then cp 
mace/codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR fi diff --git a/tools/build_production_code.sh b/tools/build_production_code.sh index 7987f67fa7747177517aeef621b2a137ce4a9f9d..4072019429ecd61d4b7e18f0cbbed17e1851dc09 100644 --- a/tools/build_production_code.sh +++ b/tools/build_production_code.sh @@ -35,9 +35,9 @@ build_target() } if [ x"$TARGET_ABI" = x"host" ]; then - build_host_target //mace/codegen:generated_opencl_prod + build_host_target //mace/codegen:generated_opencl build_host_target //mace/codegen:generated_tuning_params else - build_target //mace/codegen:generated_opencl_prod + build_target //mace/codegen:generated_opencl build_target //mace/codegen:generated_tuning_params fi diff --git a/tools/clear_env.sh b/tools/clear_env.sh index ef14f55bf2d6433b8134258d12e8cf24c91f3efb..1652bb52a4ad2e08763e1b1fddcf05487c82a383 100644 --- a/tools/clear_env.sh +++ b/tools/clear_env.sh @@ -26,4 +26,3 @@ if [ x"$TARGET_ABI" != x"host" ]; then fi rm -rf mace/codegen/models -git checkout -- mace/codegen/opencl/opencl_compiled_program.cc mace/codegen/tuning/tuning_params.cc diff --git a/tools/env.sh b/tools/env.sh index 5ef841a1ed14eb75f8a75dc3f7b87d0ec7b55db6..634fb93c6d5bf6726c461066d60446601ca8b62a 100644 --- a/tools/env.sh +++ b/tools/env.sh @@ -3,12 +3,14 @@ LIBMACE_TAG=`git describe --abbrev=0 --tags` MACE_SOURCE_DIR=`/bin/pwd` PHONE_DATA_DIR="/data/local/tmp/mace_run" -KERNEL_DIR="${PHONE_DATA_DIR}/cl/" +COMPILED_PROGRAM_DIR="${PHONE_DATA_DIR}/cl_program/" CODEGEN_DIR=${MACE_SOURCE_DIR}/mace/codegen MODEL_CODEGEN_DIR=${CODEGEN_DIR}/models/${MODEL_TAG} CL_CODEGEN_DIR=${CODEGEN_DIR}/opencl TUNING_CODEGEN_DIR=${CODEGEN_DIR}/tuning VERSION_SOURCE_PATH=${CODEGEN_DIR}/version +CL_BUILT_KERNEL_FILE_NAME=mace_cl_compiled_program.bin +CL_PLATFORM_INFO_FILE_NAME=mace_cl_platform_info.txt if [ -z ${EMBED_MODEL_DATA} ]; then EMBED_MODEL_DATA=1 fi diff --git a/tools/generate_opencl_code.sh b/tools/generate_opencl_code.sh new file mode 100644 index 0000000000000000000000000000000000000000..37d1fb10c9e3f2474e7000c29d58cfb5aeed5a9e --- /dev/null +++ b/tools/generate_opencl_code.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +Usage() { + echo "Usage: bash tools/genenrate_opencl_code.sh type [target_soc] [cl_bin_dirs] [pull_or_not]" +} + +if [ $# -lt 1 ]; then + Usage + exit 1 +fi + +CURRENT_DIR=`dirname $0` +source ${CURRENT_DIR}/env.sh + +TYPE=$1 +TARGET_SOC=$2 +CL_BIN_DIRS=$3 +PULL_OR_NOT=$4 + +mkdir -p ${CL_CODEGEN_DIR} + +if [ x"$TYPE" == x"source" ];then + python mace/python/tools/encrypt_opencl_codegen.py \ + --cl_kernel_dir=./mace/kernels/opencl/cl/ \ + --output_path=${CODEGEN_DIR}/opencl/opencl_encrypt_program.cc || exit 1 +elif [ x"$#" == x"1" ];then + + python mace/python/tools/opencl_codegen.py \ + --built_kernel_file_name=${CL_BUILT_KERNEL_FILE_NAME} \ + --platform_info_file_name=${CL_PLATFORM_INFO_FILE_NAME} \ + --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc || exit 1 + +else + RESULT_VALUE=`echo_device_id_by_soc $TARGET_SOC` + if [ $? -ne 0 ]; then + echo $RESULT_VALUE + exit 1 + else + DEVICE_ID=$RESULT_VALUE + fi + + if [ "$PULL_OR_NOT" = 1 ]; then + CL_BIN_DIR=${CL_BIN_DIRS} + mkdir -p ${CL_BIN_DIR} + rm -rf ${CL_BIN_DIR}/${CL_BUILT_KERNEL_FILE_NAME} + rm -rf ${CL_BIN_DIR}/${CL_PLATFORM_INFO_FILE_NAME} + if [ x"$TARGET_ABI" != x"host" ]; then + adb -s $DEVICE_ID pull ${COMPILED_PROGRAM_DIR}/. 
${CL_BIN_DIR} > /dev/null + fi + fi + + python mace/python/tools/opencl_codegen.py \ + --built_kernel_file_name=${CL_BUILT_KERNEL_FILE_NAME} \ + --platform_info_file_name=${CL_PLATFORM_INFO_FILE_NAME} \ + --cl_binary_dirs=${CL_BIN_DIRS} \ + --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc || exit 1 +fi + diff --git a/tools/generate_production_code.sh b/tools/generate_production_code.sh deleted file mode 100644 index f4f021c4e930e50a5c969568e0ecda0891357914..0000000000000000000000000000000000000000 --- a/tools/generate_production_code.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -Usage() { - echo "Usage: bash tools/generate_production_code.sh target_soc cl_bin_dirs pull_or_not" -} - -if [ $# -lt 3 ]; then - Usage - exit 1 -fi - -CURRENT_DIR=`dirname $0` -source ${CURRENT_DIR}/env.sh - -TARGET_SOC=$1 -CL_BIN_DIRS=$2 -PULL_OR_NOT=$3 - -RESULT_VALUE=`echo_device_id_by_soc $TARGET_SOC` -if [ $? -ne 0 ]; then - echo $RESULT_VALUE - exit 1 -else - DEVICE_ID=$RESULT_VALUE -fi - -if [ "$PULL_OR_NOT" = 1 ]; then - CL_BIN_DIR=${CL_BIN_DIRS} - rm -rf ${CL_BIN_DIR} - mkdir -p ${CL_BIN_DIR} - if [ x"$TARGET_ABI" != x"host" ]; then - adb -s $DEVICE_ID pull ${KERNEL_DIR}/. ${CL_BIN_DIR} > /dev/null - adb -s $DEVICE_ID pull ${PHONE_DATA_DIR}/mace_run.config ${CL_BIN_DIR} > /dev/null - fi -fi - -python mace/python/tools/opencl_codegen.py \ - --cl_binary_dirs=${CL_BIN_DIRS} \ - --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc - -python mace/python/tools/binary_codegen.py \ - --binary_dirs=${CL_BIN_DIRS} \ - --binary_file_name=mace_run.config \ - --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc diff --git a/tools/generate_tuning_param_code.sh b/tools/generate_tuning_param_code.sh new file mode 100644 index 0000000000000000000000000000000000000000..4d116c9fec7c140a2078e0d6f28e1c5181468e51 --- /dev/null +++ b/tools/generate_tuning_param_code.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +CURRENT_DIR=`dirname $0` +source ${CURRENT_DIR}/env.sh + +mkdir -p ${TUNING_CODEGEN_DIR} + +if [ "$#" -eq "0" ]; then + python mace/python/tools/binary_codegen.py \ + --binary_file_name=mace_run.config \ + --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc +else + + TARGET_SOC=$1 + BIN_DIRS=$2 + PULL_OR_NOT=$3 + + RESULT_VALUE=`echo_device_id_by_soc $TARGET_SOC` + if [ $? 
-ne 0 ]; then + echo $RESULT_VALUE + exit 1 + else + DEVICE_ID=$RESULT_VALUE + fi + + if [ "$PULL_OR_NOT" = 1 ]; then + mkdir -p ${BIN_DIRS} + rm -rf ${BIN_DIRS}/mace_run.config + if [ x"$TARGET_ABI" != x"host" ]; then + adb -s $DEVICE_ID pull ${PHONE_DATA_DIR}/mace_run.config ${BIN_DIRS} > /dev/null + fi + fi + + python mace/python/tools/binary_codegen.py \ + --binary_dirs=${BIN_DIRS} \ + --binary_file_name=mace_run.config \ + --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc +fi + + diff --git a/tools/generate_opencl_and_version_code.sh b/tools/generate_version_code.sh similarity index 54% rename from tools/generate_opencl_and_version_code.sh rename to tools/generate_version_code.sh index ee32acc7dac7fdfc83beb0aab1be26106efbf6e0..7b1c33d219a6a886c29335deff8c4bc6661c39c4 100644 --- a/tools/generate_opencl_and_version_code.sh +++ b/tools/generate_version_code.sh @@ -1,13 +1,10 @@ #!/bin/bash +CL_KERNEL_DIR_TAG=$1 + CURRENT_DIR=`dirname $0` source ${CURRENT_DIR}/env.sh -python mace/python/tools/encrypt_opencl_codegen.py \ - --cl_kernel_dir=./mace/kernels/opencl/cl/ \ - --output_path=${CODEGEN_DIR}/opencl/opencl_encrypt_program.cc || exit 1 - - rm -rf ${CODEGEN_DIR}/version mkdir ${CODEGEN_DIR}/version bash mace/tools/git/gen_version_source.sh ${CODEGEN_DIR}/version/version.cc || exit 1 diff --git a/tools/mace_tools.py b/tools/mace_tools.py index a00b15cf45f77141eb973f6b0d01511223a82557..d229573a5f1210068eb00616b9cf9b5e7b360b17 100644 --- a/tools/mace_tools.py +++ b/tools/mace_tools.py @@ -59,10 +59,41 @@ def get_global_runtime(configs): return global_runtime -def generate_opencl_and_version_code(): - command = "bash tools/generate_opencl_and_version_code.sh" +def generate_version_code(): + command = "bash tools/generate_version_code.sh" run_command(command) +def generate_opencl_source_code(): + command = "bash tools/generate_opencl_code.sh source" + run_command(command) + +def generate_opencl_binay_code(target_soc, model_output_dirs, pull_or_not): + cl_bin_dirs = [] + for d in model_output_dirs: + cl_bin_dirs.append(os.path.join(d, "opencl_bin")) + cl_bin_dirs_str = ",".join(cl_bin_dirs) + if not cl_bin_dirs: + command = "bash tools/generate_opencl_code.sh binary" + else: + command = "bash tools/generate_opencl_code.sh {} {} {} {}".format( + 'binary', target_soc, cl_bin_dirs_str, int(pull_or_not)) + run_command(command) + +def generate_tuning_param_code(target_soc, model_output_dirs, pull_or_not): + cl_bin_dirs = [] + for d in model_output_dirs: + cl_bin_dirs.append(os.path.join(d, "opencl_bin")) + cl_bin_dirs_str = ",".join(cl_bin_dirs) + if not cl_bin_dirs: + command = "bash tools/generate_tuning_param_code.sh" + else: + command = "bash tools/generate_tuning_param_code.sh {} {} {}".format( + target_soc, cl_bin_dirs_str, int(pull_or_not)) + run_command(command) + +def generate_code(target_soc, model_output_dirs, pull_or_not): + generate_opencl_binay_code(target_soc, model_output_dirs, pull_or_not) + generate_tuning_param_code(target_soc, model_output_dirs, pull_or_not) def clear_env(target_soc): command = "bash tools/clear_env.sh {}".format(target_soc) @@ -119,14 +150,13 @@ def tuning_run(model_name, model_output_dir, running_round, tuning, - production_mode, restart_round, option_args=''): # TODO(yejianwu) refactoring the hackish code stdout_buff = [] process_output = sh_commands.make_output_processor(stdout_buff) p = sh.bash("tools/tuning_run.sh", target_soc, model_output_dir, - running_round, int(tuning), int(production_mode), + running_round, int(tuning), restart_round, option_args, 
_out=process_output, _bg=True, _err_to_out=True) p.wait() @@ -158,7 +188,7 @@ def benchmark_model(target_soc, model_output_dir, option_args=''): def run_model(model_name, target_runtime, target_abi, target_soc, model_output_dir, running_round, restart_round, option_args): tuning_run(model_name, target_runtime, target_abi, target_soc, - model_output_dir, running_round, False, False, + model_output_dir, running_round, False, restart_round, option_args) @@ -179,22 +209,21 @@ def build_mace_run_prod(model_name, target_runtime, target_abi, target_soc, else: hexagon_mode = False + generate_code(target_soc, [], False) production_or_not = False build_mace_run(production_or_not, model_output_dir, hexagon_mode) tuning_run( model_name, target_runtime, - target_abi, + target_abi, target_soc, model_output_dir, running_round=0, tuning=tuning, - production_mode=production_or_not, restart_round=1) + generate_code(target_soc, [model_output_dir], True) production_or_not = True - pull_or_not = True - generate_production_code(target_soc, [model_output_dir], pull_or_not) build_mace_run(production_or_not, model_output_dir, hexagon_mode) @@ -218,8 +247,7 @@ def build_production_code(): def merge_libs_and_tuning_results(target_soc, output_dir, model_output_dirs): - pull_or_not = False - generate_production_code(target_soc, model_output_dirs, pull_or_not) + generate_code(target_soc, model_output_dirs, False) build_production_code() model_output_dirs_str = ",".join(model_output_dirs) @@ -232,6 +260,26 @@ def packaging_lib_file(output_dir): command = "bash tools/packaging_lib.sh {}".format(output_dir) run_command(command) +def download_model_files(model_file_path, + model_output_dir, + weight_file_path=""): + if model_file_path.startswith("http://") or \ + model_file_path.startswith("https://"): + os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb" + urllib.urlretrieve(model_file_path, os.environ["MODEL_FILE_PATH"]) + + if weight_file_path.startswith("http://") or \ + weight_file_path.startswith("https://"): + os.environ[ + "WEIGHT_FILE_PATH"] = model_output_dir + "/model.caffemodel" + urllib.urlretrieve(weight_file_path, + os.environ["WEIGHT_FILE_PATH"]) + +def md5sum(str): + md5 = hashlib.md5() + md5.update(str) + return md5.hexdigest() + def parse_model_configs(): with open(FLAGS.config) as f: @@ -298,7 +346,9 @@ def main(unused_args): shutil.rmtree(os.path.join(FLAGS.output_dir, os.environ["PROJECT_NAME"])) os.makedirs(os.path.join(FLAGS.output_dir, os.environ["PROJECT_NAME"])) - generate_opencl_and_version_code() + generate_version_code() + generate_opencl_source_code() + option_args = ' '.join([arg for arg in unused_args if arg.startswith('--')]) available_socs = sh_commands.adb_get_all_socs() @@ -315,6 +365,7 @@ def main(unused_args): print("Error: devices with SoCs are not connected %s" % missing_socs) exit(1) + for target_soc in target_socs: for target_abi in configs["target_abis"]: global_runtime = get_global_runtime(configs) @@ -322,9 +373,9 @@ def main(unused_args): os.environ["TARGET_ABI"] = target_abi model_output_dirs = [] for model_name in configs["models"]: + print '=======================', model_name, '=======================' # Transfer params by environment os.environ["MODEL_TAG"] = model_name - print '=======================', model_name, '=======================' model_config = configs["models"][model_name] input_file_list = model_config.get("validation_inputs_data", []) for key in model_config: @@ -337,9 +388,8 @@ def main(unused_args): else: os.environ[key.upper()] = 
str(model_config[key]) - md5 = hashlib.md5() - md5.update(model_config["model_file_path"]) - model_path_digest = md5.hexdigest() + # Create model build directory + model_path_digest = md5sum(model_config["model_file_path"]) model_output_dir = "%s/%s/%s/%s/%s/%s/%s" % (FLAGS.output_dir, os.environ["PROJECT_NAME"], "build", model_name, @@ -353,21 +403,8 @@ def main(unused_args): os.makedirs(model_output_dir) clear_env(target_soc) - # Support http:// and https:// - if model_config["model_file_path"].startswith( - "http://") or model_config["model_file_path"].startswith( - "https://"): - os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb" - urllib.urlretrieve(model_config["model_file_path"], - os.environ["MODEL_FILE_PATH"]) - - if model_config["platform"] == "caffe" and ( - model_config["weight_file_path"].startswith("http://") or - model_config["weight_file_path"].startswith("https://")): - os.environ[ - "WEIGHT_FILE_PATH"] = model_output_dir + "/model.caffemodel" - urllib.urlretrieve(model_config["weight_file_path"], - os.environ["WEIGHT_FILE_PATH"]) + download_model_files(model_config["model_file_path"], + model_output_dir, model_config.get("weight_file_path", "")) if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate"\ or FLAGS.mode == "benchmark" or FLAGS.mode == "all": diff --git a/tools/merge_libs.sh b/tools/merge_libs.sh index cc4cd4b054d4b14c8a1b25e4dbfd4d582d054884..90bfa7d96a104775bc344975649c47e411f783a3 100644 --- a/tools/merge_libs.sh +++ b/tools/merge_libs.sh @@ -41,14 +41,13 @@ LIBMACE_TEMP_DIR=`mktemp -d -t libmace.XXXX` echo "create ${LIBMACE_BUILD_DIR}/${TARGET_ABI}/libmace_${PROJECT_NAME}.${TARGET_SOC}.a" > ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri if [ x"$TARGET_ABI" = x"host" ]; then - echo "addlib bazel-bin/mace/codegen/libgenerated_opencl_prod.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri + echo "addlib bazel-bin/mace/codegen/libgenerated_opencl.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/codegen/libgenerated_tuning_params.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri else - echo "addlib bazel-bin/mace/codegen/libgenerated_opencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri + echo "addlib bazel-bin/mace/codegen/libgenerated_opencl.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/codegen/libgenerated_tuning_params.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/codegen/libgenerated_version.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/core/libcore.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri - echo "addlib bazel-bin/mace/core/libopencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/kernels/libkernels.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/utils/libutils.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/utils/libutils_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri diff --git a/tools/sh_commands.py b/tools/sh_commands.py index a4a16a5613a033a0860f56b27555fa0bd49c66e2..c0742c5d32274e1f56c82a43efd2c2961e704b81 100644 --- a/tools/sh_commands.py +++ b/tools/sh_commands.py @@ -69,22 +69,20 @@ def adb_run(serialno, host_bin_path, bin_name, device_bin_path="/data/local/tmp/mace"): host_bin_full_path = "%s/%s" % (host_bin_path, bin_name) device_bin_full_path = "%s/%s" % (device_bin_path, bin_name) - device_cl_path = "%s/cl" % 
device_bin_path props = adb_getprop_by_serialno(serialno) print("=====================================================================") print("Run on device: %s, %s, %s" % (serialno, props["ro.board.platform"], props["ro.product.model"])) sh.adb("-s", serialno, "shell", "rm -rf %s" % device_bin_path) sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_bin_path) - sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_cl_path) print("Push %s to %s" % (host_bin_full_path, device_bin_full_path)) sh.adb("-s", serialno, "push", host_bin_full_path, device_bin_full_path) print("Run %s" % device_bin_full_path) stdout_buff=[] process_output = make_output_processor(stdout_buff) p = sh.adb("-s", serialno, "shell", - "MACE_OPENCL_PROFILING=%d MACE_KERNEL_PATH=%s MACE_CPP_MIN_VLOG_LEVEL=%d %s %s" % - (opencl_profiling, device_cl_path, vlog_level, device_bin_full_path, args), + "MACE_OPENCL_PROFILING=%d MACE_CPP_MIN_VLOG_LEVEL=%d %s %s" % + (opencl_profiling, vlog_level, device_bin_full_path, args), _out=process_output, _bg=True, _err_to_out=True) p.wait() return "".join(stdout_buff) @@ -130,6 +128,7 @@ def bazel_target_to_bin(target): ################################ # TODO this should be refactored def gen_encrypted_opencl_source(codegen_path="mace/codegen"): + sh.mkdir("-p", "%s/opencl" % codegen_path) sh.python("mace/python/tools/encrypt_opencl_codegen.py", "--cl_kernel_dir=./mace/kernels/opencl/cl/", "--output_path=%s/opencl/opencl_encrypt_program.cc" % codegen_path) @@ -139,6 +138,11 @@ def gen_mace_version(codegen_path="mace/codegen"): sh.bash("mace/tools/git/gen_version_source.sh", "%s/version/version.cc" % codegen_path) +def gen_compiled_opencl_source(codegen_path="mace/codegen"): + sh.mkdir("-p", "%s/opencl" % codegen_path) + sh.python("mace/python/tools/opencl_codegen.py", + "--output_path=%s/opencl/opencl_compiled_program.cc" % codegen_path) + ################################ # falcon ################################ diff --git a/tools/tuning_run.sh b/tools/tuning_run.sh index c4e8dbe99e403b6d067b4a802022f341e6a49141..98fc9e092b88b55ec573bc688c95e14ec3242149 100644 --- a/tools/tuning_run.sh +++ b/tools/tuning_run.sh @@ -1,10 +1,10 @@ #!/bin/bash Usage() { - echo "Usage: bash tools/tuning_run.sh target_soc model_output_dir round tuning production_mode" + echo "Usage: bash tools/tuning_run.sh target_soc model_output_dir round tuning " } -if [ $# -lt 7 ]; then +if [ $# -lt 6 ]; then Usage exit 1 fi @@ -16,9 +16,8 @@ TARGET_SOC=$1 MODEL_OUTPUT_DIR=$2 ROUND=$3 TUNING_OR_NOT=$4 -PRODUCTION_MODE=$5 -RESTART_ROUND=$6 -OPTION_ARGS=$7 +RESTART_ROUND=$5 +OPTION_ARGS=$6 echo $OPTION_ARGS @@ -45,16 +44,14 @@ if [ x"$TARGET_ABI" = x"host" ]; then --restart_round=1 \ $OPTION_ARGS || exit 1 else - if [[ "${TUNING_OR_NOT}" != "0" && "$PRODUCTION_MODE" != 1 ]];then + if [[ "${TUNING_OR_NOT}" != "0" ]];then tuning_flag=1 else tuning_flag=0 fi adb -s $DEVICE_ID shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1 - if [ "$PRODUCTION_MODE" = 0 ]; then - adb -s $DEVICE_ID shell "mkdir -p ${KERNEL_DIR}" || exit 1 - fi + adb -s $DEVICE_ID shell "mkdir -p ${COMPILED_PROGRAM_DIR}" || exit 1 IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}" for NAME in "${INPUT_NAMES[@]}";do @@ -72,7 +69,7 @@ else MACE_TUNING=${tuning_flag} \ MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \ MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \ - MACE_KERNEL_PATH=$KERNEL_DIR \ + MACE_CL_PROGRAM_PATH=$COMPILED_PROGRAM_DIR \ MACE_LIMIT_OPENCL_KERNEL_TIME=${LIMIT_OPENCL_KERNEL_TIME} \ ${PHONE_DATA_DIR}/mace_run \ --input_node="${INPUT_NODES}" \
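
The md5sum helper added to tools/mace_tools.py above hashes the model_file_path string (not the file contents), so each configured model gets a stable, per-path build directory. A minimal sketch of that idea, assuming a hypothetical model path and output root (the real script also folds in the project name, target SoC, and target ABI):

    import hashlib
    import os

    def md5sum(s):
        # Hash the path string itself; the digest only needs to keep build
        # directories for different model files from colliding.
        md5 = hashlib.md5()
        md5.update(s.encode("utf-8"))  # .encode() is only needed on Python 3; the patch targets Python 2
        return md5.hexdigest()

    model_file_path = "https://example.com/models/mobilenet_v1.pb"   # hypothetical
    model_output_dir = os.path.join("build", "mobilenet_v1", md5sum(model_file_path))
    print(model_output_dir)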
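download_model_files, also introduced above, only fetches a model (and, for Caffe, a weight file) when the configured path is an http:// or https:// URL; local paths are left alone. A rough sketch of the same logic, written to run on either Python version (URLs and the output directory below are placeholders; the patch itself uses Python 2's urllib.urlretrieve):

    import os
    try:
        from urllib.request import urlretrieve   # Python 3
    except ImportError:
        from urllib import urlretrieve            # Python 2, as in the patch

    def download_model_files(model_file_path, model_output_dir, weight_file_path=""):
        # Remote files are cached in the model build directory as model.pb /
        # model.caffemodel; anything that is not a URL is assumed to be local.
        if model_file_path.startswith(("http://", "https://")):
            urlretrieve(model_file_path, os.path.join(model_output_dir, "model.pb"))
        if weight_file_path.startswith(("http://", "https://")):
            urlretrieve(weight_file_path, os.path.join(model_output_dir, "model.caffemodel"))

    download_model_files("https://example.com/mobilenet_v1.pb", "/tmp/mace_build")  # hypothetical

The helper in the patch additionally exports MODEL_FILE_PATH / WEIGHT_FILE_PATH into the environment so the downstream shell scripts pick up the downloaded copies; the sketch omits that step.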
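The tuning_run.sh changes above replace MACE_KERNEL_PATH with MACE_CL_PROGRAM_PATH (pointing at COMPILED_PROGRAM_DIR) and drop the production_mode flag, so a device run is configured purely through environment variables. A condensed sketch, in the spirit of sh_commands.adb_run, of how such a command line could be assembled; the device paths and node names are illustrative only, not values taken from the patch:

    def mace_run_command(phone_data_dir, compiled_program_dir,
                         input_nodes, tuning=True, vlog_level=0):
        # Environment consumed by mace_run on the device; MACE_CL_PROGRAM_PATH
        # replaces the old MACE_KERNEL_PATH now that compiled OpenCL programs
        # are cached under COMPILED_PROGRAM_DIR.
        env = {
            "MACE_TUNING": int(tuning),
            "MACE_CPP_MIN_VLOG_LEVEL": vlog_level,
            "MACE_RUN_PARAMETER_PATH": "%s/mace_run.config" % phone_data_dir,
            "MACE_CL_PROGRAM_PATH": compiled_program_dir,
        }
        env_prefix = " ".join("%s=%s" % (k, v) for k, v in sorted(env.items()))
        return "%s %s/mace_run --input_node=%s" % (env_prefix, phone_data_dir, input_nodes)

    print(mace_run_command("/data/local/tmp/mace_run",            # hypothetical device dirs
                           "/data/local/tmp/mace_run/cl_program",
                           "input"))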