提交 a2f2a3c6 编写于 作者: L liuqi

Refactor opencl kernel build logic.

上级 dd49fa97
......@@ -17,14 +17,8 @@ cc_library(
)
cc_library(
name = "generated_opencl_dev",
srcs = ["opencl/opencl_encrypt_program.cc"],
linkstatic = 1,
)
cc_library(
name = "generated_opencl_prod",
srcs = ["opencl/opencl_compiled_program.cc"],
name = "generated_opencl",
srcs = glob(["opencl/*.cc"]),
linkstatic = 1,
)
......
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
// This is a generated file, DO NOT EDIT
#include <map>
#include <string>
#include <vector>
namespace mace {
// Empty tuning-parameter table: this stub is generated for builds that ship
// without pre-tuned OpenCL kernel parameters (development mode). Production
// builds replace it with a populated, code-generated table.
extern const std::map<std::string, std::vector<unsigned int>>
kTuningParamsData = {};
}  // namespace mace
......@@ -27,8 +27,6 @@ cc_library(
"runtime/hexagon/*.cc",
],
exclude = [
"runtime/opencl/opencl_production.cc",
"runtime/opencl/opencl_development.cc",
"*_test.cc",
"runtime/hexagon/hexagon_controller_dummy.cc",
],
......@@ -50,15 +48,15 @@ cc_library(
]),
deps = [
":opencl_headers",
"//mace/utils",
"//mace/codegen:generated_opencl",
"//mace/codegen:generated_version",
"//mace/utils",
"@half//:half",
] + if_production_mode([
"//mace/codegen:generated_tuning_params",
"//mace/utils:utils_prod",
"//mace/core:opencl_prod",
]) + if_not_production_mode([
"//mace/utils:utils_dev",
"//mace/core:opencl_dev",
]),
)
......@@ -90,26 +88,3 @@ cc_library(
],
alwayslink = 1,
)
cc_library(
name = "opencl_dev",
srcs = ["runtime/opencl/opencl_development.cc"],
linkstatic = 1,
deps = [
":opencl_headers",
"//mace/codegen:generated_opencl_dev",
"//mace/utils",
],
)
cc_library(
name = "opencl_prod",
srcs = ["runtime/opencl/opencl_production.cc"],
linkstatic = 1,
deps = [
":opencl_headers",
"//mace/codegen:generated_opencl_prod",
"//mace/codegen:generated_tuning_params",
"//mace/utils",
],
)
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include "mace/core/file_storage_engine.h"
#include <fstream>
#include "mace/utils/logging.h"
namespace mace {
// Default storage directory; overwritten by MaceEngine with the
// user-provided internal storage path before any Read/Write.
std::string FileStorageEngine::kStoragePath = "/data/local/tmp";
// Stores only the file name; the directory comes from kStoragePath at
// Read/Write time, so changing kStoragePath later still takes effect.
FileStorageEngine::FileStorageEngine(const std::string &file_name):
file_name_(file_name){}
// Serializes |data| to kStoragePath/file_name_ in a simple binary layout:
//   int64 entry_count, then per entry:
//   int32 key_size, key bytes, int32 value_size, value bytes.
// A directory that cannot be opened for writing is logged, not fatal.
void FileStorageEngine::Write(
    const std::map<std::string, std::vector<unsigned char>> &data) {
  const std::string file_path = kStoragePath + "/" + file_name_;
  std::ofstream ofs(file_path, std::ios::binary | std::ios::out);
  if (!ofs.is_open()) {
    LOG(WARNING) << "Write failed, please check directory exists";
    return;
  }
  int64_t entry_count = data.size();
  ofs.write(reinterpret_cast<const char *>(&entry_count),
            sizeof(entry_count));
  for (const auto &entry : data) {
    const std::string &key = entry.first;
    const std::vector<unsigned char> &value = entry.second;
    const int32_t key_size = static_cast<int32_t>(key.size());
    ofs.write(reinterpret_cast<const char *>(&key_size), sizeof(key_size));
    ofs.write(key.c_str(), key_size);
    const int32_t value_size = static_cast<int32_t>(value.size());
    ofs.write(reinterpret_cast<const char *>(&value_size),
              sizeof(value_size));
    ofs.write(reinterpret_cast<const char *>(value.data()), value_size);
  }
  ofs.close();
}
void FileStorageEngine::Read(
std::map<std::string, std::vector<unsigned char>> *data) {
const std::string file_path = kStoragePath + "/" + file_name_;
std::ifstream ifs(file_path, std::ios::binary | std::ios::in);
if (ifs.is_open()) {
int64_t data_size = 0;
ifs.read(reinterpret_cast<char *>(&data_size), sizeof(data_size));
while (data_size--) {
int32_t key_size = 0;
ifs.read(reinterpret_cast<char *>(&key_size), sizeof(key_size));
std::string key(key_size, ' ');
ifs.read(&key[0], key_size);
int32_t value_size = 0;
ifs.read(reinterpret_cast<char *>(&value_size),
sizeof(value_size));
std::vector<unsigned char> program_binary(value_size);
ifs.read(reinterpret_cast<char *>(program_binary.data()),
value_size);
data->emplace(key, program_binary);
}
ifs.close();
} else {
LOG(INFO) << "No file to Read.";
}
}
}; // namespace mace
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#ifndef MACE_CORE_FILE_STORAGE_ENGINE_H_
#define MACE_CORE_FILE_STORAGE_ENGINE_H_
#include <map>
#include <string>
#include <vector>
#include "mace/public/mace_runtime.h"
namespace mace {
// KVStorageEngine backed by a single binary file under kStoragePath.
// Used by OpenCLRuntime to persist compiled OpenCL program binaries
// between runs when no user-provided storage engine is configured.
class FileStorageEngine : public KVStorageEngine {
 public:
  // |file_name| is the file (relative to kStoragePath) backing this store.
  // explicit: a file name should not silently convert into a storage engine.
  explicit FileStorageEngine(const std::string &file_name);

  // Serializes |data| to the backing file, replacing previous content.
  void Write(
      const std::map<std::string, std::vector<unsigned char>> &data) override;
  // Loads the backing file into |data|; a missing file leaves it untouched.
  void Read(
      std::map<std::string, std::vector<unsigned char>> *data) override;

  // Directory holding the backing file; set from the engine's internal
  // storage path before any Read/Write call.
  static std::string kStoragePath;

 private:
  std::string file_name_;
};
} // namespace mace
#endif // MACE_CORE_FILE_STORAGE_ENGINE_H_
......@@ -4,8 +4,10 @@
#include <memory>
#include "mace/core/file_storage_engine.h"
#include "mace/core/net.h"
#include "mace/core/runtime/hexagon/hexagon_control_wrapper.h"
#include "mace/core/runtime/opencl/opencl_runtime.h"
#include "mace/core/types.h"
#include "mace/public/mace.h"
......@@ -67,7 +69,8 @@ class MaceEngine::Impl {
explicit Impl(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes);
const std::vector<std::string> &output_nodes,
const std::string &internal_storage_path);
~Impl();
MaceStatus Run(const std::map<std::string, MaceTensor> &inputs,
......@@ -87,13 +90,16 @@ class MaceEngine::Impl {
MaceEngine::Impl::Impl(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes)
const std::vector<std::string> &output_nodes,
const std::string &internal_storage_path)
: op_registry_(new OperatorRegistry()),
device_type_(device_type),
ws_(new Workspace()),
net_(nullptr),
hexagon_controller_(nullptr) {
LOG(INFO) << "MACE version: " << MaceVersion();
// Set storage path for internal usage
FileStorageEngine::kStoragePath = internal_storage_path;
for (auto input_name : input_nodes) {
ws_->CreateTensor(MakeString("mace_input_node_", input_name, ":0"),
GetDeviceAllocator(device_type_), DT_FLOAT);
......@@ -173,6 +179,7 @@ MaceStatus MaceEngine::Impl::Run(
LOG(FATAL) << "Net run failed";
}
}
OpenCLRuntime::Global()->SaveBuiltCLProgram();
for (auto &output : *outputs) {
Tensor *output_tensor =
ws_->GetTensor(MakeString("mace_output_node_", output.first + ":0"));
......@@ -199,9 +206,11 @@ MaceStatus MaceEngine::Impl::Run(
MaceEngine::MaceEngine(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes) {
const std::vector<std::string> &output_nodes,
const std::string &internal_storage_path) {
impl_ = std::unique_ptr<MaceEngine::Impl>(
new MaceEngine::Impl(net_def, device_type, input_nodes, output_nodes));
new MaceEngine::Impl(net_def, device_type, input_nodes,
output_nodes, internal_storage_path));
}
MaceEngine::~MaceEngine() = default;
......
......@@ -15,6 +15,11 @@ void ConfigOpenCLRuntime(GPUPerfHint gpu_perf_hint,
OpenCLRuntime::Configure(gpu_perf_hint, gpu_priority_hint);
}
// Installs a user-provided storage engine for persisting compiled OpenCL
// binaries. Takes shared ownership of |storage_engine|.
void ConfigKVStorageEngine(std::shared_ptr<KVStorageEngine> storage_engine) {
  VLOG(1) << "Set internal KV Storage Engine";
  // Move instead of copy: avoids a needless atomic refcount increment on
  // the shared_ptr control block (the local is dead after this call).
  OpenCLRuntime::Configure(std::move(storage_engine));
}
void ConfigOmpThreads(int omp_num_threads) {
VLOG(1) << "Config CPU omp_num_threads: " << omp_num_threads;
SetOmpThreads(omp_num_threads);
......
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include <fstream>
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "mace/core/runtime/opencl/cl2_header.h"
#include "mace/utils/logging.h"
#include "mace/utils/utils.h"
namespace mace {
// Development-mode loader: looks up |program_name| in the embedded
// (obfuscated) kernel-source table and wraps the decoded text in
// |*program|. Always reports a source program via |*is_binary|.
// Returns false when the program name is unknown.
bool GetSourceOrBinaryProgram(const std::string &program_name,
                              const std::string &binary_file_name_prefix,
                              const cl::Context &context,
                              const cl::Device &device,
                              cl::Program *program,
                              bool *is_binary) {
  extern const std::map<std::string, std::vector<unsigned char>>
      kEncryptedProgramMap;
  *is_binary = false;
  const auto entry = kEncryptedProgramMap.find(program_name);
  if (entry == kEncryptedProgramMap.end()) {
    return false;
  }
  const std::string encrypted(entry->second.begin(), entry->second.end());
  cl::Program::Sources program_sources;
  program_sources.push_back(ObfuscateString(encrypted));
  *program = cl::Program(context, program_sources);
  return true;
}
} // namespace mace
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "mace/core/runtime/opencl/cl2_header.h"
#include "mace/utils/logging.h"
namespace mace {
// Production-mode loader: looks up |binary_file_name_prefix| in the embedded
// precompiled-binary table and wraps the binary in |*program|.
// Always reports a binary program via |*is_binary|.
// Returns false when no precompiled binary exists for this key.
bool GetSourceOrBinaryProgram(const std::string &program_name,
                              const std::string &binary_file_name_prefix,
                              const cl::Context &context,
                              const cl::Device &device,
                              cl::Program *program,
                              bool *is_binary) {
  extern const std::map<std::string, std::vector<unsigned char>>
      kCompiledProgramMap;
  *is_binary = true;
  const auto entry = kCompiledProgramMap.find(binary_file_name_prefix);
  if (entry == kCompiledProgramMap.end()) {
    return false;
  }
  *program = cl::Program(context, {device}, {entry->second});
  return true;
}
} // namespace mace
......@@ -11,34 +11,18 @@
#include <string>
#include <vector>
#include "mace/core/file_storage_engine.h"
#include "mace/core/runtime/opencl/opencl_extension.h"
#include "mace/public/mace.h"
#include "mace/utils/tuner.h"
namespace mace {
namespace {
bool WriteFile(const std::string &filename,
bool binary,
const std::vector<unsigned char> &content) {
std::ios_base::openmode mode = std::ios_base::out | std::ios_base::trunc;
if (binary) {
mode |= std::ios::binary;
}
std::ofstream ofs(filename, mode);
ofs.write(reinterpret_cast<const char *>(&content[0]),
content.size() * sizeof(char));
ofs.close();
if (ofs.fail()) {
LOG(ERROR) << "Failed to write to file " << filename;
return false;
}
return true;
}
} // namespace
extern const std::map<std::string, std::vector<unsigned char>>
kCompiledProgramMap;
extern const std::string kCompiledProgramPlatform;
extern const std::map<std::string, std::vector<unsigned char>>
kEncryptedProgramMap;
const std::string OpenCLErrorToString(cl_int error) {
switch (error) {
......@@ -194,19 +178,23 @@ void OpenCLProfilingTimer::ClearTiming() {
accumulated_micros_ = 0;
}
GPUPerfHint OpenCLRuntime::gpu_perf_hint_ = GPUPerfHint::PERF_DEFAULT;
GPUPriorityHint OpenCLRuntime::gpu_priority_hint_ =
GPUPerfHint OpenCLRuntime::kGPUPerfHint = GPUPerfHint::PERF_DEFAULT;
GPUPriorityHint OpenCLRuntime::kGPUPriorityHint =
GPUPriorityHint::PRIORITY_DEFAULT;
std::shared_ptr<KVStorageEngine> OpenCLRuntime::kStorageEngine(nullptr);
OpenCLRuntime *OpenCLRuntime::Global() {
static OpenCLRuntime runtime(gpu_perf_hint_, gpu_priority_hint_);
static OpenCLRuntime runtime(kGPUPerfHint, kGPUPriorityHint);
return &runtime;
}
void OpenCLRuntime::Configure(GPUPerfHint gpu_perf_hint,
GPUPriorityHint gpu_priority_hint) {
OpenCLRuntime::gpu_perf_hint_ = gpu_perf_hint;
OpenCLRuntime::gpu_priority_hint_ = gpu_priority_hint;
OpenCLRuntime::kGPUPerfHint = gpu_perf_hint;
OpenCLRuntime::kGPUPriorityHint = gpu_priority_hint;
}
// Installs a user-provided storage engine used to persist compiled OpenCL
// binaries. Takes shared ownership. Must be called before the first
// Global() call to affect the singleton's construction-time Read.
void OpenCLRuntime::Configure(std::shared_ptr<KVStorageEngine> storage_engine) {
OpenCLRuntime::kStorageEngine = std::move(storage_engine);
}
void GetAdrenoContextProperties(std::vector<cl_context_properties> *properties,
......@@ -259,9 +247,12 @@ OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint,
LOG(FATAL) << "No OpenCL platforms found";
}
cl::Platform default_platform = all_platforms[0];
VLOG(1) << "Using platform: " << default_platform.getInfo<CL_PLATFORM_NAME>()
<< ", " << default_platform.getInfo<CL_PLATFORM_PROFILE>() << ", "
<< default_platform.getInfo<CL_PLATFORM_VERSION>();
std::stringstream ss;
ss << default_platform.getInfo<CL_PLATFORM_NAME>()
<< ", " << default_platform.getInfo<CL_PLATFORM_PROFILE>() << ", "
<< default_platform.getInfo<CL_PLATFORM_VERSION>();
platform_info_ = ss.str();
VLOG(1) << "Using platform: " << platform_info_;
// get default device (CPUs, GPUs) of the default platform
std::vector<cl::Device> all_devices;
......@@ -278,10 +269,10 @@ OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint,
gpu_detected = true;
const std::string device_name = device.getInfo<CL_DEVICE_NAME>();
gpu_type_ = ParseGPUTypeFromDeviceName(device_name);
gpu_type_ = ParseGPUType(device_name);
const std::string device_version = device.getInfo<CL_DEVICE_VERSION>();
opencl_version_ = device_version.substr(7, 3);
opencl_version_ = ParseDeviceVersion(device_version);
VLOG(1) << "Using device: " << device_name;
break;
......@@ -320,9 +311,18 @@ OpenCLRuntime::OpenCLRuntime(GPUPerfHint gpu_perf_hint,
&err);
MACE_CHECK_CL_SUCCESS(err);
const char *kernel_path = getenv("MACE_KERNEL_PATH");
this->kernel_path_ =
std::string(kernel_path == nullptr ? "" : kernel_path) + "/";
this->program_map_changed = false;
if (kStorageEngine == nullptr) {
const std::string cl_compiled_file_name = "mace_cl_compiled_program.bin";
kStorageEngine = std::move(
std::unique_ptr<FileStorageEngine>(
new FileStorageEngine(cl_compiled_file_name)));
}
if (platform_info_ != kCompiledProgramPlatform) {
kStorageEngine->Read(&program_content_map_);
}
}
OpenCLRuntime::~OpenCLRuntime() {
......@@ -340,45 +340,48 @@ cl::Device &OpenCLRuntime::device() { return *device_; }
cl::CommandQueue &OpenCLRuntime::command_queue() { return *command_queue_; }
std::string OpenCLRuntime::GenerateCLBinaryFilenamePrefix(
const std::string &filename_msg) {
// TODO(heliangliang) This can be long and slow, fix it
std::string filename_prefix = filename_msg;
for (auto it = filename_prefix.begin(); it != filename_prefix.end(); ++it) {
if (*it == ' ' || *it == '-' || *it == '=') {
*it = '_';
bool OpenCLRuntime::BuildProgramFromBinary(
const std::string &built_program_key,
const std::string &build_options_str,
cl::Program *program) {
// Find from binary
if (kCompiledProgramPlatform != platform_info_) return false;
auto it_binary = kCompiledProgramMap.find(built_program_key);
if (it_binary == kCompiledProgramMap.end()) return false;
*program = cl::Program(context(), {device()}, {it_binary->second});
cl_int ret = program->build({device()}, build_options_str.c_str());
if (ret != CL_SUCCESS) {
if (program->getBuildInfo<CL_PROGRAM_BUILD_STATUS>(device()) ==
CL_BUILD_ERROR) {
std::string build_log =
program->getBuildInfo<CL_PROGRAM_BUILD_LOG>(device());
LOG(INFO) << "Program build log: " << build_log;
}
LOG(WARNING) << "Build program "
<< built_program_key << " from Binary failed:"
<< (ret == CL_INVALID_PROGRAM ? "CL_INVALID_PROGRAM, possible "
"cause 1: the MACE library is built from SoC 1 but is "
"used on different SoC 2, possible cause 2: the MACE "
"buffer is corrupted make sure your code has no "
"out-of-range memory writing" : MakeString(ret));
return false;
}
return MACE_OBFUSCATE_SYMBOL(filename_prefix);
VLOG(3) << "Program from Binary: " << built_program_key;
return true;
}
extern bool GetSourceOrBinaryProgram(const std::string &program_name,
const std::string &binary_file_name_prefix,
const cl::Context &context,
const cl::Device &device,
cl::Program *program,
bool *is_opencl_binary);
void OpenCLRuntime::BuildProgram(const std::string &program_name,
const std::string &built_program_key,
const std::string &build_options,
cl::Program *program) {
MACE_CHECK_NOTNULL(program);
bool OpenCLRuntime::BuildProgramFromCache(
const std::string &built_program_key,
const std::string &build_options_str,
cl::Program *program) {
// Find from binary
auto it_content = this->program_content_map_.find(built_program_key);
if (it_content == this->program_content_map_.end()) {
return false;
}
std::string binary_file_name_prefix =
GenerateCLBinaryFilenamePrefix(built_program_key);
std::vector<unsigned char> program_vec;
bool is_opencl_binary;
const bool found =
GetSourceOrBinaryProgram(program_name, binary_file_name_prefix, context(),
device(), program, &is_opencl_binary);
MACE_CHECK(found, "Program not found for ",
is_opencl_binary ? "binary: " : "source: ", built_program_key);
// Build program
std::string build_options_str =
build_options + " -Werror -cl-mad-enable -cl-fast-relaxed-math";
// TODO(heliangliang) -cl-unsafe-math-optimizations -cl-fast-relaxed-math
*program = cl::Program(context(), {device()}, {it_content->second});
cl_int ret = program->build({device()}, build_options_str.c_str());
if (ret != CL_SUCCESS) {
if (program->getBuildInfo<CL_PROGRAM_BUILD_STATUS>(device()) ==
......@@ -387,20 +390,43 @@ void OpenCLRuntime::BuildProgram(const std::string &program_name,
program->getBuildInfo<CL_PROGRAM_BUILD_LOG>(device());
LOG(INFO) << "Program build log: " << build_log;
}
LOG(FATAL) << "Build program from "
<< (is_opencl_binary ? "binary: " : "source: ")
<< built_program_key << " failed: "
<< (ret == CL_INVALID_PROGRAM ? "CL_INVALID_PROGRAM, possible "
"cause 1: the MACE library is built from SoC 1 but is "
"used on different SoC 2, possible cause 2: the MACE "
"buffer is corrupted make sure your code has no "
"out-of-range memory writing" : MakeString(ret));
LOG(WARNING) << "Build program "
<< built_program_key << " from Cache failed:"
<< MakeString(ret);
return false;
}
VLOG(3) << "Program from Cache: " << built_program_key;
return true;
}
if (!is_opencl_binary) {
// Write binary if necessary
std::string binary_filename =
kernel_path_ + binary_file_name_prefix + ".bin";
void OpenCLRuntime::BuildProgramFromSource(
const std::string &program_name,
const std::string &built_program_key,
const std::string &build_options_str,
cl::Program *program) {
// Find from source
auto it_source = kEncryptedProgramMap.find(program_name);
if (it_source != kEncryptedProgramMap.end()) {
cl::Program::Sources sources;
std::string source(it_source->second.begin(), it_source->second.end());
std::string kernel_source = ObfuscateString(source);
sources.push_back(kernel_source);
*program = cl::Program(context(), sources);
cl_int ret = program->build({device()}, build_options_str.c_str());
if (ret != CL_SUCCESS) {
if (program->getBuildInfo<CL_PROGRAM_BUILD_STATUS>(device()) ==
CL_BUILD_ERROR) {
std::string build_log =
program->getBuildInfo<CL_PROGRAM_BUILD_LOG>(device());
LOG(INFO) << "Program build log: " << build_log;
}
LOG(WARNING) << "Build program "
<< program_name << " from source failed: "
<< MakeString(ret);
return;
}
// Keep built program binary
size_t device_list_size = 1;
std::unique_ptr<size_t[]> program_binary_sizes(
new size_t[device_list_size]);
......@@ -424,10 +450,36 @@ void OpenCLRuntime::BuildProgram(const std::string &program_name,
reinterpret_cast<unsigned char const *>(program_binaries[0].get()) +
program_binary_sizes[0]);
MACE_CHECK(WriteFile(binary_filename, true, content));
this->program_content_map_.emplace(built_program_key,
content);
this->program_map_changed = true;
VLOG(3) << "Program from source: " << built_program_key ;
}
}
// Builds |*program| for |program_name|, preferring in order:
//   1. a precompiled binary shipped with the library,
//   2. a binary cached by a previous run (storage engine),
//   3. compilation from the embedded (obfuscated) source.
void OpenCLRuntime::BuildProgram(const std::string &program_name,
                                 const std::string &built_program_key,
                                 const std::string &build_options,
                                 cl::Program *program) {
  MACE_CHECK_NOTNULL(program);

  const std::string build_options_str =
      build_options + " -Werror -cl-mad-enable -cl-fast-relaxed-math";
  // TODO(heliangliang) -cl-unsafe-math-optimizations -cl-fast-relaxed-math
  if (BuildProgramFromBinary(built_program_key, build_options_str, program)) {
    return;
  }
  if (BuildProgramFromCache(built_program_key, build_options_str, program)) {
    return;
  }
  // Fallback to source.
  BuildProgramFromSource(program_name, built_program_key, build_options_str,
                         program);
}
cl::Kernel OpenCLRuntime::BuildKernel(
const std::string &program_name,
const std::string &kernel_name,
......@@ -451,6 +503,13 @@ cl::Kernel OpenCLRuntime::BuildKernel(
return cl::Kernel(program, kernel_name.c_str());
}
// Flushes newly compiled program binaries to the storage engine.
// No-op when nothing was compiled since the last save.
void OpenCLRuntime::SaveBuiltCLProgram() {
  if (!program_map_changed) return;
  kStorageEngine->Write(program_content_map_);
  program_map_changed = false;
}
void OpenCLRuntime::GetCallStats(const cl::Event &event, CallStats *stats) {
if (stats != nullptr) {
stats->start_micros =
......@@ -472,7 +531,6 @@ uint64_t OpenCLRuntime::GetKernelMaxWorkGroupSize(const cl::Kernel &kernel) {
return size;
}
// TODO(liuqi): not compatible with mali gpu.
uint64_t OpenCLRuntime::GetKernelWaveSize(const cl::Kernel &kernel) {
uint64_t size = 0;
kernel.getWorkGroupInfo(*device_, CL_KERNEL_WAVE_SIZE_QCOM, &size);
......@@ -488,7 +546,11 @@ const GPUType OpenCLRuntime::gpu_type() const {
return gpu_type_;
}
const GPUType OpenCLRuntime::ParseGPUTypeFromDeviceName(
// Returns the "<name>, <profile>, <version>" description of the OpenCL
// platform, cached at construction time.
const std::string OpenCLRuntime::platform_info() const {
return platform_info_;
}
const GPUType OpenCLRuntime::ParseGPUType(
const std::string &device_name) {
constexpr const char *kQualcommAdrenoGPUStr = "QUALCOMM Adreno(TM)";
constexpr const char *kMaliGPUStr = "Mali";
......@@ -504,5 +566,13 @@ const GPUType OpenCLRuntime::ParseGPUTypeFromDeviceName(
return GPUType::UNKNOWN;
}
}
const std::string OpenCLRuntime::ParseDeviceVersion(
const std::string &device_version) {
// OpenCL Device version string format:
// OpenCL<space><major_version.minor_version><space>\
// <vendor-specific information>
auto words = Split(device_version, ' ');
return words[1];
}
} // namespace mace
......@@ -55,21 +55,26 @@ class OpenCLRuntime {
public:
static OpenCLRuntime *Global();
static void Configure(GPUPerfHint, GPUPriorityHint);
static void Configure(std::shared_ptr<KVStorageEngine> storage_engine);
cl::Context &context();
cl::Device &device();
cl::CommandQueue &command_queue();
const GPUType gpu_type() const;
const std::string platform_info() const;
cl::Kernel BuildKernel(const std::string &program_name,
const std::string &kernel_name,
const std::set<std::string> &build_options);
void GetCallStats(const cl::Event &event, CallStats *stats);
uint64_t GetDeviceMaxWorkGroupSize();
uint64_t GetKernelMaxWorkGroupSize(const cl::Kernel &kernel);
uint64_t GetKernelWaveSize(const cl::Kernel &kernel);
const bool IsNonUniformWorkgroupsSupported();
const GPUType ParseGPUTypeFromDeviceName(const std::string &device_name);
const GPUType gpu_type() const;
cl::Kernel BuildKernel(const std::string &program_name,
const std::string &kernel_name,
const std::set<std::string> &build_options);
const GPUType ParseGPUType(const std::string &device_name);
const std::string ParseDeviceVersion(const std::string &device_version);
void SaveBuiltCLProgram();
private:
OpenCLRuntime(GPUPerfHint, GPUPriorityHint);
......@@ -81,7 +86,19 @@ class OpenCLRuntime {
const std::string &binary_file_name,
const std::string &build_options,
cl::Program *program);
std::string GenerateCLBinaryFilenamePrefix(const std::string &filename_msg);
bool BuildProgramFromBinary(
const std::string &built_program_key,
const std::string &build_options_str,
cl::Program *program);
bool BuildProgramFromCache(
const std::string &built_program_key,
const std::string &build_options_str,
cl::Program *program);
void BuildProgramFromSource(
const std::string &program_name,
const std::string &built_program_key,
const std::string &build_options_str,
cl::Program *program);
private:
// All OpenCL object must be a pointer and manually deleted before unloading
......@@ -90,13 +107,16 @@ class OpenCLRuntime {
std::shared_ptr<cl::Device> device_;
std::shared_ptr<cl::CommandQueue> command_queue_;
std::map<std::string, cl::Program> built_program_map_;
std::map<std::string, std::vector<unsigned char>> program_content_map_;
std::mutex program_build_mutex_;
std::string kernel_path_;
GPUType gpu_type_;
std::string opencl_version_;
std::string platform_info_;
bool program_map_changed;
static GPUPerfHint gpu_perf_hint_;
static GPUPriorityHint gpu_priority_hint_;
static GPUPerfHint kGPUPerfHint;
static GPUPriorityHint kGPUPriorityHint;
static std::shared_ptr<KVStorageEngine> kStorageEngine;
};
} // namespace mace
......
......@@ -2,8 +2,8 @@
load("//mace:mace.bzl", "if_openmp_enabled")
cc_binary(
name = "mace_run",
srcs = ["mace_run.cc"],
name = "example",
srcs = ["example.cc"],
linkopts = if_openmp_enabled(["-fopenmp"]),
linkstatic = 1,
deps = [
......
//
// Copyright (c) 2017 XiaoMi All rights reserved.
//
/**
* Usage:
* mace_run --model=mobi_mace.pb \
* --input=input_node \
* --output=output_node \
* --input_shape=1,224,224,3 \
* --output_shape=1,224,224,2 \
* --input_file=input_data \
* --output_file=mace.out \
* --model_data_file=model_data.data \
* --device=OPENCL
*/
#include <malloc.h>
#include <stdint.h>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <numeric>
#include "gflags/gflags.h"
#include "mace/public/mace.h"
#include "mace/public/mace_runtime.h"
#include "mace/utils/env_time.h"
#include "mace/utils/logging.h"
// #include "mace/codegen/models/${MACE_MODEL_TAG}/${MACE_MODEL_TAG}.h" instead
namespace mace {
namespace MACE_MODEL_TAG {
extern const unsigned char *LoadModelData(const char *model_data_file);
extern void UnloadModelData(const unsigned char *model_data);
extern NetDef CreateNet(const unsigned char *model_data);
extern const std::string ModelChecksum();
} // namespace MACE_MODEL_TAG
} // namespace mace
namespace mace {
namespace examples {
namespace str_util {
std::vector<std::string> Split(const std::string &str, char delims) {
std::vector<std::string> result;
std::string tmp = str;
while (!tmp.empty()) {
size_t next_offset = tmp.find(delims);
result.push_back(tmp.substr(0, next_offset));
if (next_offset == std::string::npos) {
break;
} else {
tmp = tmp.substr(next_offset + 1);
}
}
return result;
}
} // namespace str_util
// Parses a comma-separated dimension list (e.g. "1,224,224,3") and appends
// each dimension to |*shape|. Non-numeric tokens parse as 0 (atoi
// semantics); an empty string appends nothing.
void ParseShape(const std::string &str, std::vector<int64_t> *shape) {
  std::string remaining = str;
  while (!remaining.empty()) {
    shape->push_back(atoi(remaining.data()));
    const size_t comma = remaining.find(",");
    if (comma == std::string::npos) break;
    remaining = remaining.substr(comma + 1);
  }
}
// Sanitizes a node name for use in file names: every non-alphanumeric
// character becomes '_'.
std::string FormatName(const std::string input) {
  std::string res = input;
  for (size_t i = 0; i < res.size(); ++i) {
    // Cast fixes UB: isalnum requires a value representable as unsigned
    // char; plain char can be negative for non-ASCII bytes (C standard).
    if (!isalnum(static_cast<unsigned char>(res[i]))) res[i] = '_';
  }
  return res;
}
// Maps a device name flag to the runtime DeviceType.
// Unrecognized names fall back to CPU.
DeviceType ParseDeviceType(const std::string &device_str) {
  if (device_str == "CPU") return DeviceType::CPU;
  if (device_str == "NEON") return DeviceType::NEON;
  if (device_str == "OPENCL") return DeviceType::OPENCL;
  if (device_str == "HEXAGON") return DeviceType::HEXAGON;
  return DeviceType::CPU;
}
// ---- Model I/O node flags ----
DEFINE_string(input_node,
"input_node0,input_node1",
"input nodes, separated by comma");
DEFINE_string(input_shape,
"1,224,224,3:1,1,1,10",
"input shapes, separated by colon and comma");
DEFINE_string(output_node,
"output_node0,output_node1",
"output nodes, separated by comma");
DEFINE_string(output_shape,
"1,224,224,2:1,1,1,10",
"output shapes, separated by colon and comma");
// ---- Data file flags ----
DEFINE_string(input_file,
"",
"input file name | input file prefix for multiple inputs.");
DEFINE_string(output_file,
"",
"output file name | output file prefix for multiple outputs");
DEFINE_string(model_data_file,
"",
"model data file name, used when EMBED_MODEL_DATA set to 0");
// ---- Runtime configuration flags ----
DEFINE_string(device, "OPENCL", "CPU/NEON/OPENCL/HEXAGON");
DEFINE_int32(round, 1, "round");
DEFINE_int32(restart_round, 1, "restart round");
DEFINE_int32(malloc_check_cycle, -1, "malloc debug check cycle, -1 to disable");
DEFINE_int32(gpu_perf_hint, 2, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH");
DEFINE_int32(gpu_priority_hint, 1, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH");
DEFINE_int32(omp_num_threads, 8, "num of openmp threads");
DEFINE_int32(cpu_power_option,
0,
"0:DEFAULT/1:HIGH_PERFORMANCE/2:BATTERY_SAVE");
// Loads the model, configures the runtime, runs FLAGS_round inferences and
// dumps each output tensor to "<FLAGS_output_file>_<node>".
// Returns true on success, false when an input file cannot be opened.
bool RunModel(const std::vector<std::string> &input_names,
              const std::vector<std::vector<int64_t>> &input_shapes,
              const std::vector<std::string> &output_names,
              const std::vector<std::vector<int64_t>> &output_shapes) {
  // load model
  const unsigned char *model_data =
      mace::MACE_MODEL_TAG::LoadModelData(FLAGS_model_data_file.c_str());
  NetDef net_def = mace::MACE_MODEL_TAG::CreateNet(model_data);

  DeviceType device_type = ParseDeviceType(FLAGS_device);
  // config runtime
  mace::ConfigOmpThreads(FLAGS_omp_num_threads);
  mace::ConfigCPUPowerOption(
      static_cast<CPUPowerOption>(FLAGS_cpu_power_option));
  if (device_type == DeviceType::OPENCL) {
    mace::ConfigOpenCLRuntime(
        static_cast<GPUPerfHint>(FLAGS_gpu_perf_hint),
        static_cast<GPUPriorityHint>(FLAGS_gpu_priority_hint));
  }

  const std::string kernel_file_path =
      "/data/local/tmp/mace_run/cl";

  // Init model
  mace::MaceEngine engine(&net_def, device_type, input_names,
                          output_names, kernel_file_path);
  // The raw model weights are only needed during engine construction on
  // these devices; free them as soon as possible.
  if (device_type == DeviceType::OPENCL || device_type == DeviceType::HEXAGON) {
    mace::MACE_MODEL_TAG::UnloadModelData(model_data);
  }

  const size_t input_count = input_names.size();
  const size_t output_count = output_names.size();

  std::map<std::string, mace::MaceTensor> inputs;
  std::map<std::string, mace::MaceTensor> outputs;
  for (size_t i = 0; i < input_count; ++i) {
    // Allocate input and output
    int64_t input_size =
        std::accumulate(input_shapes[i].begin(), input_shapes[i].end(), 1,
                        std::multiplies<int64_t>());
    auto buffer_in = std::shared_ptr<float>(new float[input_size],
                                            std::default_delete<float[]>());
    // load input
    std::ifstream in_file(FLAGS_input_file + "_" + FormatName(input_names[i]),
                          std::ios::in | std::ios::binary);
    if (in_file.is_open()) {
      in_file.read(reinterpret_cast<char *>(buffer_in.get()),
                   input_size * sizeof(float));
      in_file.close();
    } else {
      LOG(INFO) << "Open input file failed";
      // Bug fix: this function returns bool; the previous `return -1`
      // converted to true, so callers treated the failure as success.
      return false;
    }
    inputs[input_names[i]] = mace::MaceTensor(input_shapes[i], buffer_in);
  }

  for (size_t i = 0; i < output_count; ++i) {
    int64_t output_size =
        std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1,
                        std::multiplies<int64_t>());
    auto buffer_out = std::shared_ptr<float>(new float[output_size],
                                             std::default_delete<float[]>());
    outputs[output_names[i]] = mace::MaceTensor(output_shapes[i], buffer_out);
  }

  LOG(INFO) << "Warm up run";
  engine.Run(inputs, &outputs);

  if (FLAGS_round > 0) {
    LOG(INFO) << "Run model";
    for (int i = 0; i < FLAGS_round; ++i) {
      engine.Run(inputs, &outputs);
    }
  }

  for (size_t i = 0; i < output_count; ++i) {
    std::string output_name =
        FLAGS_output_file + "_" + FormatName(output_names[i]);
    std::ofstream out_file(output_name, std::ios::binary);
    int64_t output_size =
        std::accumulate(output_shapes[i].begin(), output_shapes[i].end(), 1,
                        std::multiplies<int64_t>());
    out_file.write(
        reinterpret_cast<char *>(outputs[output_names[i]].data().get()),
        output_size * sizeof(float));
    out_file.flush();
    out_file.close();
  }

  return true;
}
// Parses command-line flags, runs the model FLAGS_restart_round times and
// returns a process exit status: 0 on success, -1 on failure.
int Main(int argc, char **argv) {
  gflags::SetUsageMessage("some usage message");
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  LOG(INFO) << "mace version: " << MaceVersion();
  LOG(INFO) << "model checksum: " << mace::MACE_MODEL_TAG::ModelChecksum();
  LOG(INFO) << "input node: " << FLAGS_input_node;
  LOG(INFO) << "input shape: " << FLAGS_input_shape;
  LOG(INFO) << "output node: " << FLAGS_output_node;
  LOG(INFO) << "output shape: " << FLAGS_output_shape;
  LOG(INFO) << "input_file: " << FLAGS_input_file;
  LOG(INFO) << "output_file: " << FLAGS_output_file;
  LOG(INFO) << "model_data_file: " << FLAGS_model_data_file;
  LOG(INFO) << "device: " << FLAGS_device;
  LOG(INFO) << "round: " << FLAGS_round;
  LOG(INFO) << "restart_round: " << FLAGS_restart_round;
  LOG(INFO) << "gpu_perf_hint: " << FLAGS_gpu_perf_hint;
  LOG(INFO) << "gpu_priority_hint: " << FLAGS_gpu_priority_hint;
  LOG(INFO) << "omp_num_threads: " << FLAGS_omp_num_threads;
  LOG(INFO) << "cpu_power_option: " << FLAGS_cpu_power_option;

  std::vector<std::string> input_names = str_util::Split(FLAGS_input_node, ',');
  std::vector<std::string> output_names =
      str_util::Split(FLAGS_output_node, ',');
  std::vector<std::string> input_shapes =
      str_util::Split(FLAGS_input_shape, ':');
  std::vector<std::string> output_shapes =
      str_util::Split(FLAGS_output_shape, ':');

  const size_t input_count = input_shapes.size();
  const size_t output_count = output_shapes.size();
  std::vector<std::vector<int64_t>> input_shape_vec(input_count);
  std::vector<std::vector<int64_t>> output_shape_vec(output_count);
  for (size_t i = 0; i < input_count; ++i) {
    ParseShape(input_shapes[i], &input_shape_vec[i]);
  }
  for (size_t i = 0; i < output_count; ++i) {
    ParseShape(output_shapes[i], &output_shape_vec[i]);
  }

  // Bug fix: `ret` was uninitialized; with --restart_round=0 the loop body
  // never runs and the check below read an indeterminate value (UB).
  bool ret = false;
  // NOTE(review): every iteration writes the shared `ret` without
  // synchronization, so only one round's status survives — confirm intended.
#pragma omp parallel for
  for (int i = 0; i < FLAGS_restart_round; ++i) {
    VLOG(0) << "restart round " << i;
    ret =
        RunModel(input_names, input_shape_vec, output_names, output_shape_vec);
  }
  if (ret) {
    return 0;
  } else {
    return -1;
  }
}
} // namespace examples
} // namespace mace
int main(int argc, char **argv) { mace::examples::Main(argc, argv); }
......@@ -54,7 +54,8 @@ class MaceEngine {
explicit MaceEngine(const NetDef *net_def,
DeviceType device_type,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes);
const std::vector<std::string> &output_nodes,
const std::string &internal_storage_path);
~MaceEngine();
MaceStatus Run(const std::map<std::string, MaceTensor> &inputs,
......
......@@ -8,6 +8,11 @@
#ifndef MACE_PUBLIC_MACE_RUNTIME_H_
#define MACE_PUBLIC_MACE_RUNTIME_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace mace {
enum GPUPerfHint {
......@@ -26,10 +31,20 @@ enum GPUPriorityHint {
enum CPUPowerOption { DEFAULT = 0, HIGH_PERFORMANCE = 1, BATTERY_SAVE = 2 };
// Abstract key/value storage backend supplied by the embedding app
// (e.g. to persist compiled OpenCL programs and tuning parameters).
class KVStorageEngine {
 public:
  // Polymorphic base held via smart pointer (see ConfigKVStorageEngine);
  // a virtual destructor makes delete-through-base well-defined.
  virtual ~KVStorageEngine() = default;
  // Persist the whole key -> byte-blob map.
  virtual void Write(
      const std::map<std::string, std::vector<unsigned char>> &data) = 0;
  // Load all stored key/value pairs into *data.
  virtual void Read(
      std::map<std::string, std::vector<unsigned char>> *data) = 0;
};
void ConfigOpenCLRuntime(GPUPerfHint, GPUPriorityHint);
void ConfigKVStorageEngine(std::shared_ptr<KVStorageEngine> storage_engine);
void ConfigOmpThreads(int omp_num_threads);
void ConfigCPUPowerOption(CPUPowerOption power_option);
} // namespace mace
#endif // MACE_PUBLIC_MACE_RUNTIME_H_
......@@ -27,7 +27,6 @@ def generate_cpp_source():
idx = 0
size, = struct.unpack("Q", binary_array[idx:idx+8])
print size
idx += 8
for _ in xrange(size):
key_size, = struct.unpack("i", binary_array[idx:idx+4])
......@@ -64,7 +63,7 @@ def parse_args():
parser.add_argument(
"--binary_dirs",
type=str,
default="cl_bin0/,cl_bin1/",
default="",
help="The binaries file path.")
parser.add_argument(
"--binary_file_name",
......
import argparse
import os
import sys
import struct
import numpy as np
......@@ -13,31 +14,50 @@ FLAGS = None
def generate_cpp_source():
cl_built_kernel_file_name = 'mace_cl_compiled_program.bin'
cl_platform_info_file_name = 'mace_cl_platform_info.txt'
maps = {}
cl_binary_dir_arr = FLAGS.cl_binary_dirs.split(",")
for cl_binary_dir in cl_binary_dir_arr:
if not os.path.exists(cl_binary_dir):
print("Input cl_binary_dir " + cl_binary_dir + " doesn't exist!")
for file_name in os.listdir(cl_binary_dir):
file_path = os.path.join(cl_binary_dir, file_name)
if file_path[-4:] == ".bin":
# read binary
f = open(file_path, "rb")
binary_array = np.fromfile(f, dtype=np.uint8)
f.close()
maps[file_name[:-4]] = []
for ele in binary_array:
maps[file_name[:-4]].append(hex(ele))
platform_info = ''
for binary_dir in FLAGS.cl_binary_dirs.split(","):
binary_path = os.path.join(binary_dir, cl_built_kernel_file_name)
if not os.path.exists(binary_path):
continue
with open(binary_path, "rb") as f:
binary_array = np.fromfile(f, dtype=np.uint8)
idx = 0
size, = struct.unpack("Q", binary_array[idx:idx+8])
idx += 8
for _ in xrange(size):
key_size, = struct.unpack("i", binary_array[idx:idx+4])
idx += 4
key, = struct.unpack(str(key_size) + "s", binary_array[idx:idx+key_size])
idx += key_size
value_size, = struct.unpack("i", binary_array[idx:idx+4])
idx += 4
maps[key] = []
value = struct.unpack(str(value_size) + "B",
binary_array[idx:idx+value_size])
idx += value_size
for ele in value:
maps[key].append(hex(ele))
cl_platform_info_path = os.path.join(binary_dir, cl_platform_info_file_name)
with open(cl_platform_info_path, 'r') as f:
curr_platform_info = f.read()
if platform_info != "":
assert(curr_platform_info == platform_info)
platform_info = curr_platform_info
env = jinja2.Environment(loader=jinja2.FileSystemLoader(sys.path[0]))
return env.get_template('str2vec_maps.cc.jinja2').render(
return env.get_template('opencl_compiled_kernel.cc.jinja2').render(
maps = maps,
data_type = 'unsigned char',
variable_name = 'kCompiledProgramMap'
variable_name = 'kCompiledProgramMap',
platform_info = platform_info,
)
def main(unused_args):
cpp_cl_binary_source = generate_cpp_source()
......@@ -54,7 +74,7 @@ def parse_args():
parser.add_argument(
"--cl_binary_dirs",
type=str,
default="cl_bin0/,cl_bin1/,cl_bin2/",
default="",
help="The cl binaries directories.")
parser.add_argument(
"--output_path",
......
......@@ -10,7 +10,20 @@
namespace mace {
extern const std::map<std::string, std::vector<unsigned char>>
kCompiledProgramMap = {};
extern const std::map<std::string, std::vector<{{data_type}}>> {{variable_name}} =
{
{% for key, value in maps.iteritems() %}
{
"{{key}}",
{
{%- for ele in value -%}
{{ele}},
{%- endfor -%}
}
}, // {{key}}
{% endfor %}
};
extern const std::string kCompiledProgramPlatform = {{platform_info|tojson}};
} // namespace mace
# Examples
load("//mace:mace.bzl", "if_openmp_enabled")
# mace_run: end-to-end model runner binary used by the example tooling.
cc_binary(
    name = "mace_run",
    srcs = ["mace_run.cc"],
    # Link OpenMP only when the build enables it.
    linkopts = if_openmp_enabled(["-fopenmp"]),
    # Statically linked so the binary can be pushed to a device standalone.
    linkstatic = 1,
    deps = [
        "//external:gflags_nothreads",
        "//mace/codegen:generated_models",
        "//mace/core:core",
    ],
)
......@@ -23,6 +23,7 @@
#include <numeric>
#include "gflags/gflags.h"
#include "mace/core/runtime/opencl/opencl_runtime.h"
#include "mace/public/mace.h"
#include "mace/public/mace_runtime.h"
#include "mace/utils/env_time.h"
......@@ -101,6 +102,20 @@ DeviceType ParseDeviceType(const std::string &device_str) {
}
}
// Dump the current OpenCL platform info into
// <output_dir>/mace_cl_platform_info.txt so offline codegen can check
// that compiled binaries match the device they were produced on.
// Logs a warning (and does nothing else) if the file cannot be opened.
void WriteOpenCLPlatformInfo(const std::string &output_dir) {
  const std::string info_file_path =
      output_dir + "/mace_cl_platform_info.txt";
  std::ofstream out(info_file_path);
  if (!out.is_open()) {
    LOG(WARNING) << "Write opencl platform info failed.";
    return;
  }
  out << OpenCLRuntime::Global()->platform_info();
  // ofstream closes via RAII on scope exit.
}
struct mallinfo LogMallinfoChange(struct mallinfo prev) {
struct mallinfo curr = mallinfo();
if (prev.arena != curr.arena) {
......@@ -206,6 +221,11 @@ bool RunModel(const std::vector<std::string> &input_names,
static_cast<GPUPriorityHint>(FLAGS_gpu_priority_hint));
}
const char *kernel_path = getenv("MACE_CL_PROGRAM_PATH");
const std::string kernel_file_path =
std::string(kernel_path == nullptr ?
"/data/local/tmp/mace_run/cl_program" : kernel_path);
// Init model
LOG(INFO) << "Run init";
mace::MaceEngine engine(&net_def, device_type, input_names, output_names);
......@@ -285,6 +305,8 @@ bool RunModel(const std::vector<std::string> &input_names,
printf("time %11.3f %11.3f %11.3f %11.3f %11.3f\n", create_net_millis,
mace_engine_ctor_millis, init_millis, warmup_millis, model_run_millis);
WriteOpenCLPlatformInfo(kernel_file_path);
for (size_t i = 0; i < output_count; ++i) {
std::string output_name =
FLAGS_output_file + "_" + FormatName(output_names[i]);
......
......@@ -96,5 +96,20 @@ inline std::string ObfuscateSymbol(const std::string &src) {
#define MACE_OBFUSCATE_SYMBOL(str) (str)
#endif
// Splits |str| on |delims|, preserving the historical semantics:
//   - empty input yields an empty vector (not {""});
//   - a trailing delimiter does NOT emit a trailing empty token;
//   - a leading delimiter DOES emit a leading empty token.
// Scans with a moving offset instead of re-copying the remaining tail
// after every token, so it is O(n) rather than the old O(n^2).
inline std::vector<std::string> Split(const std::string &str, char delims) {
  std::vector<std::string> result;
  size_t start = 0;
  while (start < str.size()) {
    const size_t pos = str.find(delims, start);
    if (pos == std::string::npos) {
      result.push_back(str.substr(start));
      break;
    }
    result.push_back(str.substr(start, pos - start));
    start = pos + 1;
  }
  return result;
}
} // namespace mace
#endif // MACE_UTILS_UTILS_H_
......@@ -94,6 +94,7 @@ def main(unused_args):
# generate sources
sh_commands.gen_encrypted_opencl_source()
sh_commands.gen_compiled_opencl_source()
sh_commands.gen_mace_version()
for target_abi in target_abis:
......
......@@ -30,7 +30,7 @@ if [ x"$TARGET_ABI" = x"host" ]; then
--copt="-O3" \
$PRODUCTION_MODE_BUILD_FLAGS || exit 1
bazel build --verbose_failures -c opt --strip always //mace/examples:mace_run \
bazel build --verbose_failures -c opt --strip always //mace/tools/validation:mace_run \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
......@@ -47,7 +47,7 @@ else
NEON_ENABLE_FLAG="--define neon=true"
fi
bazel build --verbose_failures -c opt --strip always //mace/examples:mace_run \
bazel build --verbose_failures -c opt --strip always //mace/tools/validation:mace_run \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=${TARGET_ABI} \
......@@ -64,14 +64,13 @@ else
$HEXAGON_MODE_BUILD_FLAG || exit 1
fi
if [ "$PRODUCTION_MODE" = 1 ]; then
cp $GENERATED_MODEL_LIB_PATH $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a
fi
rm -rf $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a
cp $GENERATED_MODEL_LIB_PATH $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a
if [ -f "$MODEL_OUTPUT_DIR/mace_run" ]; then
rm -rf $MODEL_OUTPUT_DIR/mace_run
fi
cp bazel-bin/mace/examples/mace_run $MODEL_OUTPUT_DIR
cp bazel-bin/mace/tools/validation/mace_run $MODEL_OUTPUT_DIR
if [ "$EMBED_MODEL_DATA" = 0 ]; then
cp mace/codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR
fi
......
......@@ -35,9 +35,9 @@ build_target()
}
if [ x"$TARGET_ABI" = x"host" ]; then
build_host_target //mace/codegen:generated_opencl_prod
build_host_target //mace/codegen:generated_opencl
build_host_target //mace/codegen:generated_tuning_params
else
build_target //mace/codegen:generated_opencl_prod
build_target //mace/codegen:generated_opencl
build_target //mace/codegen:generated_tuning_params
fi
......@@ -26,4 +26,3 @@ if [ x"$TARGET_ABI" != x"host" ]; then
fi
rm -rf mace/codegen/models
git checkout -- mace/codegen/opencl/opencl_compiled_program.cc mace/codegen/tuning/tuning_params.cc
......@@ -3,7 +3,7 @@ LIBMACE_TAG=`git describe --abbrev=0 --tags`
MACE_SOURCE_DIR=`/bin/pwd`
PHONE_DATA_DIR="/data/local/tmp/mace_run"
KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
COMPILED_PROGRAM_DIR="${PHONE_DATA_DIR}/cl_program/"
CODEGEN_DIR=${MACE_SOURCE_DIR}/mace/codegen
MODEL_CODEGEN_DIR=${CODEGEN_DIR}/models/${MODEL_TAG}
CL_CODEGEN_DIR=${CODEGEN_DIR}/opencl
......
#!/usr/bin/env bash
# Generates OpenCL C++ source files for the build.
#   type == "source": encrypt the .cl kernel sources into a C++ array.
#   type == "binary": embed compiled OpenCL program binaries; when a
#     target_soc is given the binaries are first pulled from that device.

Usage() {
  # Fixed typo in the script name ("genenrate" -> "generate").
  echo "Usage: bash tools/generate_opencl_code.sh type [target_soc] [cl_bin_dirs] [pull_or_not]"
}

if [ $# -lt 1 ]; then
  Usage
  exit 1
fi

CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh

TYPE=$1
TARGET_SOC=$2
CL_BIN_DIRS=$3
PULL_OR_NOT=$4

if [ x"$TYPE" == x"source" ];then
  # Encrypt kernel sources into a generated C++ file.
  python mace/python/tools/encrypt_opencl_codegen.py \
    --cl_kernel_dir=./mace/kernels/opencl/cl/ \
    --output_path=${CODEGEN_DIR}/opencl/opencl_encrypt_program.cc || exit 1
elif [ x"$#" == x"1" ];then
  # "binary" with no extra args: generate an empty compiled-program stub.
  python mace/python/tools/opencl_codegen.py \
    --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc || exit 1
else
  # Map the SoC name to a connected adb device id.
  RESULT_VALUE=`echo_device_id_by_soc $TARGET_SOC`
  if [ $? -ne 0 ]; then
    echo $RESULT_VALUE
    exit 1
  else
    DEVICE_ID=$RESULT_VALUE
  fi

  if [ "$PULL_OR_NOT" = 1 ]; then
    # Refresh the local binary dir with the programs compiled on-device.
    CL_BIN_DIR=${CL_BIN_DIRS}
    rm -rf ${CL_BIN_DIR}
    mkdir -p ${CL_BIN_DIR}
    if [ x"$TARGET_ABI" != x"host" ]; then
      adb -s $DEVICE_ID pull ${COMPILED_PROGRAM_DIR}/. ${CL_BIN_DIR} > /dev/null
    fi
  fi

  # Embed the pulled binaries into the generated C++ source.
  python mace/python/tools/opencl_codegen.py \
    --cl_binary_dirs=${CL_BIN_DIRS} \
    --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc || exit 1
fi
#!/bin/bash
# Regenerates the production C++ sources (compiled OpenCL programs and
# tuning parameters), optionally pulling the raw binaries from a device.

Usage() {
  echo "Usage: bash tools/generate_production_code.sh target_soc cl_bin_dirs pull_or_not"
}

if [ $# -lt 3 ]; then
  Usage
  exit 1
fi

CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh

TARGET_SOC=$1
CL_BIN_DIRS=$2
PULL_OR_NOT=$3

# Map the SoC name to a connected adb device id.
RESULT_VALUE=`echo_device_id_by_soc $TARGET_SOC`
if [ $? -ne 0 ]; then
  echo $RESULT_VALUE
  exit 1
else
  DEVICE_ID=$RESULT_VALUE
fi

if [ "$PULL_OR_NOT" = 1 ]; then
  # Refresh the local dir with the kernels and the tuning config
  # recorded on the phone during the tuning run.
  CL_BIN_DIR=${CL_BIN_DIRS}
  rm -rf ${CL_BIN_DIR}
  mkdir -p ${CL_BIN_DIR}
  if [ x"$TARGET_ABI" != x"host" ]; then
    adb -s $DEVICE_ID pull ${KERNEL_DIR}/. ${CL_BIN_DIR} > /dev/null
    adb -s $DEVICE_ID pull ${PHONE_DATA_DIR}/mace_run.config ${CL_BIN_DIR} > /dev/null
  fi
fi

# Embed the pulled OpenCL binaries into C++ source.
python mace/python/tools/opencl_codegen.py \
  --cl_binary_dirs=${CL_BIN_DIRS} \
  --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc

# Embed the tuning parameters into C++ source.
python mace/python/tools/binary_codegen.py \
  --binary_dirs=${CL_BIN_DIRS} \
  --binary_file_name=mace_run.config \
  --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc
#!/bin/bash
# Regenerates the tuning-parameter C++ source (tuning_params.cc).
# With no arguments an empty parameter table is generated; otherwise the
# mace_run.config tuning results are (optionally) pulled from the device
# identified by target_soc and embedded.

CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh

if [ "$#" -eq "0" ]; then
  # No device given: generate default (empty) tuning parameters.
  python mace/python/tools/binary_codegen.py \
    --binary_file_name=mace_run.config \
    --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc
else
  TARGET_SOC=$1
  BIN_DIRS=$2
  PULL_OR_NOT=$3

  # Map the SoC name to a connected adb device id.
  RESULT_VALUE=`echo_device_id_by_soc $TARGET_SOC`
  if [ $? -ne 0 ]; then
    echo $RESULT_VALUE
    exit 1
  else
    DEVICE_ID=$RESULT_VALUE
  fi

  if [ "$PULL_OR_NOT" = 1 ]; then
    # Refresh the local dir with the tuning config recorded on the phone.
    rm -rf ${BIN_DIRS}
    mkdir -p ${BIN_DIRS}
    if [ x"$TARGET_ABI" != x"host" ]; then
      adb -s $DEVICE_ID pull ${PHONE_DATA_DIR}/mace_run.config ${BIN_DIRS} > /dev/null
    fi
  fi

  python mace/python/tools/binary_codegen.py \
    --binary_dirs=${BIN_DIRS} \
    --binary_file_name=mace_run.config \
    --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc
fi
#!/bin/bash
# Generates the encrypted OpenCL kernel C++ source and the version C++ source.

# NOTE(review): $1 is captured but never used below — confirm whether any
# caller still depends on passing this tag.
CL_KERNEL_DIR_TAG=$1

CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh

# Encrypt .cl kernel files into a generated C++ array.
python mace/python/tools/encrypt_opencl_codegen.py \
  --cl_kernel_dir=./mace/kernels/opencl/cl/ \
  --output_path=${CODEGEN_DIR}/opencl/opencl_encrypt_program.cc || exit 1

# Regenerate version.cc from the current git state.
rm -rf ${CODEGEN_DIR}/version
mkdir ${CODEGEN_DIR}/version
bash mace/tools/git/gen_version_source.sh ${CODEGEN_DIR}/version/version.cc || exit 1
......@@ -59,10 +59,41 @@ def get_global_runtime(configs):
return global_runtime
def generate_opencl_and_version_code():
command = "bash tools/generate_opencl_and_version_code.sh"
def generate_version_code():
    """Regenerate the version C++ source by shelling out to the helper."""
    run_command("bash tools/generate_version_code.sh")
def generate_opencl_source_code():
    """Regenerate the encrypted OpenCL kernel C++ source."""
    run_command("bash tools/generate_opencl_code.sh source")
def generate_opencl_binay_code(target_soc, model_output_dirs, pull_or_not):
    """Regenerate the compiled OpenCL binary C++ source.

    When model output dirs are given, binaries under each
    <dir>/opencl_bin are embedded (optionally pulled from the device
    first); otherwise an empty stub source is generated.
    """
    bin_dirs = [os.path.join(d, "opencl_bin") for d in model_output_dirs]
    if not bin_dirs:
        cmd = "bash tools/generate_opencl_code.sh binary"
    else:
        cmd = "bash tools/generate_opencl_code.sh {} {} {} {}".format(
            "binary", target_soc, ",".join(bin_dirs), int(pull_or_not))
    run_command(cmd)
def generate_tuning_param_code(target_soc, model_output_dirs, pull_or_not):
    """Regenerate the tuning-parameter C++ source.

    When model output dirs are given, tuning results under each
    <dir>/opencl_bin are embedded (optionally pulled from the device
    first); otherwise an empty parameter table is generated.
    """
    bin_dirs = [os.path.join(d, "opencl_bin") for d in model_output_dirs]
    if not bin_dirs:
        cmd = "bash tools/generate_tuning_param_code.sh"
    else:
        cmd = "bash tools/generate_tuning_param_code.sh {} {} {}".format(
            target_soc, ",".join(bin_dirs), int(pull_or_not))
    run_command(cmd)
def generate_code(target_soc, model_output_dirs, pull_or_not):
    """Regenerate both generated sources: OpenCL binaries, then tuning params."""
    for generator in (generate_opencl_binay_code, generate_tuning_param_code):
        generator(target_soc, model_output_dirs, pull_or_not)
def clear_env(target_soc):
command = "bash tools/clear_env.sh {}".format(target_soc)
......@@ -119,7 +150,6 @@ def tuning_run(model_name,
model_output_dir,
running_round,
tuning,
production_mode,
restart_round,
option_args=''):
# TODO(yejianwu) refactoring the hackish code
......@@ -179,6 +209,7 @@ def build_mace_run_prod(model_name, target_runtime, target_abi, target_soc,
else:
hexagon_mode = False
generate_code(target_soc, [], False)
production_or_not = False
build_mace_run(production_or_not, model_output_dir, hexagon_mode)
tuning_run(
......@@ -189,12 +220,10 @@ def build_mace_run_prod(model_name, target_runtime, target_abi, target_soc,
model_output_dir,
running_round=0,
tuning=tuning,
production_mode=production_or_not,
restart_round=1)
generate_code(target_soc, [model_output_dir], True)
production_or_not = True
pull_or_not = True
generate_production_code(target_soc, [model_output_dir], pull_or_not)
build_mace_run(production_or_not, model_output_dir, hexagon_mode)
......@@ -218,11 +247,11 @@ def build_production_code():
def merge_libs_and_tuning_results(target_soc, output_dir, model_output_dirs):
pull_or_not = False
generate_production_code(target_soc, model_output_dirs, pull_or_not)
generate_code(target_soc, model_output_dirs, False)
build_production_code()
model_output_dirs_str = ",".join(model_output_dirs)
print output_dir, model_output_dirs
command = "bash tools/merge_libs.sh {} {} {}".format(target_soc, output_dir,
model_output_dirs_str)
run_command(command)
......@@ -232,6 +261,26 @@ def packaging_lib_file(output_dir):
command = "bash tools/packaging_lib.sh {}".format(output_dir)
run_command(command)
def download_model_files(model_file_path,
                         model_output_dir,
                         weight_file_path=""):
    """Fetch remote model (and optional Caffe weight) files.

    Any http(s) URL is downloaded into model_output_dir and the local
    path is exported via the MODEL_FILE_PATH / WEIGHT_FILE_PATH
    environment variables; local paths are left untouched.
    """
    def _is_remote(path):
        return path.startswith("http://") or path.startswith("https://")

    if _is_remote(model_file_path):
        local_model = model_output_dir + "/model.pb"
        os.environ["MODEL_FILE_PATH"] = local_model
        urllib.urlretrieve(model_file_path, local_model)

    if _is_remote(weight_file_path):
        local_weight = model_output_dir + "/model.caffemodel"
        os.environ["WEIGHT_FILE_PATH"] = local_weight
        urllib.urlretrieve(weight_file_path, local_weight)
def md5sum(str):
    """Return the hex MD5 digest of the given byte string."""
    # NOTE: parameter name kept for caller compatibility even though it
    # shadows the builtin.
    return hashlib.md5(str).hexdigest()
def parse_model_configs():
with open(FLAGS.config) as f:
......@@ -298,7 +347,9 @@ def main(unused_args):
shutil.rmtree(os.path.join(FLAGS.output_dir, os.environ["PROJECT_NAME"]))
os.makedirs(os.path.join(FLAGS.output_dir, os.environ["PROJECT_NAME"]))
generate_opencl_and_version_code()
generate_version_code()
generate_opencl_source_code()
option_args = ' '.join([arg for arg in unused_args if arg.startswith('--')])
available_socs = sh_commands.adb_get_all_socs()
......@@ -315,6 +366,7 @@ def main(unused_args):
print("Error: devices with SoCs are not connected %s" % missing_socs)
exit(1)
for target_soc in target_socs:
for target_abi in configs["target_abis"]:
global_runtime = get_global_runtime(configs)
......@@ -322,9 +374,9 @@ def main(unused_args):
os.environ["TARGET_ABI"] = target_abi
model_output_dirs = []
for model_name in configs["models"]:
print '=======================', model_name, '======================='
# Transfer params by environment
os.environ["MODEL_TAG"] = model_name
print '=======================', model_name, '======================='
model_config = configs["models"][model_name]
input_file_list = model_config.get("validation_inputs_data", [])
for key in model_config:
......@@ -337,9 +389,8 @@ def main(unused_args):
else:
os.environ[key.upper()] = str(model_config[key])
md5 = hashlib.md5()
md5.update(model_config["model_file_path"])
model_path_digest = md5.hexdigest()
# Create model build directory
model_path_digest = md5sum(model_config["model_file_path"])
model_output_dir = "%s/%s/%s/%s/%s/%s/%s" % (FLAGS.output_dir,
os.environ["PROJECT_NAME"],
"build", model_name,
......@@ -353,21 +404,8 @@ def main(unused_args):
os.makedirs(model_output_dir)
clear_env(target_soc)
# Support http:// and https://
if model_config["model_file_path"].startswith(
"http://") or model_config["model_file_path"].startswith(
"https://"):
os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb"
urllib.urlretrieve(model_config["model_file_path"],
os.environ["MODEL_FILE_PATH"])
if model_config["platform"] == "caffe" and (
model_config["weight_file_path"].startswith("http://") or
model_config["weight_file_path"].startswith("https://")):
os.environ[
"WEIGHT_FILE_PATH"] = model_output_dir + "/model.caffemodel"
urllib.urlretrieve(model_config["weight_file_path"],
os.environ["WEIGHT_FILE_PATH"])
download_model_files(model_config["model_file_path"],
model_output_dir, model_config.get("weight_file_path", ""))
if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate"\
or FLAGS.mode == "benchmark" or FLAGS.mode == "all":
......
......@@ -41,14 +41,13 @@ LIBMACE_TEMP_DIR=`mktemp -d -t libmace.XXXX`
echo "create ${LIBMACE_BUILD_DIR}/${TARGET_ABI}/libmace_${PROJECT_NAME}.${TARGET_SOC}.a" > ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
if [ x"$TARGET_ABI" = x"host" ]; then
echo "addlib bazel-bin/mace/codegen/libgenerated_opencl_prod.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/codegen/libgenerated_opencl.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/codegen/libgenerated_tuning_params.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
else
echo "addlib bazel-bin/mace/codegen/libgenerated_opencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/codegen/libgenerated_opencl.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/codegen/libgenerated_tuning_params.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/codegen/libgenerated_version.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/core/libcore.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/core/libopencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/kernels/libkernels.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/utils/libutils.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/utils/libutils_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
......
......@@ -69,22 +69,20 @@ def adb_run(serialno, host_bin_path, bin_name,
device_bin_path="/data/local/tmp/mace"):
host_bin_full_path = "%s/%s" % (host_bin_path, bin_name)
device_bin_full_path = "%s/%s" % (device_bin_path, bin_name)
device_cl_path = "%s/cl" % device_bin_path
props = adb_getprop_by_serialno(serialno)
print("=====================================================================")
print("Run on device: %s, %s, %s" % (serialno, props["ro.board.platform"],
props["ro.product.model"]))
sh.adb("-s", serialno, "shell", "rm -rf %s" % device_bin_path)
sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_bin_path)
sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_cl_path)
print("Push %s to %s" % (host_bin_full_path, device_bin_full_path))
sh.adb("-s", serialno, "push", host_bin_full_path, device_bin_full_path)
print("Run %s" % device_bin_full_path)
stdout_buff=[]
process_output = make_output_processor(stdout_buff)
p = sh.adb("-s", serialno, "shell",
"MACE_OPENCL_PROFILING=%d MACE_KERNEL_PATH=%s MACE_CPP_MIN_VLOG_LEVEL=%d %s %s" %
(opencl_profiling, device_cl_path, vlog_level, device_bin_full_path, args),
"MACE_OPENCL_PROFILING=%d MACE_CPP_MIN_VLOG_LEVEL=%d %s %s" %
(opencl_profiling, vlog_level, device_bin_full_path, args),
_out=process_output, _bg=True, _err_to_out=True)
p.wait()
return "".join(stdout_buff)
......@@ -139,6 +137,10 @@ def gen_mace_version(codegen_path="mace/codegen"):
sh.bash("mace/tools/git/gen_version_source.sh",
"%s/version/version.cc" % codegen_path)
def gen_compiled_opencl_source(codegen_path="mace/codegen"):
    """Generate the (initially empty) compiled OpenCL program C++ source."""
    output_arg = ("--output_path=%s/opencl/opencl_compiled_program.cc"
                  % codegen_path)
    sh.python("mace/python/tools/opencl_codegen.py", output_arg)
################################
# falcon
################################
......
#!/bin/bash
Usage() {
echo "Usage: bash tools/tuning_run.sh target_soc model_output_dir round tuning production_mode"
echo "Usage: bash tools/tuning_run.sh target_soc model_output_dir round tuning "
}
if [ $# -lt 7 ]; then
if [ $# -lt 6 ]; then
Usage
exit 1
fi
......@@ -16,9 +16,8 @@ TARGET_SOC=$1
MODEL_OUTPUT_DIR=$2
ROUND=$3
TUNING_OR_NOT=$4
PRODUCTION_MODE=$5
RESTART_ROUND=$6
OPTION_ARGS=$7
RESTART_ROUND=$5
OPTION_ARGS=$6
echo $OPTION_ARGS
......@@ -45,16 +44,14 @@ if [ x"$TARGET_ABI" = x"host" ]; then
--restart_round=1 \
$OPTION_ARGS || exit 1
else
if [[ "${TUNING_OR_NOT}" != "0" && "$PRODUCTION_MODE" != 1 ]];then
if [[ "${TUNING_OR_NOT}" != "0" ]];then
tuning_flag=1
else
tuning_flag=0
fi
adb -s $DEVICE_ID shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
if [ "$PRODUCTION_MODE" = 0 ]; then
adb -s $DEVICE_ID shell "mkdir -p ${KERNEL_DIR}" || exit 1
fi
adb -s $DEVICE_ID shell "mkdir -p ${COMPILED_PROGRAM_DIR}" || exit 1
IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}"
for NAME in "${INPUT_NAMES[@]}";do
......@@ -72,7 +69,7 @@ else
MACE_TUNING=${tuning_flag} \
MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \
MACE_KERNEL_PATH=$KERNEL_DIR \
MACE_CL_PROGRAM_PATH=$COMPILED_PROGRAM_DIR \
MACE_LIMIT_OPENCL_KERNEL_TIME=${LIMIT_OPENCL_KERNEL_TIME} \
${PHONE_DATA_DIR}/mace_run \
--input_node="${INPUT_NODES}" \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册