Commit b352192b authored by liuqi

refactor converter to convert->run/benchmark.

Parent bb10ae3b
...@@ -9,6 +9,7 @@ mace/codegen/opencl_bin/
 mace/codegen/tuning/
 mace/codegen/version/
 mace/codegen/engine/
+mace/codegen/lib/
 build/
 docs/_build/
 *.a
......
...@@ -69,8 +69,6 @@ extra_tests:
 platform_compatible_tests:
   stage: platform_compatible_tests
   script:
-    - mkdir -p mace/codegen/version && bash mace/tools/git/gen_version_source.sh mace/codegen/version/version.cc
-    - mkdir -p mace/codegen/tuning && python mace/python/tools/binary_codegen.py --output_path=mace/codegen/tuning/tuning_params.cc
     - bazel build mace/core:core
 ndk_versions_compatible_tests:
...@@ -101,5 +99,8 @@ python_tools_tests:
     - rm -rf mace-models
     - GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" git clone git@v9.git.n.xiaomi.com:deep-computing/mace-models.git
     - CONF_FILE=mace-models/mobilenet-v2/mobilenet-v2.yml
-    - sh -c "python tools/converter.py build --config=${CONF_FILE} --disable_tuning && python tools/converter.py run --config=${CONF_FILE} --round=1 --validate && python tools/converter.py run --config=${CONF_FILE} --example --round=1 --validate" || exit 1
+    - >
+      python tools/converter.py convert --config=${CONF_FILE} --model_graph_format=file --model_data_format=file || exit 1;
+      python tools/converter.py run --config=${CONF_FILE} --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
+      python tools/converter.py run --config=${CONF_FILE} --example --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
     - rm -rf mace-models
...@@ -103,11 +103,3 @@ genrule(
     tools = ["//mace/python/tools:archive_static_lib"],
     visibility = ["//visibility:public"],
 )
-cc_library(
-    name = "libmace_static_lib",
-    srcs = [":libmace_static"],
-    linkstatic = 1,
-    visibility = ["//visibility:public"],
-)
...@@ -36,11 +36,12 @@ cc_binary(
         "//external:gflags_nothreads",
         "//mace/codegen:generated_models",
         "//mace/codegen:generated_mace_engine_factory",
+        "//mace/ops:ops",
     ],
 )
 cc_binary(
-    name = "benchmark_model_shared",
+    name = "benchmark_model_dynamic",
     srcs = [
         "benchmark_model.cc",
     ],
......
...@@ -26,7 +26,9 @@
 #include "mace/utils/logging.h"
 #include "mace/utils/utils.h"
 #include "mace/benchmark/statistics.h"
+#ifdef MODEL_GRAPH_FORMAT_CODE
 #include "mace/codegen/engine/mace_engine_factory.h"
+#endif
 namespace mace {
 namespace benchmark {
...@@ -287,27 +289,30 @@ int Main(int argc, char **argv) {
   // Create Engine
   const char *model_data_file_ptr =
       FLAGS_model_data_file.empty() ? nullptr : FLAGS_model_data_file.c_str();
-  if (FLAGS_model_file != "") {
   std::vector<unsigned char> model_pb_data;
+  if (FLAGS_model_file != "") {
     if (!mace::ReadBinaryFile(&model_pb_data, FLAGS_model_file)) {
       LOG(FATAL) << "Failed to read file: " << FLAGS_model_file;
     }
+  }
+#ifdef MODEL_GRAPH_FORMAT_CODE
   create_engine_status =
-      CreateMaceEngineFromProto(model_pb_data,
+      CreateMaceEngineFromCode(FLAGS_model_name,
                                model_data_file_ptr,
                                input_names,
                                output_names,
                                device_type,
                                &engine);
-  } else {
+#else
   create_engine_status =
-      CreateMaceEngineFromCode(FLAGS_model_name,
+      CreateMaceEngineFromProto(model_pb_data,
                                model_data_file_ptr,
                                input_names,
                                output_names,
                                device_type,
                                &engine);
-  }
+#endif
   if (create_engine_status != MaceStatus::MACE_SUCCESS) {
     LOG(FATAL) << "Create engine error, please check the arguments";
   }
......
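For reference, here is a minimal sketch (not part of this commit) of how a caller would select between the two engine factories under the new MODEL_GRAPH_FORMAT_CODE macro. The factory argument lists mirror the calls shown in the hunk above; the model tag, tensor names, and file paths are hypothetical placeholders, and the small file reader stands in for the ReadBinaryFile helper used above.

// Sketch only: compile-time choice between code-converted and file-based models.
#include <fstream>
#include <iterator>
#include <memory>
#include <string>
#include <vector>

#include "mace/public/mace.h"
#include "mace/public/mace_runtime.h"
#ifdef MODEL_GRAPH_FORMAT_CODE
#include "mace/codegen/engine/mace_engine_factory.h"  // generated header
#endif

namespace {
// Minimal stand-in for the ReadBinaryFile helper used in the hunk above.
bool ReadAllBytes(const std::string &path, std::vector<unsigned char> *data) {
  std::ifstream ifs(path, std::ios::binary);
  if (!ifs.is_open()) return false;
  data->assign(std::istreambuf_iterator<char>(ifs),
               std::istreambuf_iterator<char>());
  return true;
}
}  // namespace

mace::MaceStatus BuildEngine(std::shared_ptr<mace::MaceEngine> *engine) {
  const std::vector<std::string> input_names{"input"};    // hypothetical
  const std::vector<std::string> output_names{"output"};  // hypothetical
  const mace::DeviceType device_type = mace::DeviceType::CPU;
  const std::string model_data_file = "/data/local/tmp/mobilenet_v2.data";  // hypothetical
#ifdef MODEL_GRAPH_FORMAT_CODE
  // Graph was converted to code: the model is addressed by its tag.
  return mace::CreateMaceEngineFromCode("mobilenet_v2",  // hypothetical tag
                                        model_data_file,
                                        input_names, output_names,
                                        device_type, engine);
#else
  // Graph stays a protobuf file: read the bytes and hand them to the engine.
  std::vector<unsigned char> model_pb_data;
  if (!ReadAllBytes("/data/local/tmp/mobilenet_v2.pb", &model_pb_data)) {  // hypothetical
    return mace::MaceStatus::MACE_INVALID_ARGS;
  }
  return mace::CreateMaceEngineFromProto(model_pb_data, model_data_file,
                                         input_names, output_names,
                                         device_type, engine);
#endif
}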
...@@ -14,7 +14,6 @@ cc_library(
     copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
     deps = [
         "//mace/core",
-        "//mace/ops",
     ],
 )
...@@ -36,9 +35,22 @@ cc_library(
 cc_library(
     name = "generated_mace_engine_factory",
-    hdrs = ["engine/mace_engine_factory.h"],
+    hdrs = glob(["engine/*.h"]),
     copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
     deps = [
         "//mace/public",
     ],
 )
+cc_library(
+    name = "generated_libmace",
+    srcs = glob(["lib/*.so"]),
+    visibility = ["//visibility:public"],
+)
+cc_library(
+    name = "generated_libmace_static",
+    srcs = glob(["lib/*.a"]),
+    linkstatic = 1,
+    visibility = ["//visibility:public"],
+)
...@@ -24,7 +24,7 @@ void SetKVStorageFactory(std::shared_ptr<KVStorageFactory> storage_factory) {
   kStorageFactory = storage_factory;
 }
-std::string kOpenCLParameterPath;
+std::string kOpenCLParameterPath;  // NOLINT(runtime/string)
 void SetOpenCLParameterPath(const std::string &path) {
   kOpenCLParameterPath = path;
......
...@@ -23,14 +23,14 @@ cc_binary(
     deps = [
         "//external:gflags_nothreads",
         "//mace/codegen:generated_mace_engine_factory",
-        "//mace:libmace_static_lib",
+        "//mace/codegen:generated_libmace_static",
     ] + if_hexagon_enabled([
         "//third_party/nnlib:libhexagon",
     ]),
 )
 cc_binary(
-    name = "example_shared",
+    name = "example_dynamic",
     srcs = ["example.cc"],
     copts = [
         "-Werror",
...@@ -45,7 +45,7 @@ cc_binary(
         "//external:gflags_nothreads",
         "//mace/codegen:generated_mace_engine_factory",
         "//mace/utils:utils",
-        "//mace:libmace",
+        "//mace/codegen:generated_libmace",
     ],
 )
...@@ -23,7 +23,7 @@
 #include "mace/public/mace.h"
 #include "mace/public/mace_runtime.h"
 // if convert model to code.
-#ifdef CODE_TYPE
+#ifdef MODEL_GRAPH_FORMAT_CODE
 #include "mace/codegen/engine/mace_engine_factory.h"
 #endif
...@@ -126,7 +126,7 @@ DEFINE_int32(gpu_priority_hint, 3, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH");
 DEFINE_int32(omp_num_threads, -1, "num of openmp threads");
 DEFINE_int32(cpu_affinity_policy, 1,
              "0:AFFINITY_NONE/1:AFFINITY_BIG_ONLY/2:AFFINITY_LITTLE_ONLY");
-#ifndef CODE_TYPE
+#ifndef MODEL_GRAPH_FORMAT_CODE
 namespace {
 bool ReadBinaryFile(std::vector<unsigned char> *data,
                     const std::string &filename) {
...@@ -196,7 +196,7 @@ bool RunModel(const std::vector<std::string> &input_names,
   MaceStatus create_engine_status;
   // Only choose one of the two type based on the `build_type`
   // in model deployment file(.yml).
-#ifdef CODE_TYPE
+#ifdef MODEL_GRAPH_FORMAT_CODE
   create_engine_status =
       CreateMaceEngineFromCode(FLAGS_model_name,
                                FLAGS_model_data_file,
......
...@@ -6,6 +6,7 @@ mace {
     *FileStorageFactory*;
     *SetKVStorageFactory*;
     *SetOpenCLBinaryPaths*;
+    *SetOpenCLParameterPath*;
     *SetGPUHints*;
     *SetOpenMPThreadPolicy*;
     *SetOpenMPThreadAffinity*;
......
...@@ -91,9 +91,11 @@ __attribute__((visibility("default")))
 void SetOpenCLBinaryPaths(const std::vector<std::string> &paths);
 // Just call once. (Not thread-safe)
-// Set the path of Generated OpenCL parameter file if you use gpu of specific soc.
+// Set the path of Generated OpenCL parameter file
+// if you use gpu for specific soc.
 // The parameters is the local work group size tuned for specific SOC, which
 // may be faster than the general parameters.
+__attribute__((visibility("default")))
 void SetOpenCLParameterPath(const std::string &path);
 // Set GPU hints, currently only supports Adreno GPU.
......
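For reference, a minimal sketch (not part of this commit) of calling the newly exported SetOpenCLParameterPath() together with SetOpenCLBinaryPaths() before any engine is created; both declarations appear in the header above, and the file paths are hypothetical placeholders.

// Sketch only: point MACE at the precompiled OpenCL binaries and tuned parameters.
#include <string>
#include <vector>

#include "mace/public/mace_runtime.h"

void ConfigureOpenCLCaches() {
  // Precompiled OpenCL program binaries generated for the target SoC (hypothetical path).
  mace::SetOpenCLBinaryPaths({"/data/local/tmp/mace_run/opencl_binary.bin"});
  // Tuned local work group sizes for the same SoC; call once, not thread-safe (hypothetical path).
  mace::SetOpenCLParameterPath("/data/local/tmp/mace_run/opencl_parameter.bin");
}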
...@@ -13,14 +13,12 @@
 # limitations under the License.
 import sys
-import os
-# python encrypt_opencl_codegen.py --cl_kernel_dir=./mace/kernels/opencl/cl/ \
-#     --output_path=./mace/codegen/opencl_encrypt/opencl_encrypted_program.cc
 def is_static_lib(lib_name):
     return lib_name.endswith('.a') or lib_name.endswith('.lo')
 def merge_libs(input_libs,
                output_lib_path,
                mri_script):
......
...@@ -198,7 +198,7 @@ def main(unused_args):
                FLAGS.output_dir, FLAGS.runtime,
                FLAGS.embed_model_data,
                FLAGS.winograd, FLAGS.data_type,
-               FLAGS.model_build_type)
+               FLAGS.model_graph_format)
 def str2bool(v):
...@@ -277,10 +277,10 @@ def parse_args():
         default=True,
         help="embed model data.")
     parser.add_argument(
-        "--model_build_type",
+        "--model_graph_format",
         type=str,
-        default="code",
-        help="[proto|code] build models to code" +
+        default="file",
+        help="[file|code] build models to code" +
             "or `Protobuf` file.")
     parser.add_argument(
         "--data_type",
......
...@@ -24,7 +24,6 @@
 namespace mace {
-{% if model_type == 'code' %}
 {% for tag in model_tags %}
 namespace {{tag}} {
...@@ -59,7 +58,6 @@ MaceStatus CreateMaceEngineFromCode(
   if (engine == nullptr) {
     return MaceStatus::MACE_INVALID_ARGS;
   }
-  const unsigned char * model_data = nullptr;
   std::shared_ptr<NetDef> net_def;
   MaceStatus status = MaceStatus::MACE_SUCCESS;
   switch (model_name_map[model_name]) {
...@@ -69,7 +67,7 @@ MaceStatus CreateMaceEngineFromCode(
       engine->reset(new mace::MaceEngine(device_type));
 {% if embed_model_data %}
       (void)model_data_file;
-      model_data =
+      const unsigned char * model_data =
           mace::{{model_tags[i]}}::LoadModelData();
       status = (*engine)->Init(net_def.get(), input_nodes, output_nodes, model_data);
 {% else %}
...@@ -83,22 +81,5 @@ MaceStatus CreateMaceEngineFromCode(
   return status;
 }
-{% else %}
-MaceStatus CreateMaceEngineFromCode(
-    const std::string &model_name,
-    const std::string &model_data_file,
-    const std::vector<std::string> &input_nodes,
-    const std::vector<std::string> &output_nodes,
-    const DeviceType device_type,
-    std::shared_ptr<MaceEngine> *engine) {
-  (void)(model_name);
-  (void)(model_data_file);
-  (void)(input_nodes);
-  (void)(output_nodes);
-  (void)(device_type);
-  (void)(engine);
-  return MaceStatus::MACE_INVALID_ARGS;
-}
-{% endif %}
 }  // namespace mace
...@@ -20,7 +20,7 @@ from jinja2 import Environment, FileSystemLoader
 FLAGS = None
-def gen_mace_engine_factory(model_tags, template_dir, model_type,
+def gen_mace_engine_factory(model_tags, template_dir,
                             embed_model_data, output_dir):
     # Create the jinja2 environment.
     j2_env = Environment(
...@@ -30,33 +30,6 @@ def gen_mace_engine_factory(model_tags, template_dir, model_type,
     source = j2_env.get_template(template_name).render(
         model_tags=model_tags,
         embed_model_data=embed_model_data,
-        model_type=model_type,
     )
     with open(output_dir + '/mace_engine_factory.h', "wb") as f:
         f.write(source)
-def parse_args():
-    """Parses command line arguments."""
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--model_tag",
-        type=str,
-        default="",
-        help="model tag")
-    parser.add_argument(
-        "--template_dir", type=str, default="", help="template path")
-    parser.add_argument(
-        "--output_dir", type=str, default="", help="output path")
-    parser.add_argument(
-        "--model_type",
-        type=str,
-        default="",
-        help="[source|pb] model load type")
-    return parser.parse_known_args()
-if __name__ == '__main__':
-    FLAGS, unparsed = parse_args()
-    gen_mace_engine_creator(FLAGS.model_tag, FLAGS.template_dir,
-                            FLAGS.model_type, FLAGS.output_dir)
...@@ -14,7 +14,6 @@
 // This is a generated file. DO NOT EDIT!
-#include <vector>
 #include <string>
 #include "mace/core/macros.h"
......
...@@ -33,6 +33,11 @@ GPUDataType = \
     Enum('GPUDataType', [(ele, ele) for ele in GPUDataTypeStrs], type=str)
+class ModelFormat(object):
+    file = "file"
+    code = "code"
 def generate_obfuscated_name(namespace, name):
     md5 = hashlib.md5()
     md5.update(namespace)
...@@ -240,11 +245,11 @@ def save_model_to_code(net_def, model_tag, runtime,
         counter += 1
     # generate tensor data
-    if embed_model_data:
     model_data = extract_model_data(net_def)
     template_name = 'tensor_data.jinja2'
     source = j2_env.get_template(template_name).render(
         tag=model_tag,
+        embed_model_data=embed_model_data,
         model_data_size=len(model_data),
         model_data=model_data)
     with open(output_dir + 'tensor_data' + '.cc', "wb") as f:
...@@ -293,7 +298,7 @@ def save_model_to_code(net_def, model_tag, runtime,
 def save_model(net_def, model_checksum, weight_checksum, template_dir,
                obfuscate, model_tag, output_dir, runtime, embed_model_data,
-               winograd_conv, data_type, model_build_type):
+               winograd_conv, data_type, model_graph_format):
     if obfuscate:
         obfuscate_name(net_def)
     else:
...@@ -303,10 +308,10 @@ def save_model(net_def, model_checksum, weight_checksum, template_dir,
     # update tensor type
     update_tensor_infos(net_def, runtime, data_type)
-    if model_build_type == 'proto' or not embed_model_data:
+    if model_graph_format == ModelFormat.file or not embed_model_data:
         save_model_data(net_def, model_tag, output_dir)
-    if model_build_type == 'proto':
+    if model_graph_format == ModelFormat.file:
         save_model_to_proto(net_def, model_tag, output_dir)
     else:
         save_model_to_code(net_def, model_tag, runtime,
......
...@@ -14,28 +14,17 @@
 // This is a generated file. DO NOT EDIT!
-#include <vector>
-#include <string>
-#include "mace/core/macros.h"
-#include "mace/public/mace.h"
-#include "mace/utils/env_time.h"
-#include "mace/utils/logging.h"
 namespace mace {
 namespace {{tag}} {
-{% if embed_model_data %}
 alignas(4) const unsigned char model_data[{{ model_data_size }}] = {
 {% for d in model_data %}{{"0x%02X, " % d }}{%endfor%}
 };
-{% endif %}
-{% if embed_model_data %}
 const unsigned char *LoadModelData() {
   return model_data;
 }
-{% endif %}
 }  // namespace {{tag}}
 }  // namespace mace
......
...@@ -14,9 +14,6 @@
 // This is a generated file. DO NOT EDIT!
-#include <vector>
-#include <string>
 #include "mace/proto/mace.pb.h"
 #include "mace/public/mace.h"
 #include "mace/utils/env_time.h"
......
...@@ -16,11 +16,12 @@ cc_binary(
         "//external:gflags_nothreads",
         "//mace/codegen:generated_mace_engine_factory",
         "//mace/codegen:generated_models",
+        "//mace/ops:ops",
     ],
 )
 cc_binary(
-    name = "mace_run_shared",
+    name = "mace_run_dynamic",
    srcs = ["mace_run.cc"],
    copts = [
        "-Werror",
...@@ -33,7 +34,7 @@ cc_binary(
     deps = [
         "//external:gflags_nothreads",
         "//mace/codegen:generated_mace_engine_factory",
-        "//mace/utils:utils",
         "//mace:libmace",
+        "//mace/utils:utils",
     ],
 )
...@@ -38,7 +38,9 @@
 #include "mace/utils/logging.h"
 #include "mace/utils/utils.h"
+#ifdef MODEL_GRAPH_FORMAT_CODE
 #include "mace/codegen/engine/mace_engine_factory.h"
+#endif
 namespace mace {
 namespace tools {
...@@ -240,23 +242,24 @@ bool RunModel(const std::string &model_name,
   while (true) {
     // Create Engine
     int64_t t0 = NowMicros();
-    if (FLAGS_model_file != "") {
+#ifdef MODEL_GRAPH_FORMAT_CODE
     create_engine_status =
-        CreateMaceEngineFromProto(model_pb_data,
+        CreateMaceEngineFromCode(model_name,
                                  FLAGS_model_data_file,
                                  input_names,
                                  output_names,
                                  device_type,
                                  &engine);
-    } else {
+#else
+    (void)(model_name);
     create_engine_status =
-        CreateMaceEngineFromCode(model_name,
+        CreateMaceEngineFromProto(model_pb_data,
                                  FLAGS_model_data_file,
                                  input_names,
                                  output_names,
                                  device_type,
                                  &engine);
-    }
+#endif
     int64_t t1 = NowMicros();
     if (create_engine_status != MACE_SUCCESS) {
...@@ -313,23 +316,23 @@ bool RunModel(const std::string &model_name,
       LOG(ERROR) << "Warmup runtime error, retry ... errcode: "
                  << warmup_status;
       do {
-        if (FLAGS_model_file != "") {
+#ifdef MODEL_GRAPH_FORMAT_CODE
         create_engine_status =
-            CreateMaceEngineFromProto(model_pb_data,
+            CreateMaceEngineFromCode(model_name,
                                      FLAGS_model_data_file,
                                      input_names,
                                      output_names,
                                      device_type,
                                      &engine);
-        } else {
+#else
         create_engine_status =
-            CreateMaceEngineFromCode(model_name,
+            CreateMaceEngineFromProto(model_pb_data,
                                      FLAGS_model_data_file,
                                      input_names,
                                      output_names,
                                      device_type,
                                      &engine);
-        }
+#endif
       } while (create_engine_status != MACE_SUCCESS);
     } else {
       int64_t t4 = NowMicros();
...@@ -353,23 +356,23 @@ bool RunModel(const std::string &model_name,
       LOG(ERROR) << "Mace run model runtime error, retry ... errcode: "
                  << run_status;
       do {
-        if (FLAGS_model_file != "") {
+#ifdef MODEL_GRAPH_FORMAT_CODE
         create_engine_status =
-            CreateMaceEngineFromProto(model_pb_data,
+            CreateMaceEngineFromCode(model_name,
                                      FLAGS_model_data_file,
                                      input_names,
                                      output_names,
                                      device_type,
                                      &engine);
-        } else {
+#else
         create_engine_status =
-            CreateMaceEngineFromCode(model_name,
+            CreateMaceEngineFromProto(model_pb_data,
                                      FLAGS_model_data_file,
                                      input_names,
                                      output_names,
                                      device_type,
                                      &engine);
-        }
+#endif
       } while (create_engine_status != MACE_SUCCESS);
     } else {
       int64_t t1 = NowMicros();
......
...@@ -48,6 +48,7 @@ cc_test(
     linkstatic = 1,
     deps = [
         ":utils",
+        "//mace/core",
         "@gtest//:gtest",
         "@gtest//:gtest_main",
     ],
......
...@@ -134,7 +134,8 @@ class Tuner {
         int32_t params_count = params_size / sizeof(unsigned int);
         std::vector<unsigned int> params(params_count);
         for (int i = 0; i < params_count; ++i) {
-          ifs.read(reinterpret_cast<char *>(&params[i]), sizeof(unsigned int));
+          ifs.read(reinterpret_cast<char *>(&params[i]),
+                   sizeof(unsigned int));
         }
         param_table_.emplace(key, params);
       }
......
# Description:
# Borrowed from tensorflow
# Exports generated files used to generate mace/codegen/version/version.cc
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
exports_files(
glob(["gen/*"]),
)
\ No newline at end of file
"""Repository rule for Git autoconfiguration, borrow from tensorflow
"""
def _git_version_conf_impl(repository_ctx):
repository_ctx.template(
"BUILD",
Label("//repository/git:BUILD.tpl"))
mace_root_path = str(repository_ctx.path(Label("@mace//:BUILD")))[:-len("BUILD")]
generated_files_path = repository_ctx.path("gen")
repository_ctx.execute([
'bash', '%s/mace/tools/git/gen_version_source.sh' % mace_root_path
, '%s/version' % generated_files_path
], quiet=False)
git_version_repository = repository_rule(
implementation = _git_version_conf_impl,
local=True,
)
# Description:
# Exports generated files used to generate mace/codegen/opencl/opencl_encrypt_program.cc
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
exports_files(
glob(["gen/*"]),
)
\ No newline at end of file
"""Repository rule for opencl encrypt kernel autoconfiguration, borrow from tensorflow
"""
def _opencl_encrypt_kernel_impl(repository_ctx):
repository_ctx.template(
"BUILD",
Label("//repository/opencl-kernel:BUILD.tpl"))
mace_root_path = str(repository_ctx.path(Label("@mace//:BUILD")))[:-len("BUILD")]
generated_files_path = repository_ctx.path("gen")
python_bin_path = repository_ctx.which("python")
repository_ctx.execute([
python_bin_path, '%s/mace/python/tools/encrypt_opencl_codegen.py' % mace_root_path,
'--cl_kernel_dir=%s/mace/kernels/opencl/cl' % mace_root_path,
'--output_path=%s/encrypt_opencl_kernel' % generated_files_path
], quiet=False)
encrypt_opencl_kernel_repository = repository_rule(
implementation = _opencl_encrypt_kernel_impl,
local=True,
)
...@@ -9,7 +9,6 @@ build --verbose_failures
 build --copt=-std=c++11
 build --copt=-D_GLIBCXX_USE_C99_MATH_TR1
 build --copt=-DMACE_OBFUSCATE_LITERALS
-build --define openmp=true
 # Usage example: bazel build --config android
 build:android --crosstool_top=//external:android/crosstool
......
...@@ -130,11 +130,6 @@ def main(unused_args):
     host_bin_path, bin_name = sh_commands.bazel_target_to_bin(target)
     target_abis = FLAGS.target_abis.split(',')
-    # generate sources
-    sh_commands.gen_encrypted_opencl_source()
-    sh_commands.gen_mace_version()
-    sh_commands.gen_tuning_param_code([])
     for target_abi in target_abis:
         sh_commands.bazel_build(target, abi=target_abi,
                                 enable_neon=FLAGS.enable_neon,
......
...@@ -11,28 +11,44 @@ mkdir -p $INCLUDE_DIR
 # copy include headers
 cp mace/public/*.h $INCLUDE_DIR/
-echo "build lib for armeabi-v7a"
+# make directories
-rm -rf $LIB_DIR/armeabi-v7a
 mkdir -p $LIB_DIR/armeabi-v7a
-bazel build --config android --config optimization mace:libmace --cpu=armeabi-v7a --define neon=true --define openmp=true
+rm -f $LIB_DIR/armeabi-v7a/*
-cp bazel-bin/mace/libmace.so $LIB_DIR/armeabi-v7a/
-echo "build lib for armeabi-v7a with hexagon dsp"
-rm -rf $LIB_DIR/armeabi-v7a/hexagon-dsp
-mkdir -p $LIB_DIR/armeabi-v7a/hexagon-dsp
-bazel build --config android --config optimization mace:libmace --cpu=armeabi-v7a --define neon=true --define openmp=true --define hexagon=true
-cp bazel-bin/mace/libmace.so $LIB_DIR/armeabi-v7a/hexagon-dsp/
-cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/hexagon-dsp/
-echo "build lib for arm64-v8a"
-rm -rf $LIB_DIR/arm64-v8a
 mkdir -p $LIB_DIR/arm64-v8a
-bazel build --config android --config optimization mace:libmace --cpu=arm64-v8a --define neon=true --define openmp=true
+rm -f $LIB_DIR/arm64-v8a/*
-cp bazel-bin/mace/libmace.so $LIB_DIR/arm64-v8a/
-echo "build lib for linux-x86-64"
-rm -rf $LIB_DIR/linux-x86-64
 mkdir -p $LIB_DIR/linux-x86-64
-bazel build --config optimization mace:libmace --define openmp=true
+rm -f $LIB_DIR/linux-x86-64/*
+# build shared libraries
+echo "build shared lib for armeabi-v7a"
+bazel build --config android --config optimization mace:libmace --define neon=true --define openmp=true --define hexagon=true --cpu=armeabi-v7a
+cp bazel-bin/mace/libmace.so $LIB_DIR/armeabi-v7a/
+cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/
+echo "build shared lib for arm64-v8a"
+bazel build --config android --config optimization mace:libmace --define neon=true --define openmp=true --cpu=arm64-v8a
+cp bazel-bin/mace/libmace.so $LIB_DIR/arm64-v8a/
+echo "build shared lib for linux-x86-64"
+bazel build mace:libmace --config optimization --define openmp=true
 cp bazel-bin/mace/libmace.so $LIB_DIR/linux-x86-64/
+# build static libraries
+echo "build static lib for armeabi-v7a"
+bazel build --config android --config optimization mace:libmace_static --define neon=true --define openmp=true --define hexagon=true --cpu=armeabi-v7a
+cp bazel-genfiles/mace/libmace.a $LIB_DIR/armeabi-v7a/
+cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/
+echo "build static lib for arm64-v8a"
+bazel build --config android --config optimization mace:libmace_static --define neon=true --define openmp=true --cpu=arm64-v8a
+cp bazel-genfiles/mace/libmace.a $LIB_DIR/arm64-v8a/
+echo "build static lib for linux-x86-64"
+bazel build mace:libmace --config optimization --define openmp=true
+cp bazel-genfiles/mace/libmace.a $LIB_DIR/linux-x86-64/
+echo "LIB PATH: $LIB_DIR"
+echo "INCLUDE FILE PATH: $INCLUDE_DIR"
This diff is collapsed.
This diff is collapsed.