提交 bd942fe7 编写于 作者: 叶剑武

Merge branch 'refactor-converter' into 'master'

Refactor converter

See merge request !642
......@@ -9,6 +9,7 @@ mace/codegen/opencl_bin/
mace/codegen/tuning/
mace/codegen/version/
mace/codegen/engine/
mace/codegen/lib/
build/
docs/_build/
*.a
......
......@@ -69,8 +69,6 @@ extra_tests:
platform_compatible_tests:
stage: platform_compatible_tests
script:
- mkdir -p mace/codegen/version && bash mace/tools/git/gen_version_source.sh mace/codegen/version/version.cc
- mkdir -p mace/codegen/tuning && python mace/python/tools/binary_codegen.py --output_path=mace/codegen/tuning/tuning_params.cc
- bazel build mace/core:core
ndk_versions_compatible_tests:
......@@ -101,5 +99,8 @@ python_tools_tests:
- rm -rf mace-models
- GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" git clone git@github.com:XiaoMi/mace-models.git
- CONF_FILE=mace-models/mobilenet-v2/mobilenet-v2.yml
- sh -c "python tools/converter.py build --config=${CONF_FILE} --disable_tuning && python tools/converter.py run --config=${CONF_FILE} --round=1 --validate && python tools/converter.py run --config=${CONF_FILE} --example --round=1 --validate" || exit 1
- >
python tools/converter.py convert --config=${CONF_FILE} --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
python tools/converter.py run --config=${CONF_FILE} --example --round=1 --validate --model_graph_format=file --model_data_format=file || exit 1;
- rm -rf mace-models
workspace(name = "mace")
# generate version and opencl kernel code.
load("//repository/git:git_configure.bzl", "git_version_repository")
load("//repository/opencl-kernel:opencl_kernel_configure.bzl", "encrypt_opencl_kernel_repository")
git_version_repository(name="local_version_config")
encrypt_opencl_kernel_repository(name="local_opencl_kernel_encrypt")
# proto_library rules implicitly depend on @com_google_protobuf//:protoc,
# which is the proto-compiler.
# This statement defines the @com_google_protobuf repo.
......
......@@ -73,8 +73,33 @@ cc_library(
visibility = ["//visibility:public"],
)
cc_library(
genrule(
name = "libmace_static",
srcs = ["libmace.a"],
srcs = [
"//mace/codegen:generated_opencl",
"//mace/codegen:generated_version",
"//mace/core",
"//mace/kernels",
"//mace/ops",
"//mace/utils",
"//mace/proto:mace_cc",
"@com_google_protobuf//:protobuf_lite",
],
outs = ["libmace.a"],
cmd = "tmp_mri_file=$$(mktemp mace-static-lib-mri.XXXXXXXXXX);" +
"mri_stream=$$(python $(location //mace/python/tools:archive_static_lib) " +
"$(locations //mace/codegen:generated_opencl) " +
"$(locations //mace/codegen:generated_version) " +
"$(locations //mace/core:core) " +
"$(locations //mace/kernels:kernels) " +
"$(locations //mace/ops:ops) " +
"$(locations //mace/utils:utils) " +
"$(locations //mace/proto:mace_cc) " +
"$(locations @com_google_protobuf//:protobuf_lite) " +
"$@ " +
"$$tmp_mri_file);" +
"$(AR) -M <$$tmp_mri_file;" +
"rm -rf $$tmp_mri_file;",
tools = ["//mace/python/tools:archive_static_lib"],
visibility = ["//visibility:public"],
)
......@@ -36,11 +36,12 @@ cc_binary(
"//external:gflags_nothreads",
"//mace/codegen:generated_models",
"//mace/codegen:generated_mace_engine_factory",
"//mace/ops:ops",
],
)
cc_binary(
name = "benchmark_model_shared",
name = "benchmark_model_dynamic",
srcs = [
"benchmark_model.cc",
],
......
......@@ -26,7 +26,9 @@
#include "mace/utils/logging.h"
#include "mace/utils/utils.h"
#include "mace/benchmark/statistics.h"
#ifdef MODEL_GRAPH_FORMAT_CODE
#include "mace/codegen/engine/mace_engine_factory.h"
#endif
namespace mace {
namespace benchmark {
......@@ -191,6 +193,9 @@ DEFINE_int32(warmup_runs, 1, "how many runs to initialize model");
DEFINE_string(opencl_binary_file,
"",
"compiled opencl binary file path");
DEFINE_string(opencl_parameter_file,
"",
"tuned OpenCL parameter file path");
DEFINE_string(model_data_file, "",
"model data file name, used when EMBED_MODEL_DATA set to 0");
DEFINE_string(model_file, "",
......@@ -267,6 +272,8 @@ int Main(int argc, char **argv) {
std::vector<std::string> opencl_binary_paths = {FLAGS_opencl_binary_file};
mace::SetOpenCLBinaryPaths(opencl_binary_paths);
mace::SetOpenCLParameterPath(FLAGS_opencl_parameter_file);
}
#endif // MACE_ENABLE_OPENCL
......@@ -285,27 +292,30 @@ int Main(int argc, char **argv) {
// Create Engine
const char *model_data_file_ptr =
FLAGS_model_data_file.empty() ? nullptr : FLAGS_model_data_file.c_str();
std::vector<unsigned char> model_pb_data;
if (FLAGS_model_file != "") {
std::vector<unsigned char> model_pb_data;
if (!mace::ReadBinaryFile(&model_pb_data, FLAGS_model_file)) {
LOG(FATAL) << "Failed to read file: " << FLAGS_model_file;
}
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
model_data_file_ptr,
input_names,
output_names,
device_type,
&engine);
} else {
create_engine_status =
}
#ifdef MODEL_GRAPH_FORMAT_CODE
create_engine_status =
CreateMaceEngineFromCode(FLAGS_model_name,
model_data_file_ptr,
input_names,
output_names,
device_type,
&engine);
}
#else
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
model_data_file_ptr,
input_names,
output_names,
device_type,
&engine);
#endif
if (create_engine_status != MaceStatus::MACE_SUCCESS) {
LOG(FATAL) << "Create engine error, please check the arguments";
}
......
......@@ -5,6 +5,8 @@ package(
default_visibility = ["//visibility:public"],
)
load("//mace:mace.bzl", "mace_version_genrule", "encrypt_opencl_kernel_genrule")
cc_library(
name = "generated_models",
srcs = glob(["models/*/*.cc"]),
......@@ -12,19 +14,16 @@ cc_library(
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
deps = [
"//mace/core",
"//mace/ops",
],
)
cc_library(
name = "generated_opencl",
srcs = glob(["opencl/*.cc"]),
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
)
mace_version_genrule()
encrypt_opencl_kernel_genrule()
cc_library(
name = "generated_tuning_params",
srcs = ["tuning/tuning_params.cc"],
name = "generated_opencl",
srcs = ["opencl/encrypt_opencl_kernel.cc"],
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
)
......@@ -36,9 +35,22 @@ cc_library(
cc_library(
name = "generated_mace_engine_factory",
hdrs = ["engine/mace_engine_factory.h"],
hdrs = glob(["engine/*.h"]),
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
deps = [
"//mace/public",
],
)
cc_library(
name = "generated_libmace",
srcs = glob(["lib/*.so"]),
visibility = ["//visibility:public"],
)
cc_library(
name = "generated_libmace_static",
srcs = glob(["lib/*.a"]),
linkstatic = 1,
visibility = ["//visibility:public"],
)
......@@ -61,7 +61,6 @@ cc_library(
]),
deps = [
"//mace/codegen:generated_version",
"//mace/codegen:generated_tuning_params",
"//mace/proto:mace_cc",
"//mace/utils",
] + if_android([
......
......@@ -24,4 +24,10 @@ void SetKVStorageFactory(std::shared_ptr<KVStorageFactory> storage_factory) {
kStorageFactory = storage_factory;
}
std::string kOpenCLParameterPath; // NOLINT(runtime/string)
void SetOpenCLParameterPath(const std::string &path) {
kOpenCLParameterPath = path;
}
}; // namespace mace
Examples
=======
* Build the example (e.g., with armeabi-v7a target)
```
# To enable debug mode build, use '-c dbg' flag.
# To check the underlying commands executed, use '-s' flag.
# To check the failed command, use '--verbose_failures' flag.
bazel build -c opt mace/examples:helloworld \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=arm64-v8a
```
* To run adb inside docker, the container network should use 'host'
```
docker run -it --net=host mace-dev /bin/bash
```
* Push and run the example
```
adb shell "mkdir /data/local/tmp"
adb push bazel-bin/mace/examples/helloworld /data/local/tmp/
adb shell /data/local/tmp/helloworld
```
* Check the logs
```
adb logcat | grep native
```
......@@ -23,14 +23,14 @@ cc_binary(
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace:libmace_static",
"//mace/codegen:generated_libmace_static",
] + if_hexagon_enabled([
"//third_party/nnlib:libhexagon",
]),
)
cc_binary(
name = "example_shared",
name = "example_dynamic",
srcs = ["example.cc"],
copts = [
"-Werror",
......@@ -39,13 +39,20 @@ cc_binary(
] + if_android([
"-DMACE_ENABLE_OPENCL",
]),
linkopts = ["-lm", "-pie", "-fPIE"] + if_openmp_enabled(["-fopenmp"]),
linkopts = [
"-lm",
] + if_openmp_enabled([
"-fopenmp"
]) + if_android([
"-ldl",
"-pie",
"-llog",
]),
linkstatic = 0,
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/utils:utils",
"//mace:libmace",
"//mace/codegen:generated_libmace",
],
)
Examples
=======
* Convert model
```
python tools/converter.py convert --config=/path/to/your/model_deployment_file
```
* Run example
```
python tools/converter.py run --config=/path/to/your/model_deployment_file --example
```
* Validate result
```
python tools/converter.py run --config=/path/to/your/model_deployment_file --example --validate
```
* Check the logs
```
adb logcat
```
......@@ -23,7 +23,7 @@
#include "mace/public/mace.h"
#include "mace/public/mace_runtime.h"
// if convert model to code.
#ifdef CODE_TYPE
#ifdef MODEL_GRAPH_FORMAT_CODE
#include "mace/codegen/engine/mace_engine_factory.h"
#endif
......@@ -108,6 +108,9 @@ DEFINE_string(output_file,
DEFINE_string(opencl_binary_file,
"",
"compiled opencl binary file path");
DEFINE_string(opencl_parameter_file,
"",
"tuned OpenCL parameter file path");
DEFINE_string(model_data_file,
"",
"model data file name, used when EMBED_MODEL_DATA set to 0");
......@@ -123,7 +126,7 @@ DEFINE_int32(gpu_priority_hint, 3, "0:DEFAULT/1:LOW/2:NORMAL/3:HIGH");
DEFINE_int32(omp_num_threads, -1, "num of openmp threads");
DEFINE_int32(cpu_affinity_policy, 1,
"0:AFFINITY_NONE/1:AFFINITY_BIG_ONLY/2:AFFINITY_LITTLE_ONLY");
#ifndef CODE_TYPE
#ifndef MODEL_GRAPH_FORMAT_CODE
namespace {
bool ReadBinaryFile(std::vector<unsigned char> *data,
const std::string &filename) {
......@@ -172,6 +175,8 @@ bool RunModel(const std::vector<std::string> &input_names,
// you should update the binary when OpenCL Driver changed.
std::vector<std::string> opencl_binary_paths = {FLAGS_opencl_binary_file};
mace::SetOpenCLBinaryPaths(opencl_binary_paths);
mace::SetOpenCLParameterPath(FLAGS_opencl_parameter_file);
}
#endif // MACE_ENABLE_OPENCL
......@@ -191,7 +196,7 @@ bool RunModel(const std::vector<std::string> &input_names,
MaceStatus create_engine_status;
// Only choose one of the two type based on the `build_type`
// in model deployment file(.yml).
#ifdef CODE_TYPE
#ifdef MODEL_GRAPH_FORMAT_CODE
create_engine_status =
CreateMaceEngineFromCode(FLAGS_model_name,
FLAGS_model_data_file,
......
......@@ -47,3 +47,21 @@ def if_openmp_enabled(a):
"//mace:openmp_enabled": a,
"//conditions:default": [],
})
# Declares a native genrule that materializes mace/codegen/version/version.cc
# by concatenating the version source exported by the @local_version_config
# repository (produced by gen_version_source.sh at repository-fetch time).
def mace_version_genrule():
native.genrule(
name = "mace_version_gen",
srcs = [str(Label("@local_version_config//:gen/version"))],
outs = ["version/version.cc"],
cmd = "cat $(SRCS) > $@;"
)
# Declares a native genrule that materializes
# mace/codegen/opencl/encrypt_opencl_kernel.cc by concatenating the encrypted
# kernel source exported by the @local_opencl_kernel_encrypt repository
# (produced by encrypt_opencl_codegen.py at repository-fetch time).
def encrypt_opencl_kernel_genrule():
native.genrule(
name = "encrypt_opencl_kernel_gen",
srcs = [str(Label("@local_opencl_kernel_encrypt//:gen/encrypt_opencl_kernel"))],
outs = ["opencl/encrypt_opencl_kernel.cc"],
cmd = "cat $(SRCS) > $@;"
)
......@@ -6,6 +6,7 @@ mace {
*FileStorageFactory*;
*SetKVStorageFactory*;
*SetOpenCLBinaryPaths*;
*SetOpenCLParameterPath*;
*SetGPUHints*;
*SetOpenMPThreadPolicy*;
*SetOpenMPThreadAffinity*;
......
......@@ -90,6 +90,14 @@ void SetKVStorageFactory(std::shared_ptr<KVStorageFactory> storage_factory);
__attribute__((visibility("default")))
void SetOpenCLBinaryPaths(const std::vector<std::string> &paths);
// Just call once. (Not thread-safe)
// Set the path of Generated OpenCL parameter file
// if you use gpu for specific soc.
// The parameters is the local work group size tuned for specific SOC, which
// may be faster than the general parameters.
__attribute__((visibility("default")))
void SetOpenCLParameterPath(const std::string &path);
// Set GPU hints, currently only supports Adreno GPU.
//
// Caution: this function may hurt performance if improper parameters provided.
......
......@@ -48,3 +48,10 @@ py_binary(
"@six_archive//:six",
],
)
py_binary(
name = "archive_static_lib",
srcs = ["archive_static_lib.py"],
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
)
# Copyright 2018 Xiaomi, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def is_static_lib(lib_name):
    """Return True when the path names a static archive (.a or .lo)."""
    return any(lib_name.endswith(suffix) for suffix in ('.a', '.lo'))
def merge_libs(input_libs, output_lib_path, mri_script):
    """Write an `ar` MRI script that merges static libs into one archive.

    Args:
        input_libs: candidate library paths; entries that are not static
            archives (per is_static_lib) are skipped.
        output_lib_path: path of the combined archive the MRI script creates.
        mri_script: file path the generated MRI commands are written to.
    """
    commands = ["create %s" % output_lib_path]
    commands.extend("addlib %s" % lib
                    for lib in input_libs if is_static_lib(lib))
    commands.append("save")
    commands.append("end")
    with open(mri_script, 'w') as script_file:
        script_file.write("\n".join(commands) + "\n")
# CLI entry point: every argument except the last two is an input library
# path; argv[-2] is the output archive path and argv[-1] is the path where
# the generated MRI script is written (matches the genrule invocation order).
if __name__ == '__main__':
merge_libs(sys.argv[1:-2], sys.argv[-2], sys.argv[-1])
......@@ -198,7 +198,7 @@ def main(unused_args):
FLAGS.output_dir, FLAGS.runtime,
FLAGS.embed_model_data,
FLAGS.winograd, FLAGS.data_type,
FLAGS.model_build_type)
FLAGS.model_graph_format)
def str2bool(v):
......@@ -277,10 +277,10 @@ def parse_args():
default=True,
help="embed model data.")
parser.add_argument(
"--model_build_type",
"--model_graph_format",
type=str,
default="code",
help="[proto|code] build models to code" +
default="file",
help="[file|code] build models to code" +
"or `Protobuf` file.")
parser.add_argument(
"--data_type",
......
......@@ -14,6 +14,7 @@
import argparse
import os
import shutil
import sys
import jinja2
......@@ -68,8 +69,21 @@ def encrypt_opencl_codegen(cl_kernel_dir, output_path):
data_type='unsigned char',
variable_name='kEncryptedProgramMap')
if os.path.isfile(output_path):
os.remove(output_path)
output_dir = os.path.dirname(output_path)
if os.path.exists(output_dir):
if os.path.isdir(output_dir):
try:
shutil.rmtree(output_dir)
except OSError:
raise RuntimeError(
"Cannot delete directory %s due to permission "
"error, inspect and remove manually" % output_dir)
else:
raise RuntimeError(
"Cannot delete non-directory %s, inspect ",
"and remove manually" % output_dir)
os.makedirs(output_dir)
with open(output_path, "w") as w_file:
w_file.write(cpp_cl_encrypted_kernel)
......
......@@ -24,7 +24,6 @@
namespace mace {
{% if model_type == 'code' %}
{% for tag in model_tags %}
namespace {{tag}} {
......@@ -59,7 +58,6 @@ MaceStatus CreateMaceEngineFromCode(
if (engine == nullptr) {
return MaceStatus::MACE_INVALID_ARGS;
}
const unsigned char * model_data = nullptr;
std::shared_ptr<NetDef> net_def;
MaceStatus status = MaceStatus::MACE_SUCCESS;
switch (model_name_map[model_name]) {
......@@ -69,7 +67,7 @@ MaceStatus CreateMaceEngineFromCode(
engine->reset(new mace::MaceEngine(device_type));
{% if embed_model_data %}
(void)model_data_file;
model_data =
const unsigned char * model_data =
mace::{{model_tags[i]}}::LoadModelData();
status = (*engine)->Init(net_def.get(), input_nodes, output_nodes, model_data);
{% else %}
......@@ -83,22 +81,5 @@ MaceStatus CreateMaceEngineFromCode(
return status;
}
{% else %}
MaceStatus CreateMaceEngineFromCode(
const std::string &model_name,
const std::string &model_data_file,
const std::vector<std::string> &input_nodes,
const std::vector<std::string> &output_nodes,
const DeviceType device_type,
std::shared_ptr<MaceEngine> *engine) {
(void)(model_name);
(void)(model_data_file);
(void)(input_nodes);
(void)(output_nodes);
(void)(device_type);
(void)(engine);
return MaceStatus::MACE_INVALID_ARGS;
}
{% endif %}
} // namespace mace
......@@ -20,7 +20,7 @@ from jinja2 import Environment, FileSystemLoader
FLAGS = None
def gen_mace_engine_factory(model_tags, template_dir, model_type,
def gen_mace_engine_factory(model_tags, template_dir,
embed_model_data, output_dir):
# Create the jinja2 environment.
j2_env = Environment(
......@@ -30,33 +30,6 @@ def gen_mace_engine_factory(model_tags, template_dir, model_type,
source = j2_env.get_template(template_name).render(
model_tags=model_tags,
embed_model_data=embed_model_data,
model_type=model_type,
)
with open(output_dir + '/mace_engine_factory.h', "wb") as f:
f.write(source)
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_tag",
type=str,
default="",
help="model tag")
parser.add_argument(
"--template_dir", type=str, default="", help="template path")
parser.add_argument(
"--output_dir", type=str, default="", help="output path")
parser.add_argument(
"--model_type",
type=str,
default="",
help="[source|pb] model load type")
return parser.parse_known_args()
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
gen_mace_engine_creator(FLAGS.model_tag, FLAGS.template_dir,
FLAGS.model_type, FLAGS.output_dir)
......@@ -14,7 +14,6 @@
// This is a generated file. DO NOT EDIT!
#include <vector>
#include <string>
#include "mace/core/macros.h"
......
......@@ -33,6 +33,11 @@ GPUDataType = \
Enum('GPUDataType', [(ele, ele) for ele in GPUDataTypeStrs], type=str)
# Enumerates how a converted model graph is emitted: as a standalone
# protobuf file ("file") or as generated C++ source ("code").
class ModelFormat(object):
file = "file"
code = "code"
def generate_obfuscated_name(namespace, name):
md5 = hashlib.md5()
md5.update(namespace)
......@@ -240,15 +245,15 @@ def save_model_to_code(net_def, model_tag, runtime,
counter += 1
# generate tensor data
model_data = extract_model_data(net_def)
template_name = 'tensor_data.jinja2'
source = j2_env.get_template(template_name).render(
tag=model_tag,
embed_model_data=embed_model_data,
model_data_size=len(model_data),
model_data=model_data)
with open(output_dir + 'tensor_data' + '.cc', "wb") as f:
f.write(source)
if embed_model_data:
model_data = extract_model_data(net_def)
template_name = 'tensor_data.jinja2'
source = j2_env.get_template(template_name).render(
tag=model_tag,
model_data_size=len(model_data),
model_data=model_data)
with open(output_dir + 'tensor_data' + '.cc', "wb") as f:
f.write(source)
# generate op source files
template_name = 'operator.jinja2'
......@@ -293,7 +298,7 @@ def save_model_to_code(net_def, model_tag, runtime,
def save_model(net_def, model_checksum, weight_checksum, template_dir,
obfuscate, model_tag, output_dir, runtime, embed_model_data,
winograd_conv, data_type, model_build_type):
winograd_conv, data_type, model_graph_format):
if obfuscate:
obfuscate_name(net_def)
else:
......@@ -303,10 +308,10 @@ def save_model(net_def, model_checksum, weight_checksum, template_dir,
# update tensor type
update_tensor_infos(net_def, runtime, data_type)
if model_build_type == 'proto' or not embed_model_data:
if model_graph_format == ModelFormat.file or not embed_model_data:
save_model_data(net_def, model_tag, output_dir)
if model_build_type == 'proto':
if model_graph_format == ModelFormat.file:
save_model_to_proto(net_def, model_tag, output_dir)
else:
save_model_to_code(net_def, model_tag, runtime,
......
......@@ -14,28 +14,17 @@
// This is a generated file. DO NOT EDIT!
#include <vector>
#include <string>
#include "mace/core/macros.h"
#include "mace/public/mace.h"
#include "mace/utils/env_time.h"
#include "mace/utils/logging.h"
namespace mace {
namespace {{tag}} {
{% if embed_model_data %}
alignas(4) const unsigned char model_data[{{ model_data_size }}] = {
{% for d in model_data %}{{"0x%02X, " % d }}{%endfor%}
};
{% endif %}
{% if embed_model_data %}
const unsigned char *LoadModelData() {
return model_data;
}
{% endif %}
} // namespace {{tag}}
} // namespace mace
......
......@@ -14,9 +14,6 @@
// This is a generated file. DO NOT EDIT!
#include <vector>
#include <string>
#include "mace/proto/mace.pb.h"
#include "mace/public/mace.h"
#include "mace/utils/env_time.h"
......
......@@ -13,12 +13,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
MACE_SOURCE_DIR=$(dirname "$0")
OUTPUT_FILENAME=$1

# Require the output filename argument.
# Bug fix: the previous guard tested "${OUTPUT_FILENAME}}" (stray trailing
# brace), so the expansion always ended in '}' and was never empty -- the
# usage error below could never trigger on a missing argument.
if [[ -z "${OUTPUT_FILENAME}" ]]; then
  echo "Usage: $0 <filename>"
  exit 1
fi

# Recreate the output directory from scratch so stale generated files from
# a previous run cannot leak into this build.
OUTPUT_DIR=$(dirname "$OUTPUT_FILENAME")
if [ -d "$OUTPUT_DIR" ]; then
  rm -rf "$OUTPUT_DIR"
fi
mkdir -p "$OUTPUT_DIR"
pushd $MACE_SOURCE_DIR
DATE_STR=$(date +%Y%m%d)
GIT_VERSION=$(git describe --long --tags)
if [[ $? != 0 ]]; then
......@@ -49,3 +60,5 @@ __attribute__((visibility("default")))
const char *MaceVersion() { return "MACEVER-${GIT_VERSION}" + 8; }
} // namespace mace
EOF
popd
......@@ -16,11 +16,12 @@ cc_binary(
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/codegen:generated_models",
"//mace/ops:ops",
],
)
cc_binary(
name = "mace_run_shared",
name = "mace_run_dynamic",
srcs = ["mace_run.cc"],
copts = [
"-Werror",
......@@ -33,7 +34,7 @@ cc_binary(
deps = [
"//external:gflags_nothreads",
"//mace/codegen:generated_mace_engine_factory",
"//mace/utils:utils",
"//mace:libmace",
"//mace/utils:utils",
],
)
......@@ -38,7 +38,9 @@
#include "mace/utils/logging.h"
#include "mace/utils/utils.h"
#ifdef MODEL_GRAPH_FORMAT_CODE
#include "mace/codegen/engine/mace_engine_factory.h"
#endif
namespace mace {
namespace tools {
......@@ -175,6 +177,9 @@ DEFINE_string(output_file,
DEFINE_string(opencl_binary_file,
"",
"compiled opencl binary file path");
DEFINE_string(opencl_parameter_file,
"",
"tuned OpenCL parameter file path");
DEFINE_string(model_data_file,
"",
"model data file name, used when EMBED_MODEL_DATA set to 0 or 2");
......@@ -212,6 +217,8 @@ bool RunModel(const std::string &model_name,
std::vector<std::string> opencl_binary_paths = {FLAGS_opencl_binary_file};
mace::SetOpenCLBinaryPaths(opencl_binary_paths);
mace::SetOpenCLParameterPath(FLAGS_opencl_parameter_file);
}
#endif // MACE_ENABLE_OPENCL
......@@ -238,23 +245,24 @@ bool RunModel(const std::string &model_name,
while (true) {
// Create Engine
int64_t t0 = NowMicros();
if (FLAGS_model_file != "") {
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
} else {
create_engine_status =
#ifdef MODEL_GRAPH_FORMAT_CODE
create_engine_status =
CreateMaceEngineFromCode(model_name,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
}
#else
(void)(model_name);
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
#endif
int64_t t1 = NowMicros();
if (create_engine_status != MACE_SUCCESS) {
......@@ -311,23 +319,23 @@ bool RunModel(const std::string &model_name,
LOG(ERROR) << "Warmup runtime error, retry ... errcode: "
<< warmup_status;
do {
if (FLAGS_model_file != "") {
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
} else {
create_engine_status =
CreateMaceEngineFromCode(model_name,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
}
#ifdef MODEL_GRAPH_FORMAT_CODE
create_engine_status =
CreateMaceEngineFromCode(model_name,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
#else
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
#endif
} while (create_engine_status != MACE_SUCCESS);
} else {
int64_t t4 = NowMicros();
......@@ -351,23 +359,23 @@ bool RunModel(const std::string &model_name,
LOG(ERROR) << "Mace run model runtime error, retry ... errcode: "
<< run_status;
do {
if (FLAGS_model_file != "") {
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
} else {
create_engine_status =
CreateMaceEngineFromCode(model_name,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
}
#ifdef MODEL_GRAPH_FORMAT_CODE
create_engine_status =
CreateMaceEngineFromCode(model_name,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
#else
create_engine_status =
CreateMaceEngineFromProto(model_pb_data,
FLAGS_model_data_file,
input_names,
output_names,
device_type,
&engine);
#endif
} while (create_engine_status != MACE_SUCCESS);
} else {
int64_t t1 = NowMicros();
......
......@@ -31,7 +31,6 @@ cc_library(
copts = ["-Werror", "-Wextra", "-Wno-missing-field-initializers"],
deps = [
"//mace/public",
"//mace/codegen:generated_tuning_params",
],
)
......@@ -49,6 +48,7 @@ cc_test(
linkstatic = 1,
deps = [
":utils",
"//mace/core",
"@gtest//:gtest",
"@gtest//:gtest_main",
],
......
......@@ -117,13 +117,31 @@ class Tuner {
}
inline void ReadRunParamters() {
extern const std::map<std::string, std::vector<unsigned int>>
kTuningParamsData;
if (!kTuningParamsData.empty()) {
for (auto it = kTuningParamsData.begin(); it != kTuningParamsData.end();
++it) {
param_table_.emplace(it->first, std::vector<unsigned int>(
it->second.begin(), it->second.end()));
extern std::string kOpenCLParameterPath;
if (!kOpenCLParameterPath.empty()) {
std::ifstream ifs(kOpenCLParameterPath, std::ios::binary | std::ios::in);
if (ifs.is_open()) {
int64_t num_params = 0;
ifs.read(reinterpret_cast<char *>(&num_params), sizeof(num_params));
while (num_params--) {
int32_t key_size = 0;
ifs.read(reinterpret_cast<char *>(&key_size), sizeof(key_size));
std::string key(key_size, ' ');
ifs.read(&key[0], key_size);
int32_t params_size = 0;
ifs.read(reinterpret_cast<char *>(&params_size), sizeof(params_size));
int32_t params_count = params_size / sizeof(unsigned int);
std::vector<unsigned int> params(params_count);
for (int i = 0; i < params_count; ++i) {
ifs.read(reinterpret_cast<char *>(&params[i]),
sizeof(unsigned int));
}
param_table_.emplace(key, params);
}
ifs.close();
} else {
LOG(WARNING) << "Read OpenCL tuned parameters file failed.";
}
} else {
LOG(INFO) << "There is no tuned parameters.";
......
# Description:
# Borrow from tensorflow
# Exports generated files used to generate mace/codegen/version/version.cc
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
exports_files(
glob(["gen/*"]),
)
\ No newline at end of file
"""Repository rule for Git autoconfiguration, borrow from tensorflow
"""
def _git_version_conf_impl(repository_ctx):
repository_ctx.template(
"BUILD",
Label("//repository/git:BUILD.tpl"))
mace_root_path = str(repository_ctx.path(Label("@mace//:BUILD")))[:-len("BUILD")]
generated_files_path = repository_ctx.path("gen")
repository_ctx.execute([
'bash', '%s/mace/tools/git/gen_version_source.sh' % mace_root_path
, '%s/version' % generated_files_path
], quiet=False)
git_version_repository = repository_rule(
implementation = _git_version_conf_impl,
local=True,
)
# Description:
# Exports generated files used to generate mace/codegen/opencl/opencl_encrypt_program.cc
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
exports_files(
glob(["gen/*"]),
)
\ No newline at end of file
"""Repository rule for opencl encrypt kernel autoconfiguration, borrow from tensorflow
"""
def _opencl_encrypt_kernel_impl(repository_ctx):
repository_ctx.template(
"BUILD",
Label("//repository/opencl-kernel:BUILD.tpl"))
mace_root_path = str(repository_ctx.path(Label("@mace//:BUILD")))[:-len("BUILD")]
generated_files_path = repository_ctx.path("gen")
python_bin_path = repository_ctx.which("python")
repository_ctx.execute([
python_bin_path, '%s/mace/python/tools/encrypt_opencl_codegen.py' % mace_root_path,
'--cl_kernel_dir=%s/mace/kernels/opencl/cl' % mace_root_path,
'--output_path=%s/encrypt_opencl_kernel' % generated_files_path
], quiet=False)
encrypt_opencl_kernel_repository = repository_rule(
implementation = _opencl_encrypt_kernel_impl,
local=True,
)
......@@ -130,11 +130,6 @@ def main(unused_args):
host_bin_path, bin_name = sh_commands.bazel_target_to_bin(target)
target_abis = FLAGS.target_abis.split(',')
# generate sources
sh_commands.gen_encrypted_opencl_source()
sh_commands.gen_mace_version()
sh_commands.gen_tuning_param_code([])
for target_abi in target_abis:
sh_commands.bazel_build(target, abi=target_abi,
enable_neon=FLAGS.enable_neon,
......
......@@ -21,28 +21,44 @@ python mace/python/tools/binary_codegen.py --output_path=mace/codegen/tuning/tun
# copy include headers
cp mace/public/*.h $INCLUDE_DIR/
echo "build lib for armeabi-v7a"
# make directories
rm -rf $LIB_DIR/armeabi-v7a
mkdir -p $LIB_DIR/armeabi-v7a
bazel build --config android --config optimization mace:libmace --cpu=armeabi-v7a --define neon=true --define openmp=true
cp bazel-bin/mace/libmace.so $LIB_DIR/armeabi-v7a/
echo "build lib for armeabi-v7a with hexagon dsp"
rm -rf $LIB_DIR/armeabi-v7a/hexagon-dsp
mkdir -p $LIB_DIR/armeabi-v7a/hexagon-dsp
bazel build --config android --config optimization mace:libmace --cpu=armeabi-v7a --define neon=true --define openmp=true --define hexagon=true
cp bazel-bin/mace/libmace.so $LIB_DIR/armeabi-v7a/hexagon-dsp/
cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/hexagon-dsp/
echo "build lib for arm64-v8a"
rm -rf $LIB_DIR/arm64-v8a
mkdir -p $LIB_DIR/arm64-v8a
bazel build --config android --config optimization mace:libmace --cpu=arm64-v8a --define neon=true --define openmp=true
cp bazel-bin/mace/libmace.so $LIB_DIR/arm64-v8a/
echo "build lib for linux-x86-64"
rm -rf $LIB_DIR/linux-x86-64
mkdir -p $LIB_DIR/linux-x86-64
bazel build --config optimization mace:libmace --define openmp=true
# build shared libraries
echo "build shared lib for armeabi-v7a"
bazel build --config android --config optimization mace:libmace --define neon=true --define openmp=true --define hexagon=true --cpu=armeabi-v7a
cp bazel-bin/mace/libmace.so $LIB_DIR/armeabi-v7a/
cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/
echo "build shared lib for arm64-v8a"
bazel build --config android --config optimization mace:libmace --define neon=true --define openmp=true --cpu=arm64-v8a
cp bazel-bin/mace/libmace.so $LIB_DIR/arm64-v8a/
echo "build shared lib for linux-x86-64"
bazel build mace:libmace --config optimization --define openmp=true
cp bazel-bin/mace/libmace.so $LIB_DIR/linux-x86-64/
# build static libraries
echo "build static lib for armeabi-v7a"
bazel build --config android --config optimization mace:libmace_static --define neon=true --define openmp=true --define hexagon=true --cpu=armeabi-v7a
cp bazel-genfiles/mace/libmace.a $LIB_DIR/armeabi-v7a/
cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/
echo "build static lib for arm64-v8a"
bazel build --config android --config optimization mace:libmace_static --define neon=true --define openmp=true --cpu=arm64-v8a
cp bazel-genfiles/mace/libmace.a $LIB_DIR/arm64-v8a/
echo "build static lib for linux-x86-64"
bazel build mace:libmace --config optimization --define openmp=true
cp bazel-genfiles/mace/libmace.a $LIB_DIR/linux-x86-64/
echo "LIB PATH: $LIB_DIR"
echo "INCLUDE FILE PATH: $INCLUDE_DIR"
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册