提交 8575dde5 编写于 作者: Y yejianwu

merge with libmace

上级 2e3dac1e
...@@ -56,3 +56,20 @@ android_ndk_repository( ...@@ -56,3 +56,20 @@ android_ndk_repository(
# Android 5.0 # Android 5.0
api_level = 21 api_level = 21
) )
git_repository(
name = "com_github_gflags_gflags",
#tag = "v2.2.0",
commit = "30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e", # v2.2.0 + fix of include path
remote = "https://github.com/gflags/gflags.git"
)
bind(
name = "gflags",
actual = "@com_github_gflags_gflags//:gflags",
)
bind(
name = "gflags_nothreads",
actual = "@com_github_gflags_gflags//:gflags_nothreads",
)
...@@ -29,7 +29,7 @@ cc_binary( ...@@ -29,7 +29,7 @@ cc_binary(
linkstatic = 1, linkstatic = 1,
deps = [ deps = [
":stat_summarizer", ":stat_summarizer",
"//codegen:generated_models", "//mace/codegen:generated_models",
"//external:gflags_nothreads", "//external:gflags_nothreads",
] + if_hexagon_enabled([ ] + if_hexagon_enabled([
"//lib/hexagon:hexagon", "//lib/hexagon:hexagon",
......
...@@ -104,6 +104,7 @@ cc_library( ...@@ -104,6 +104,7 @@ cc_library(
deps = [ deps = [
":opencl_headers", ":opencl_headers",
"//mace/codegen:generated_opencl_prod", "//mace/codegen:generated_opencl_prod",
"//mace/codegen:generated_tuning_params",
"//mace/utils", "//mace/utils",
], ],
) )
from lib.proto import mace_pb2 from mace.proto import mace_pb2
from lib.proto import caffe_pb2 from mace.proto import caffe_pb2
from lib.python.tools import memory_optimizer from mace.python.tools import memory_optimizer
import google.protobuf.text_format import google.protobuf.text_format
import numpy as np import numpy as np
import math import math
......
from mace.proto import caffe_pb2
import google.protobuf.text_format
import operator
import functools
import argparse
import sys
import six
import os.path
FLAGS = None
def main(unused_args):
  """Count Caffe layer types in a prototxt model and print a histogram.

  Reads the NetParameter text proto at FLAGS.input, tallies how many
  times each layer type occurs, and prints "type : count" lines sorted
  by ascending count.

  Returns:
    -1 when FLAGS.input does not exist; None otherwise.
  """
  if not os.path.isfile(FLAGS.input):
    # print(...) with a single argument behaves identically on
    # Python 2 and Python 3; the original bare print statement was
    # Python-2-only syntax.
    print('input model file not exist')
    return -1
  net = caffe_pb2.NetParameter()
  with open(FLAGS.input) as f:
    google.protobuf.text_format.Merge(str(f.read()), net)
  # Histogram: layer type name -> occurrence count.
  ops = {}
  for layer in net.layer:
    ops[layer.type] = ops.get(layer.type, 0) + 1
  for key, value in sorted(ops.items(), key=operator.itemgetter(1)):
    # Same "type : count" output as the original py2
    # `print key, ":", value`, but portable to Python 3.
    print('%s : %s' % (key, value))
def parse_args():
  """Build the command-line parser and parse the known arguments.

  Returns:
    The (namespace, leftover_args) pair from argparse's
    parse_known_args().
  """
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--input',
      type=str,
      default='',
      help='Caffe \'GraphDef\' file to load.')
  return arg_parser.parse_known_args()
if __name__ == '__main__':
  # Parse known flags into the module-level FLAGS; any leftover argv
  # entries are forwarded to main() untouched (argparse convention).
  FLAGS, unparsed = parse_args()
  main(unused_args=[sys.argv[0]] + unparsed)
import tensorflow as tf import tensorflow as tf
from lib.proto import mace_pb2 from mace.proto import mace_pb2
TF_DTYPE_2_MACE_DTYPE_MAP = { TF_DTYPE_2_MACE_DTYPE_MAP = {
tf.float32: mace_pb2.DT_FLOAT, tf.float32: mace_pb2.DT_FLOAT,
......
...@@ -2,7 +2,7 @@ import argparse ...@@ -2,7 +2,7 @@ import argparse
import sys import sys
import hashlib import hashlib
import os.path import os.path
from lib.python.tools import source_converter_lib from mace.python.tools import source_converter_lib
# ./bazel-bin/mace/python/tools/tf_converter --model_file quantized_test.pb --output quantized_test_dsp.pb --runtime dsp --input_dim input_node,1,28,28,3 # ./bazel-bin/mace/python/tools/tf_converter --model_file quantized_test.pb --output quantized_test_dsp.pb --runtime dsp --input_dim input_node,1,28,28,3
...@@ -39,17 +39,17 @@ def main(unused_args): ...@@ -39,17 +39,17 @@ def main(unused_args):
print("DSP not support caffe model yet.") print("DSP not support caffe model yet.")
sys.exit(-1) sys.exit(-1)
from lib.python.tools import caffe_converter_lib from mace.python.tools import caffe_converter_lib
output_graph_def = caffe_converter_lib.convert_to_mace_pb( output_graph_def = caffe_converter_lib.convert_to_mace_pb(
FLAGS.model_file, FLAGS.weight_file, FLAGS.input_node, FLAGS.input_shape, FLAGS.output_node, FLAGS.model_file, FLAGS.weight_file, FLAGS.input_node, FLAGS.input_shape, FLAGS.output_node,
FLAGS.data_type, FLAGS.runtime, FLAGS.winograd) FLAGS.data_type, FLAGS.runtime, FLAGS.winograd)
elif FLAGS.platform == 'tensorflow': elif FLAGS.platform == 'tensorflow':
if FLAGS.runtime == 'dsp': if FLAGS.runtime == 'dsp':
from lib.python.tools import tf_dsp_converter_lib from mace.python.tools import tf_dsp_converter_lib
output_graph_def = tf_dsp_converter_lib.convert_to_mace_pb( output_graph_def = tf_dsp_converter_lib.convert_to_mace_pb(
FLAGS.model_file, FLAGS.input_node, FLAGS.output_node, FLAGS.dsp_mode) FLAGS.model_file, FLAGS.input_node, FLAGS.output_node, FLAGS.dsp_mode)
else: else:
from lib.python.tools import tf_converter_lib from mace.python.tools import tf_converter_lib
output_graph_def = tf_converter_lib.convert_to_mace_pb( output_graph_def = tf_converter_lib.convert_to_mace_pb(
FLAGS.model_file, FLAGS.input_node, FLAGS.input_shape, FLAGS.output_node, FLAGS.model_file, FLAGS.input_node, FLAGS.input_shape, FLAGS.output_node,
FLAGS.data_type, FLAGS.runtime, FLAGS.winograd) FLAGS.data_type, FLAGS.runtime, FLAGS.winograd)
......
import tensorflow as tf import tensorflow as tf
from lib.proto import mace_pb2 from mace.proto import mace_pb2
from collections import OrderedDict from collections import OrderedDict
def sort_tf_node(node, nodes_map, ordered_nodes_map): def sort_tf_node(node, nodes_map, ordered_nodes_map):
......
import sys import sys
import operator import operator
from lib.proto import mace_pb2 from mace.proto import mace_pb2
class MemoryOptimizer(object): class MemoryOptimizer(object):
def __init__(self, net_def): def __init__(self, net_def):
......
...@@ -3,7 +3,7 @@ import uuid ...@@ -3,7 +3,7 @@ import uuid
import numpy as np import numpy as np
import hashlib import hashlib
from lib.proto import mace_pb2 from mace.proto import mace_pb2
from jinja2 import Environment, FileSystemLoader from jinja2 import Environment, FileSystemLoader
......
from lib.proto import mace_pb2 from mace.proto import mace_pb2
import tensorflow as tf import tensorflow as tf
import numpy as np import numpy as np
import math import math
import copy import copy
from tensorflow import gfile from tensorflow import gfile
from lib.python.tools import memory_optimizer from mace.python.tools import memory_optimizer
from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.core.framework import tensor_shape_pb2
......
from lib.proto import mace_pb2 from mace.proto import mace_pb2
import tensorflow as tf import tensorflow as tf
from tensorflow import gfile from tensorflow import gfile
from operator import mul from operator import mul
from dsp_ops import DspOps from dsp_ops import DspOps
from lib.python.tools import graph_util from mace.python.tools import graph_util
from lib.python.tools.convert_util import tf_dtype_2_mace_dtype from mace.python.tools.convert_util import tf_dtype_2_mace_dtype
# converter --input ../libcv/quantized_model.pb --output quantized_model_dsp.pb \ # converter --input ../libcv/quantized_model.pb --output quantized_model_dsp.pb \
# --runtime dsp --input_node input_node --output_node output_node # --runtime dsp --input_node input_node --output_node output_node
......
...@@ -21,7 +21,7 @@ if [ "$PRODUCTION_MODE" = 1 ]; then ...@@ -21,7 +21,7 @@ if [ "$PRODUCTION_MODE" = 1 ]; then
fi fi
if [ x"$TARGET_ABI" = x"host" ]; then if [ x"$TARGET_ABI" = x"host" ]; then
bazel build --verbose_failures -c opt --strip always codegen:generated_models \ bazel build --verbose_failures -c opt --strip always //mace/codegen:generated_models \
--copt="-std=c++11" \ --copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \ --copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \ --copt="-Werror=return-type" \
...@@ -30,7 +30,7 @@ if [ x"$TARGET_ABI" = x"host" ]; then ...@@ -30,7 +30,7 @@ if [ x"$TARGET_ABI" = x"host" ]; then
--copt="-O3" \ --copt="-O3" \
$PRODUCTION_MODE_BUILD_FLAGS || exit 1 $PRODUCTION_MODE_BUILD_FLAGS || exit 1
bazel build --verbose_failures -c opt --strip always examples:mace_run \ bazel build --verbose_failures -c opt --strip always //mace/examples:mace_run \
--copt="-std=c++11" \ --copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \ --copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \ --copt="-Werror=return-type" \
...@@ -43,13 +43,14 @@ else ...@@ -43,13 +43,14 @@ else
HEXAGON_MODE_BUILD_FLAG="--define hexagon=true" HEXAGON_MODE_BUILD_FLAG="--define hexagon=true"
fi fi
bazel build --verbose_failures -c opt --strip always examples:mace_run \ bazel build --verbose_failures -c opt --strip always //mace/examples:mace_run \
--crosstool_top=//external:android/crosstool \ --crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \ --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=${TARGET_ABI} \ --cpu=${TARGET_ABI} \
--copt="-std=c++11" \ --copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \ --copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \ --copt="-Werror=return-type" \
--copt="-DMACE_OBFUSCATE_LITERALS" \
--copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \ --copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
--define openmp=true \ --define openmp=true \
--copt="-O3" \ --copt="-O3" \
...@@ -64,10 +65,10 @@ fi ...@@ -64,10 +65,10 @@ fi
if [ -f "$MODEL_OUTPUT_DIR/mace_run" ]; then if [ -f "$MODEL_OUTPUT_DIR/mace_run" ]; then
rm -rf $MODEL_OUTPUT_DIR/mace_run rm -rf $MODEL_OUTPUT_DIR/mace_run
fi fi
cp bazel-bin/examples/mace_run $MODEL_OUTPUT_DIR cp bazel-bin/mace/examples/mace_run $MODEL_OUTPUT_DIR
if [ "$EMBED_MODEL_DATA" = 0 ]; then if [ "$EMBED_MODEL_DATA" = 0 ]; then
cp codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR cp mace/codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR
fi fi
# copy model header file to build output dir # copy model header file to build output dir
cp codegen/models/${MODEL_TAG}/${MODEL_TAG}.h $MODEL_OUTPUT_DIR cp mace/codegen/models/${MODEL_TAG}/${MODEL_TAG}.h $MODEL_OUTPUT_DIR
...@@ -35,9 +35,9 @@ build_target() ...@@ -35,9 +35,9 @@ build_target()
} }
if [ x"$TARGET_ABI" = x"host" ]; then if [ x"$TARGET_ABI" = x"host" ]; then
build_host_target //codegen:generated_opencl_prod build_host_target //mace/codegen:generated_opencl_prod
build_host_target //codegen:generated_tuning_params build_host_target //mace/codegen:generated_tuning_params
else else
build_target //codegen:generated_opencl_prod build_target //mace/codegen:generated_opencl_prod
build_target //codegen:generated_tuning_params build_target //mace/codegen:generated_tuning_params
fi fi
...@@ -5,4 +5,5 @@ if [ x"$TARGET_ABI" != x"host" ]; then ...@@ -5,4 +5,5 @@ if [ x"$TARGET_ABI" != x"host" ]; then
adb shell rm -rf $PHONE_DATA_DIR adb shell rm -rf $PHONE_DATA_DIR
fi fi
rm -rf codegen/models codegen/opencl codegen/tuning rm -rf mace/codegen/models
git checkout -- mace/codegen/opencl/opencl_compiled_program.cc mace/codegen/tuning/tuning_params.cc
#!/bin/bash
# Download a prebuilt libmace library bundle from the internal FDS file
# server (unless already cached under lib/) and expose it at lib/mace
# via a symlink, so build scripts can link against a fixed path.

Usage() {
  echo "Usage: bash tools/download_and_link_lib.sh libmace_v7_dsp"
}

# Exactly one argument (the library folder name) is required.
if [ $# -lt 1 ]; then
  Usage
  exit 1
fi

CURRENT_DIR=`dirname $0`
# NOTE(review): assumes env.sh defines LIBMACE_SOURCE_DIR and
# LIBMACE_TAG used below — confirm against tools/env.sh.
source ${CURRENT_DIR}/env.sh

LIB_FOLDER_NAME=$1

# Fetch and unpack the tarball only when this version is not present.
if [ ! -d "${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME}" ]; then
  wget -P ${LIBMACE_SOURCE_DIR}/lib http://cnbj1-inner-fds.api.xiaomi.net/libmace/libs/${LIBMACE_TAG}/${LIB_FOLDER_NAME}.tar.gz && \
  tar xvzf ${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME}.tar.gz -C ${LIBMACE_SOURCE_DIR}/lib/ || exit 1
  echo "${LIB_FOLDER_NAME} download successfully!"
else
  echo "${LIB_FOLDER_NAME} already exists!"
fi

echo "Create link 'mace' of downloaded or existed ${LIB_FOLDER_NAME}"
# Replace any stale symlink before re-linking, then drop the tarball.
if [ -L ${LIBMACE_SOURCE_DIR}/lib/mace ]; then
  unlink ${LIBMACE_SOURCE_DIR}/lib/mace
fi
ln -s ${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME} ${LIBMACE_SOURCE_DIR}/lib/mace && \
rm -rf ${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME}.tar.gz || exit 1
#!/usr/bin/env bash #!/usr/bin/env bash
LIBMACE_TAG=`git describe --abbrev=0 --tags` LIBMACE_TAG=`git describe --abbrev=0 --tags`
LIBMACE_SOURCE_DIR=`/bin/pwd` MACE_SOURCE_DIR=`/bin/pwd`
INPUT_FILE_NAME="model_input" INPUT_FILE_NAME="model_input"
OUTPUT_FILE_NAME="model_out" OUTPUT_FILE_NAME="model_out"
PHONE_DATA_DIR="/data/local/tmp/mace_run" PHONE_DATA_DIR="/data/local/tmp/mace_run"
KERNEL_DIR="${PHONE_DATA_DIR}/cl/" KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
CODEGEN_DIR=${LIBMACE_SOURCE_DIR}/codegen CODEGEN_DIR=${MACE_SOURCE_DIR}/mace/codegen
MODEL_CODEGEN_DIR=${CODEGEN_DIR}/models/${MODEL_TAG} MODEL_CODEGEN_DIR=${CODEGEN_DIR}/models/${MODEL_TAG}
CL_CODEGEN_DIR=${CODEGEN_DIR}/opencl CL_CODEGEN_DIR=${CODEGEN_DIR}/opencl
TUNING_CODEGEN_DIR=${CODEGEN_DIR}/tuning TUNING_CODEGEN_DIR=${CODEGEN_DIR}/tuning
...@@ -31,4 +31,4 @@ GENERATED_MODEL_LIB_NAME="libgenerated_models.a" ...@@ -31,4 +31,4 @@ GENERATED_MODEL_LIB_NAME="libgenerated_models.a"
if [ x"$TARGET_ABI" = x"host" ]; then if [ x"$TARGET_ABI" = x"host" ]; then
GENERATED_MODEL_LIB_NAME="libgenerated_models.pic.a" GENERATED_MODEL_LIB_NAME="libgenerated_models.pic.a"
fi fi
GENERATED_MODEL_LIB_PATH="bazel-bin/codegen/${GENERATED_MODEL_LIB_NAME}" GENERATED_MODEL_LIB_PATH="bazel-bin/mace/codegen/${GENERATED_MODEL_LIB_NAME}"
...@@ -3,14 +3,14 @@ ...@@ -3,14 +3,14 @@
CURRENT_DIR=`dirname $0` CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh source ${CURRENT_DIR}/env.sh
bazel build //lib/python/tools:converter || exit 1 bazel build //mace/python/tools:converter || exit 1
rm -rf ${MODEL_CODEGEN_DIR} rm -rf ${MODEL_CODEGEN_DIR}
mkdir -p ${MODEL_CODEGEN_DIR} mkdir -p ${MODEL_CODEGEN_DIR}
if [ ${DSP_MODE} ]; then if [ ${DSP_MODE} ]; then
DSP_MODE_FLAG="--dsp_mode=${DSP_MODE}" DSP_MODE_FLAG="--dsp_mode=${DSP_MODE}"
fi fi
PYTHONUNBUFFERED=1 bazel-bin/lib/python/tools/converter --platform=${PLATFORM} \ PYTHONUNBUFFERED=1 bazel-bin/mace/python/tools/converter --platform=${PLATFORM} \
--model_file=${MODEL_FILE_PATH} \ --model_file=${MODEL_FILE_PATH} \
--weight_file=${WEIGHT_FILE_PATH} \ --weight_file=${WEIGHT_FILE_PATH} \
--model_checksum=${MODEL_SHA256_CHECKSUM} \ --model_checksum=${MODEL_SHA256_CHECKSUM} \
...@@ -20,7 +20,7 @@ PYTHONUNBUFFERED=1 bazel-bin/lib/python/tools/converter --platform=${PLATFORM} \ ...@@ -20,7 +20,7 @@ PYTHONUNBUFFERED=1 bazel-bin/lib/python/tools/converter --platform=${PLATFORM} \
--data_type=${DATA_TYPE} \ --data_type=${DATA_TYPE} \
--runtime=${RUNTIME} \ --runtime=${RUNTIME} \
--output_type=source \ --output_type=source \
--template=${LIBMACE_SOURCE_DIR}/lib/python/tools \ --template=${MACE_SOURCE_DIR}/mace/python/tools \
--model_tag=${MODEL_TAG} \ --model_tag=${MODEL_TAG} \
--input_shape=${INPUT_SHAPES} \ --input_shape=${INPUT_SHAPES} \
${DSP_MODE_FLAG} \ ${DSP_MODE_FLAG} \
......
#!/bin/bash
# Generate the encrypted OpenCL kernel source and the git version stamp
# into CODEGEN_DIR, for embedding into the mace library build.

CURRENT_DIR=`dirname $0`
# NOTE(review): assumes env.sh defines CODEGEN_DIR — confirm against
# tools/env.sh.
source ${CURRENT_DIR}/env.sh

# Obfuscate the .cl kernel sources into a generated C++ file.
python mace/python/tools/encrypt_opencl_codegen.py \
  --cl_kernel_dir=./mace/kernels/opencl/cl/ \
  --output_path=${CODEGEN_DIR}/opencl/opencl_encrypt_program.cc || exit 1

# Regenerate the version source from git metadata into a clean dir.
rm -rf ${CODEGEN_DIR}/version
mkdir ${CODEGEN_DIR}/version
bash mace/tools/git/gen_version_source.sh ${CODEGEN_DIR}/version/version.cc || exit 1
...@@ -32,11 +32,11 @@ mkdir -p ${TUNING_CODEGEN_DIR} ...@@ -32,11 +32,11 @@ mkdir -p ${TUNING_CODEGEN_DIR}
set -x set -x
python lib/python/tools/opencl_codegen.py \ python mace/python/tools/opencl_codegen.py \
--cl_binary_dirs=${CL_BIN_DIRS} \ --cl_binary_dirs=${CL_BIN_DIRS} \
--output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc
python lib/python/tools/binary_codegen.py \ python mace/python/tools/binary_codegen.py \
--binary_dirs=${CL_BIN_DIRS} \ --binary_dirs=${CL_BIN_DIRS} \
--binary_file_name=mace_run.config \ --binary_file_name=mace_run.config \
--output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc
...@@ -17,29 +17,6 @@ import yaml ...@@ -17,29 +17,6 @@ import yaml
from ConfigParser import ConfigParser from ConfigParser import ConfigParser
def run_command_real_time(command):
  """Run a shell command, echoing its stderr then stdout line by line.

  Args:
    command: shell command string, executed via the shell (shell=True).

  Raises:
    Exception: if the command exits with a non-zero status.
  """
  print("Run command: {}".format(command))
  # universal_newlines=True makes the pipes text-mode so the '' EOF
  # sentinel comparison below works on both Python 2 and Python 3.
  # (On Python 3 the pipes would otherwise yield bytes, b'' never
  # compares equal to '', and the loops would spin forever.)
  process = subprocess.Popen(
      command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      universal_newlines=True)
  # NOTE(review): stderr is fully drained before stdout; a command that
  # fills the stdout pipe buffer while still writing stderr can
  # deadlock. Acceptable for the small build commands this wraps, but
  # worth confirming for long-running callees.
  while True:
    std_err = process.stderr.readline()
    if std_err == '' and process.poll() is not None:
      break
    if std_err:
      # Single-argument print(...) is valid in both Python 2 and 3;
      # the original bare print statement was Python-2-only.
      print(std_err.strip())
  while True:
    std_out = process.stdout.readline()
    if std_out == '' and process.poll() is not None:
      break
    if std_out:
      print(std_out.strip())
  ret_code = process.poll()
  if ret_code != 0:
    raise Exception("Exit not 0 from bash with code: {}, command: {}".format(
        ret_code, command))
def run_command(command): def run_command(command):
print("Run command: {}".format(command)) print("Run command: {}".format(command))
result = subprocess.Popen( result = subprocess.Popen(
...@@ -56,7 +33,7 @@ def run_command(command): ...@@ -56,7 +33,7 @@ def run_command(command):
result.returncode, command)) result.returncode, command))
def get_libs(target_abi, configs): def get_global_runtime(configs):
runtime_list = [] runtime_list = []
for model_name in configs["models"]: for model_name in configs["models"]:
model_runtime = configs["models"][model_name]["runtime"] model_runtime = configs["models"][model_name]["runtime"]
...@@ -72,12 +49,12 @@ def get_libs(target_abi, configs): ...@@ -72,12 +49,12 @@ def get_libs(target_abi, configs):
else: else:
raise Exception("Not found available RUNTIME in config files!") raise Exception("Not found available RUNTIME in config files!")
libmace_name = "libmace-{}-{}".format(target_abi, global_runtime) return global_runtime
command = "bash tools/download_and_link_lib.sh " + libmace_name
run_command(command)
return libmace_name def generate_opencl_and_version_code():
command = "bash tools/generate_opencl_and_version_code.sh"
run_command(command)
def clear_env(): def clear_env():
...@@ -94,7 +71,7 @@ def generate_random_input(model_output_dir): ...@@ -94,7 +71,7 @@ def generate_random_input(model_output_dir):
def generate_model_code(): def generate_model_code():
command = "bash tools/generate_model_code.sh" command = "bash tools/generate_model_code.sh"
run_command_real_time(command) run_command(command)
def build_mace_run(production_mode, model_output_dir, hexagon_mode): def build_mace_run(production_mode, model_output_dir, hexagon_mode):
...@@ -128,8 +105,8 @@ def generate_production_code(model_output_dirs, pull_or_not): ...@@ -128,8 +105,8 @@ def generate_production_code(model_output_dirs, pull_or_not):
run_command(command) run_command(command)
def build_mace_run_prod(model_output_dir, tuning, libmace_name): def build_mace_run_prod(model_output_dir, tuning, global_runtime):
if "dsp" in libmace_name: if "dsp" == global_runtime:
hexagon_mode = True hexagon_mode = True
else: else:
hexagon_mode = False hexagon_mode = False
...@@ -222,16 +199,14 @@ def main(unused_args): ...@@ -222,16 +199,14 @@ def main(unused_args):
FLAGS.round = 1 FLAGS.round = 1
FLAGS.restart_round = 1 FLAGS.restart_round = 1
# target_abi = configs["target_abi"]
# libmace_name = get_libs(target_abi, configs)
# Transfer params by environment
# os.environ["TARGET_ABI"] = target_abi
os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"]) os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"])
os.environ["VLOG_LEVEL"] = str(configs["vlog_level"]) os.environ["VLOG_LEVEL"] = str(configs["vlog_level"])
os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(FLAGS.config))[0] os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(FLAGS.config))[0]
generate_opencl_and_version_code()
for target_abi in configs["target_abis"]: for target_abi in configs["target_abis"]:
libmace_name = get_libs(target_abi, configs) global_runtime = get_global_runtime(configs)
# Transfer params by environment # Transfer params by environment
os.environ["TARGET_ABI"] = target_abi os.environ["TARGET_ABI"] = target_abi
model_output_dirs = [] model_output_dirs = []
...@@ -277,7 +252,7 @@ def main(unused_args): ...@@ -277,7 +252,7 @@ def main(unused_args):
if FLAGS.mode == "build" or FLAGS.mode == "all": if FLAGS.mode == "build" or FLAGS.mode == "all":
generate_model_code() generate_model_code()
build_mace_run_prod(model_output_dir, FLAGS.tuning, libmace_name) build_mace_run_prod(model_output_dir, FLAGS.tuning, global_runtime)
if FLAGS.mode == "run" or FLAGS.mode == "validate" or FLAGS.mode == "all": if FLAGS.mode == "run" or FLAGS.mode == "validate" or FLAGS.mode == "all":
run_model(model_output_dir, FLAGS.round, FLAGS.restart_round) run_model(model_output_dir, FLAGS.round, FLAGS.restart_round)
......
...@@ -19,24 +19,31 @@ MODEL_HEADER_DIR=${LIBMACE_BUILD_DIR}/libmace/include/mace/public ...@@ -19,24 +19,31 @@ MODEL_HEADER_DIR=${LIBMACE_BUILD_DIR}/libmace/include/mace/public
MODEL_DATA_DIR=${LIBMACE_BUILD_DIR}/libmace/data MODEL_DATA_DIR=${LIBMACE_BUILD_DIR}/libmace/data
rm -rf ${LIBMACE_BUILD_DIR}/libmace rm -rf ${LIBMACE_BUILD_DIR}/libmace
mkdir -p ${LIBMACE_BUILD_DIR}/libmace/include/mace/public
mkdir -p ${LIBMACE_BUILD_DIR}/libmace/lib mkdir -p ${LIBMACE_BUILD_DIR}/libmace/lib
mkdir -p ${MODEL_DATA_DIR} mkdir -p ${MODEL_DATA_DIR}
cp -rf ${LIBMACE_SOURCE_DIR}/include ${LIBMACE_BUILD_DIR}/libmace/ cp -rf ${MACE_SOURCE_DIR}/mace/public/*.h ${LIBMACE_BUILD_DIR}/libmace/
cp ${LIBMACE_SOURCE_DIR}/lib/hexagon/libhexagon_controller.so ${LIBMACE_BUILD_DIR}/libmace/lib cp ${MACE_SOURCE_DIR}/mace/core/runtime/hexagon/libhexagon_controller.so ${LIBMACE_BUILD_DIR}/libmace/lib
LIBMACE_TEMP_DIR=`mktemp -d -t libmace.XXXX` LIBMACE_TEMP_DIR=`mktemp -d -t libmace.XXXX`
# Merge all libraries in to one # Merge all libraries in to one
echo "create ${LIBMACE_BUILD_DIR}/libmace/lib/libmace_${PROJECT_NAME}.a" > ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "create ${LIBMACE_BUILD_DIR}/libmace/lib/libmace_${PROJECT_NAME}.a" > ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib lib/mace/libmace.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib lib/mace/libmace_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri if [ x"$TARGET_ABI" = x"host" ]; then
if [ x"TARGET_ABI" = x"host" ]; then echo "addlib bazel-bin/mace/codegen/libgenerated_opencl_prod.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/codegen/libgenerated_opencl_prod.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/codegen/libgenerated_tuning_params.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/codegen/libgenerated_tuning_params.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
else else
echo "addlib bazel-bin/codegen/libgenerated_opencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/codegen/libgenerated_opencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/codegen/libgenerated_tuning_params.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib bazel-bin/mace/codegen/libgenerated_tuning_params.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/codegen/libgenerated_version.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/core/libcore.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/core/libopencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/kernels/libkernels.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/utils/libutils.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/mace/utils/libutils_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
fi fi
for model_output_dir in ${MODEL_OUTPUT_DIRS_ARR[@]}; do for model_output_dir in ${MODEL_OUTPUT_DIRS_ARR[@]}; do
for lib in ${model_output_dir}/*.a; do for lib in ${model_output_dir}/*.a; do
echo "addlib ${lib}" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri echo "addlib ${lib}" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
......
...@@ -53,7 +53,7 @@ else ...@@ -53,7 +53,7 @@ else
if [ "$EMBED_MODEL_DATA" = 0 ]; then if [ "$EMBED_MODEL_DATA" = 0 ]; then
adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} > /dev/null || exit 1 adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} > /dev/null || exit 1
fi fi
adb push lib/hexagon/libhexagon_controller.so ${PHONE_DATA_DIR} > /dev/null || exit 1 adb push mace/core/runtime/hexagon/libhexagon_controller.so ${PHONE_DATA_DIR} > /dev/null || exit 1
mace_adb_output=`adb </dev/null shell \ mace_adb_output=`adb </dev/null shell \
"LD_LIBRARY_PATH=${PHONE_DATA_DIR} \ "LD_LIBRARY_PATH=${PHONE_DATA_DIR} \
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册