Commit 2e3dac1e authored by yejianwu

Merge branch 'master' of ../libmace

#!/bin/bash
Usage() {
echo "Usage: bash tools/benchmark.sh model_output_dir"
}
if [ $# -lt 1 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
MODEL_OUTPUT_DIR=$1
if [ -f "$MODEL_OUTPUT_DIR/benchmark_model" ]; then
rm -rf $MODEL_OUTPUT_DIR/benchmark_model
fi
if [ "$EMBED_MODEL_DATA" = 0 ]; then
cp codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR
fi
if [ x"$TARGET_ABI" == x"host" ]; then
bazel build --verbose_failures -c opt --strip always benchmark:benchmark_model \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
--copt="-O3" \
--define openmp=true \
--define production=true || exit 1
cp bazel-bin/benchmark/benchmark_model $MODEL_OUTPUT_DIR
MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
${MODEL_OUTPUT_DIR}/benchmark_model \
--model_data_file=${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data \
--device=${DEVICE_TYPE} \
--input_node="${INPUT_NODES}" \
--input_shape="${INPUT_SHAPES}"\
--output_node="${OUTPUT_NODES}" \
--output_shape="${OUTPUT_SHAPES}"\
--input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} || exit 1
else
bazel build --verbose_failures -c opt --strip always benchmark:benchmark_model \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=${TARGET_ABI} \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
--copt="-O3" \
--define openmp=true \
--define production=true || exit 1
cp bazel-bin/benchmark/benchmark_model $MODEL_OUTPUT_DIR
adb shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}"
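# Node names are sanitized (non-alphanumeric characters replaced with '_') so the
# pushed file names match the per-input files written by tools/generate_data.py.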
for NAME in "${INPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${PHONE_DATA_DIR} || exit 1
done
adb push ${MODEL_OUTPUT_DIR}/benchmark_model ${PHONE_DATA_DIR} || exit 1
if [ "$EMBED_MODEL_DATA" = 0 ]; then
adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
fi
adb </dev/null shell \
LD_LIBRARY_PATH=${PHONE_DATA_DIR} \
MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \
MACE_LIMIT_OPENCL_KERNEL_TIME=${LIMIT_OPENCL_KERNEL_TIME} \
MACE_OPENCL_PROFILING=1 \
${PHONE_DATA_DIR}/benchmark_model \
--model_data_file=${PHONE_DATA_DIR}/${MODEL_TAG}.data \
--device=${DEVICE_TYPE} \
--input_node="${INPUT_NODES}" \
--input_shape="${INPUT_SHAPES}"\
--output_node="${OUTPUT_NODES}" \
--output_shape="${OUTPUT_SHAPES}"\
--input_file=${PHONE_DATA_DIR}/${INPUT_FILE_NAME} || exit 1
fi
#!/bin/bash
Usage() {
echo "Usage: bash tools/build_mace_run.sh production_mode model_output_dir hexagon_mode"
}
if [ $# -lt 3 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
PRODUCTION_MODE=$1
MODEL_OUTPUT_DIR=$2
HEXAGON_MODE=$3
if [ "$PRODUCTION_MODE" = 1 ]; then
PRODUCTION_MODE_BUILD_FLAGS="--define production=true"
fi
if [ x"$TARGET_ABI" = x"host" ]; then
bazel build --verbose_failures -c opt --strip always codegen:generated_models \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
--define openmp=true \
--copt="-O3" \
$PRODUCTION_MODE_BUILD_FLAGS || exit 1
bazel build --verbose_failures -c opt --strip always examples:mace_run \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
--define openmp=true \
--copt="-O3" \
$PRODUCTION_MODE_BUILD_FLAGS || exit 1
else
if [ "$HEXAGON_MODE" = 1 ]; then
HEXAGON_MODE_BUILD_FLAG="--define hexagon=true"
fi
bazel build --verbose_failures -c opt --strip always examples:mace_run \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=${TARGET_ABI} \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
--define openmp=true \
--copt="-O3" \
$PRODUCTION_MODE_BUILD_FLAGS \
$HEXAGON_MODE_BUILD_FLAG || exit 1
fi
if [ "$PRODUCTION_MODE" = 1 ]; then
cp $GENERATED_MODEL_LIB_PATH $MODEL_OUTPUT_DIR/libmace_${MODEL_TAG}.a
fi
if [ -f "$MODEL_OUTPUT_DIR/mace_run" ]; then
rm -rf $MODEL_OUTPUT_DIR/mace_run
fi
cp bazel-bin/examples/mace_run $MODEL_OUTPUT_DIR
if [ "$EMBED_MODEL_DATA" = 0 ]; then
cp codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR
fi
# copy model header file to build output dir
cp codegen/models/${MODEL_TAG}/${MODEL_TAG}.h $MODEL_OUTPUT_DIR
#!/bin/bash
Usage() {
echo "Usage: bash tools/build_production_code.sh"
}
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
build_host_target()
{
BAZEL_TARGET=$1
bazel build --verbose_failures -c opt --strip always $BAZEL_TARGET \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-DMACE_OBFUSCATE_LITERALS" \
--copt="-O3" \
--define openmp=true || exit 1
}
build_target()
{
BAZEL_TARGET=$1
bazel build --verbose_failures -c opt --strip always $BAZEL_TARGET \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=$TARGET_ABI \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
--copt="-O3" \
--define openmp=true \
--copt="-DMACE_OBFUSCATE_LITERALS" || exit 1
}
if [ x"$TARGET_ABI" = x"host" ]; then
build_host_target //codegen:generated_opencl_prod
build_host_target //codegen:generated_tuning_params
else
build_target //codegen:generated_opencl_prod
build_target //codegen:generated_tuning_params
fi
#!/bin/bash
Usage() {
echo "Usage: bash tools/build_run_throughput_test.sh run_seconds merged_lib_file model_input_dir"
}
if [ $# -lt 3 ]; then
Usage
exit 1
fi
RUN_SECONDS=$1
MERGED_LIB_FILE=$2
MODEL_INPUT_DIR=$3
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
if [ "$CPU_MODEL_TAG" != '' ]; then
CPU_MODEL_TAG_BUILD_FLAGS="--copt=-DMACE_CPU_MODEL_TAG=${CPU_MODEL_TAG}"
fi
if [ "$GPU_MODEL_TAG" != '' ]; then
GPU_MODEL_TAG_BUILD_FLAGS="--copt=-DMACE_GPU_MODEL_TAG=${GPU_MODEL_TAG}"
fi
if [ "$DSP_MODEL_TAG" != '' ]; then
DSP_MODEL_TAG_BUILD_FLAGS="--copt=-DMACE_DSP_MODEL_TAG=${DSP_MODEL_TAG}"
fi
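# The CPU/GPU/DSP model tags are exported per runtime by tools/mace_tools.py;
# only the tags that are set are passed as -DMACE_*_MODEL_TAG build flags below.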
cp $MERGED_LIB_FILE benchmark/libmace_merged.a
bazel build --verbose_failures -c opt --strip always benchmark:model_throughput_test \
--crosstool_top=//external:android/crosstool \
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
--cpu=${TARGET_ABI} \
--copt="-std=c++11" \
--copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
--copt="-Werror=return-type" \
${CPU_MODEL_TAG_BUILD_FLAGS} \
${GPU_MODEL_TAG_BUILD_FLAGS} \
${DSP_MODEL_TAG_BUILD_FLAGS} \
--define openmp=true \
--copt="-O3" || exit 1
rm benchmark/libmace_merged.a
adb shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
adb push ${MODEL_INPUT_DIR}/${INPUT_FILE_NAME} ${PHONE_DATA_DIR} || exit 1
adb push bazel-bin/benchmark/model_throughput_test ${PHONE_DATA_DIR} || exit 1
if [ "$EMBED_MODEL_DATA" = 0 ]; then
adb push codegen/models/${CPU_MODEL_TAG}/${CPU_MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
adb push codegen/models/${GPU_MODEL_TAG}/${GPU_MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
adb push codegen/models/${DSP_MODEL_TAG}/${DSP_MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
fi
adb push lib/hexagon/libhexagon_controller.so ${PHONE_DATA_DIR} || exit 1
adb </dev/null shell \
LD_LIBRARY_PATH=${PHONE_DATA_DIR} \
MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \
MACE_KERNEL_PATH=$KERNEL_DIR \
MACE_LIMIT_OPENCL_KERNEL_TIME=${LIMIT_OPENCL_KERNEL_TIME} \
${PHONE_DATA_DIR}/model_throughput_test \
--input_shape="${INPUT_SHAPE}" \
--output_shape="${OUTPUT_SHAPE}" \
--input_file=${PHONE_DATA_DIR}/${INPUT_FILE_NAME} \
--cpu_model_data_file=${PHONE_DATA_DIR}/${CPU_MODEL_TAG}.data \
--gpu_model_data_file=${PHONE_DATA_DIR}/${GPU_MODEL_TAG}.data \
--dsp_model_data_file=${PHONE_DATA_DIR}/${DSP_MODEL_TAG}.data \
--run_seconds=$RUN_SECONDS || exit 1
#!/bin/bash
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
if [ x"$TARGET_ABI" != x"host" ]; then
adb shell rm -rf $PHONE_DATA_DIR
fi
rm -rf codegen/models codegen/opencl codegen/tuning
#!/bin/bash
Usage() {
echo "Usage: bash tools/download_and_link_lib.sh libmace_v7_dsp"
}
if [ $# -lt 1 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
LIB_FOLDER_NAME=$1
if [ ! -d "${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME}" ]; then
wget -P ${LIBMACE_SOURCE_DIR}/lib http://cnbj1-inner-fds.api.xiaomi.net/libmace/libs/${LIBMACE_TAG}/${LIB_FOLDER_NAME}.tar.gz && \
tar xvzf ${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME}.tar.gz -C ${LIBMACE_SOURCE_DIR}/lib/ || exit 1
echo "${LIB_FOLDER_NAME} download successfully!"
else
echo "${LIB_FOLDER_NAME} already exists!"
fi
echo "Create link 'mace' of downloaded or existed ${LIB_FOLDER_NAME}"
if [ -L ${LIBMACE_SOURCE_DIR}/lib/mace ]; then
unlink ${LIBMACE_SOURCE_DIR}/lib/mace
fi
ln -s ${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME} ${LIBMACE_SOURCE_DIR}/lib/mace && \
rm -rf ${LIBMACE_SOURCE_DIR}/lib/${LIB_FOLDER_NAME}.tar.gz || exit 1
#!/usr/bin/env bash
LIBMACE_TAG=`git describe --abbrev=0 --tags`
LIBMACE_SOURCE_DIR=`/bin/pwd`
INPUT_FILE_NAME="model_input"
OUTPUT_FILE_NAME="model_out"
PHONE_DATA_DIR="/data/local/tmp/mace_run"
KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
CODEGEN_DIR=${LIBMACE_SOURCE_DIR}/codegen
MODEL_CODEGEN_DIR=${CODEGEN_DIR}/models/${MODEL_TAG}
CL_CODEGEN_DIR=${CODEGEN_DIR}/opencl
TUNING_CODEGEN_DIR=${CODEGEN_DIR}/tuning
VERSION_SOURCE_PATH=${CODEGEN_DIR}/version
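# EMBED_MODEL_DATA=1 (the default) compiles the model weights into the generated
# code; when set to 0 the weights stay in a separate ${MODEL_TAG}.data file that
# the build and run scripts copy or push alongside the binaries.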
if [ -z ${EMBED_MODEL_DATA} ]; then
EMBED_MODEL_DATA=1
fi
if [ x"$RUNTIME" = x"dsp" ]; then
DATA_TYPE="DT_UINT8"
DEVICE_TYPE="HEXAGON"
LIB_FOLDER_NAME="${LIB_FOLDER_NAME}_dsp"
elif [ x"$RUNTIME" = x"gpu" ]; then
DATA_TYPE="DT_HALF"
DEVICE_TYPE="OPENCL"
elif [ x"$RUNTIME" = x"cpu" ]; then
DATA_TYPE="DT_FLOAT"
DEVICE_TYPE="CPU"
fi
GENERATED_MODEL_LIB_NAME="libgenerated_models.a"
if [ x"$TARGET_ABI" = x"host" ]; then
GENERATED_MODEL_LIB_NAME="libgenerated_models.pic.a"
fi
GENERATED_MODEL_LIB_PATH="bazel-bin/codegen/${GENERATED_MODEL_LIB_NAME}"
# example.yaml
# Each yaml file describes an exported library (which could be named [target_abi]/libmace-${filename}.a)
# and can contain more than one model.
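# model_sha256_checksum must match the model file and can be computed with,
# e.g., `sha256sum path/to/model64.pb`; shapes are listed in NHWC order.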
target_abis: [armeabi-v7a, arm64-v8a]
target_socs: [MSM8953] # target_socs not enabled yet
embed_model_data: 1
vlog_level: 0
models:
preview_net:
platform: tensorflow
model_file_path: path/to/model64.pb # http:// and https:// URLs are also supported
model_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
input_nodes: input_node
output_nodes: output_node
input_shapes: 1,64,64,3
output_shapes: 1,64,64,2
runtime: gpu
limit_opencl_kernel_time: 0
dsp_mode: 0
obfuscate: 1
fast_conv: 0
capture_net:
platform: caffe
model_file_path: path/to/model.prototxt
weight_file_path: path/to/weight.caffemodel
model_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
weight_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
input_nodes:
- input_node0
- input_node1
output_nodes:
- output_node0
- output_node1
input_shapes:
- 1,256,256,3
- 1,128,128,3
output_shapes:
- 1,256,256,2
- 1,1,1,2
runtime: cpu
limit_opencl_kernel_time: 1
dsp_mode: 0
obfuscate: 1
fast_conv: 0
import argparse
import sys
import numpy as np
import re
# Validation Flow:
# 1. Generate input data
# python generate_data.py \
# --input_node input_node \
# --input_shape 1,64,64,3 \
# --input_file input_file
#
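# For multiple inputs, separate node names with ',' and shapes with ':', e.g.
#   --input_node input_node0,input_node1 --input_shape 1,64,64,3:1,128,128,3
# One file named <input_file>_<sanitized node name> is written per input node.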
def generate_data(name, shape):
np.random.seed()
data = np.random.random(shape) * 2 - 1
input_file_name = FLAGS.input_file + "_" + re.sub('[^0-9a-zA-Z]+', '_', name)
print 'Generate input file: ', input_file_name
data.astype(np.float32).tofile(input_file_name)
def main(unused_args):
input_names = [name for name in FLAGS.input_node.split(',')]
input_shapes = [shape for shape in FLAGS.input_shape.split(':')]
assert len(input_names) == len(input_shapes)
for i in range(len(input_names)):
shape = [int(x) for x in input_shapes[i].split(',')]
generate_data(input_names[i], shape)
print "Generate input file done."
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--input_file",
type=str,
default="",
help="input file.")
parser.add_argument(
"--input_node",
type=str,
default="input_node",
help="input node")
parser.add_argument(
"--input_shape",
type=str,
default="1,64,64,3",
help="input shape.")
return parser.parse_known_args()
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
main(unused_args=[sys.argv[0]] + unparsed)
#!/bin/bash
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
bazel build //lib/python/tools:converter || exit 1
rm -rf ${MODEL_CODEGEN_DIR}
mkdir -p ${MODEL_CODEGEN_DIR}
if [ ${DSP_MODE} ]; then
DSP_MODE_FLAG="--dsp_mode=${DSP_MODE}"
fi
PYTHONUNBUFFERED=1 bazel-bin/lib/python/tools/converter --platform=${PLATFORM} \
--model_file=${MODEL_FILE_PATH} \
--weight_file=${WEIGHT_FILE_PATH} \
--model_checksum=${MODEL_SHA256_CHECKSUM} \
--output=${MODEL_CODEGEN_DIR}/model.cc \
--input_node=${INPUT_NODES} \
--output_node=${OUTPUT_NODES} \
--data_type=${DATA_TYPE} \
--runtime=${RUNTIME} \
--output_type=source \
--template=${LIBMACE_SOURCE_DIR}/lib/python/tools \
--model_tag=${MODEL_TAG} \
--input_shape=${INPUT_SHAPES} \
${DSP_MODE_FLAG} \
--embed_model_data=${EMBED_MODEL_DATA} \
--winograd=${FAST_CONV} \
--obfuscate=${OBFUSCATE} || exit 1
#!/bin/bash
Usage() {
echo "Usage: bash tools/generate_production_code.sh cl_bin_dirs pull_or_not"
}
if [ $# -lt 2 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
CL_BIN_DIRS=$1
PULL_OR_NOT=$2
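# When pull_or_not=1, the tuned OpenCL binaries and the tuning parameters
# (mace_run.config) are pulled from the device into cl_bin_dirs before being
# turned into C++ sources by the codegen scripts below.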
if [ "$PULL_OR_NOT" = 1 ]; then
CL_BIN_DIR=${CL_BIN_DIRS}
rm -rf ${CL_BIN_DIR}
mkdir -p ${CL_BIN_DIR}
if [ x"$TARGET_ABI" != x"host" ]; then
adb pull ${KERNEL_DIR}/. ${CL_BIN_DIR} > /dev/null
adb pull ${PHONE_DATA_DIR}/mace_run.config ${CL_BIN_DIR} > /dev/null
fi
fi
rm -rf ${CL_CODEGEN_DIR}
mkdir -p ${CL_CODEGEN_DIR}
rm -rf ${TUNING_CODEGEN_DIR}
mkdir -p ${TUNING_CODEGEN_DIR}
set -x
python lib/python/tools/opencl_codegen.py \
--cl_binary_dirs=${CL_BIN_DIRS} \
--output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc
python lib/python/tools/binary_codegen.py \
--binary_dirs=${CL_BIN_DIRS} \
--binary_file_name=mace_run.config \
--output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc
#!/usr/bin/env python
# Must be run from the root directory of the libmace project.
# python tools/mace_tools.py \
# --config=tools/example.yaml \
# --round=100 \
# --mode=all
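# Modes: "build" generates model code, builds and tunes mace_run, and merges the
# per-model libraries; "run" and "validate" execute the model ("validate" also
# compares outputs against TensorFlow/Caffe); "benchmark" runs benchmark_model;
# "merge" only merges libraries; "all" runs the full pipeline; "throughput_test"
# runs the merged-library throughput test.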
import argparse
import hashlib
import os
import shutil
import subprocess
import sys
import urllib
import yaml
from ConfigParser import ConfigParser
def run_command_real_time(command):
print("Run command: {}".format(command))
process = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
std_err = process.stderr.readline()
if std_err == '' and process.poll() is not None:
break
if std_err:
print std_err.strip()
while True:
std_out = process.stdout.readline()
if std_out == '' and process.poll() is not None:
break
if std_out:
print std_out.strip()
ret_code = process.poll()
if ret_code != 0:
raise Exception("Exit not 0 from bash with code: {}, command: {}".format(
ret_code, command))
def run_command(command):
print("Run command: {}".format(command))
result = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = result.communicate()
if out:
print("Stdout msg:\n{}".format(out))
if err:
print("Stderr msg:\n{}".format(err))
if result.returncode != 0:
raise Exception("Exit not 0 from bash with code: {}, command: {}".format(
result.returncode, command))
def get_libs(target_abi, configs):
runtime_list = []
for model_name in configs["models"]:
model_runtime = configs["models"][model_name]["runtime"]
runtime_list.append(model_runtime.lower())
global_runtime = ""
if "dsp" in runtime_list:
global_runtime = "dsp"
elif "gpu" in runtime_list:
global_runtime = "gpu"
elif "cpu" in runtime_list:
global_runtime = "cpu"
else:
raise Exception("Not found available RUNTIME in config files!")
libmace_name = "libmace-{}-{}".format(target_abi, global_runtime)
command = "bash tools/download_and_link_lib.sh " + libmace_name
run_command(command)
return libmace_name
def clear_env():
command = "bash tools/clear_env.sh"
run_command(command)
def generate_random_input(model_output_dir):
generate_data_or_not = True
command = "bash tools/validate_tools.sh {} {}".format(
model_output_dir, int(generate_data_or_not))
run_command(command)
def generate_model_code():
command = "bash tools/generate_model_code.sh"
run_command_real_time(command)
def build_mace_run(production_mode, model_output_dir, hexagon_mode):
command = "bash tools/build_mace_run.sh {} {} {}".format(
int(production_mode), model_output_dir, int(hexagon_mode))
run_command(command)
def tuning_run(model_output_dir, running_round, tuning, production_mode, restart_round):
command = "bash tools/tuning_run.sh {} {} {} {} {}".format(
model_output_dir, running_round, int(tuning), int(production_mode), restart_round)
run_command(command)
def benchmark_model(model_output_dir):
command = "bash tools/benchmark.sh {}".format(model_output_dir)
run_command(command)
def run_model(model_output_dir, running_round, restart_round):
tuning_run(model_output_dir, running_round, False, False, restart_round)
def generate_production_code(model_output_dirs, pull_or_not):
cl_bin_dirs = []
for d in model_output_dirs:
cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
cl_bin_dirs_str = ",".join(cl_bin_dirs)
command = "bash tools/generate_production_code.sh {} {}".format(
cl_bin_dirs_str, int(pull_or_not))
run_command(command)
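# build_mace_run_prod is a two-phase build: first build mace_run without
# production mode and run it with tuning enabled to collect OpenCL binaries and
# tuning parameters on the device, then regenerate the production code from the
# pulled results and rebuild with production mode enabled.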
def build_mace_run_prod(model_output_dir, tuning, libmace_name):
if "dsp" in libmace_name:
hexagon_mode = True
else:
hexagon_mode = False
production_or_not = False
build_mace_run(production_or_not, model_output_dir, hexagon_mode)
tuning_run(
model_output_dir,
running_round=0,
tuning=tuning,
production_mode=production_or_not,
restart_round=1)
production_or_not = True
pull_or_not = True
generate_production_code([model_output_dir], pull_or_not)
build_mace_run(production_or_not, model_output_dir, hexagon_mode)
def build_run_throughput_test(run_seconds, merged_lib_file, model_input_dir):
command = "bash tools/build_run_throughput_test.sh {} {} {}".format(
run_seconds, merged_lib_file, model_input_dir)
run_command(command)
def validate_model(model_output_dir):
generate_data_or_not = False
command = "bash tools/validate_tools.sh {} {}".format(
model_output_dir, int(generate_data_or_not))
run_command(command)
def build_production_code():
command = "bash tools/build_production_code.sh"
run_command(command)
def merge_libs_and_tuning_results(output_dir, model_output_dirs):
pull_or_not = False
generate_production_code(model_output_dirs, pull_or_not)
build_production_code()
model_output_dirs_str = ",".join(model_output_dirs)
command = "bash tools/merge_libs.sh {} {}".format(output_dir,
model_output_dirs_str)
run_command(command)
def parse_model_configs():
with open(FLAGS.config) as f:
configs = yaml.load(f)
return configs
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--config",
type=str,
default="./tool/config",
help="The global config file of models.")
parser.add_argument(
"--output_dir", type=str, default="build", help="The output dir.")
parser.add_argument(
"--round", type=int, default=1, help="The model running round.")
parser.add_argument("--run_seconds", type=int, default=10,
help="The model throughput test running seconds.")
parser.add_argument(
"--restart_round", type=int, default=1, help="The model restart round.")
parser.add_argument(
"--tuning", type="bool", default="true", help="Tune opencl params.")
parser.add_argument("--mode", type=str, default="all",
help="[build|run|validate|merge|all|throughput_test].")
return parser.parse_known_args()
def main(unused_args):
configs = parse_model_configs()
if FLAGS.mode == "build" or FLAGS.mode == "all":
# Remove previous output dirs
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
elif os.path.exists(os.path.join(FLAGS.output_dir, "libmace")):
shutil.rmtree(os.path.join(FLAGS.output_dir, "libmace"))
if FLAGS.mode == "validate":
FLAGS.round = 1
FLAGS.restart_round = 1
# target_abi = configs["target_abi"]
# libmace_name = get_libs(target_abi, configs)
# Transfer params by environment
# os.environ["TARGET_ABI"] = target_abi
os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"])
os.environ["VLOG_LEVEL"] = str(configs["vlog_level"])
os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(FLAGS.config))[0]
for target_abi in configs["target_abis"]:
libmace_name = get_libs(target_abi, configs)
# Transfer params by environment
os.environ["TARGET_ABI"] = target_abi
model_output_dirs = []
for model_name in configs["models"]:
# Transfer params by environment
os.environ["MODEL_TAG"] = model_name
print '=======================', model_name, '======================='
model_config = configs["models"][model_name]
for key in model_config:
if key in ['input_nodes', 'output_nodes'] and isinstance(model_config[key], list):
os.environ[key.upper()] = ",".join(model_config[key])
elif key in ['input_shapes', 'output_shapes'] and isinstance(model_config[key], list):
os.environ[key.upper()] = ":".join(model_config[key])
else:
os.environ[key.upper()] = str(model_config[key])
md5 = hashlib.md5()
md5.update(model_config["model_file_path"])
model_path_digest = md5.hexdigest()
model_output_dir = "%s/%s/%s/%s" % (FLAGS.output_dir, model_name, model_path_digest, target_abi)
model_output_dirs.append(model_output_dir)
if FLAGS.mode == "build" or FLAGS.mode == "all":
if os.path.exists(model_output_dir):
shutil.rmtree(model_output_dir)
os.makedirs(model_output_dir)
clear_env()
# Support http:// and https://
if model_config["model_file_path"].startswith(
"http://") or model_config["model_file_path"].startswith("https://"):
os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb"
urllib.urlretrieve(model_config["model_file_path"], os.environ["MODEL_FILE_PATH"])
if model_config["platform"] == "caffe" and (model_config["weight_file_path"].startswith(
"http://") or model_config["weight_file_path"].startswith("https://")):
os.environ["WEIGHT_FILE_PATH"] = model_output_dir + "/model.caffemodel"
urllib.urlretrieve(model_config["weight_file_path"], os.environ["WEIGHT_FILE_PATH"])
if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate"\
or FLAGS.mode == "benchmark" or FLAGS.mode == "all":
generate_random_input(model_output_dir)
if FLAGS.mode == "build" or FLAGS.mode == "all":
generate_model_code()
build_mace_run_prod(model_output_dir, FLAGS.tuning, libmace_name)
if FLAGS.mode == "run" or FLAGS.mode == "validate" or FLAGS.mode == "all":
run_model(model_output_dir, FLAGS.round, FLAGS.restart_round)
if FLAGS.mode == "benchmark":
benchmark_model(model_output_dir)
if FLAGS.mode == "validate" or FLAGS.mode == "all":
validate_model(model_output_dir)
if FLAGS.mode == "build" or FLAGS.mode == "merge" or FLAGS.mode == "all":
merge_libs_and_tuning_results(FLAGS.output_dir + "/" + target_abi,
model_output_dirs)
if FLAGS.mode == "throughput_test":
merged_lib_file = FLAGS.output_dir + "/%s/libmace/lib/libmace_%s.a" % \
(configs["target_abis"][0], os.environ["PROJECT_NAME"])
generate_random_input(FLAGS.output_dir)
for model_name in configs["models"]:
runtime = configs["models"][model_name]["runtime"]
os.environ["%s_MODEL_TAG" % runtime.upper()] = model_name
build_run_throughput_test(FLAGS.run_seconds, merged_lib_file, FLAGS.output_dir)
if __name__ == "__main__":
FLAGS, unparsed = parse_args()
main(unused_args=[sys.argv[0]] + unparsed)
#!/bin/bash
Usage() {
echo "Usage: bash tools/merge_libs.sh libmace_output_dir model_output_dirs"
}
if [ $# -lt 2 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
LIBMACE_BUILD_DIR=$1
MODEL_OUTPUT_DIRS=$2
MODEL_OUTPUT_DIRS_ARR=(${MODEL_OUTPUT_DIRS//,/ })
MODEL_HEADER_DIR=${LIBMACE_BUILD_DIR}/libmace/include/mace/public
MODEL_DATA_DIR=${LIBMACE_BUILD_DIR}/libmace/data
rm -rf ${LIBMACE_BUILD_DIR}/libmace
mkdir -p ${LIBMACE_BUILD_DIR}/libmace/lib
mkdir -p ${MODEL_DATA_DIR}
cp -rf ${LIBMACE_SOURCE_DIR}/include ${LIBMACE_BUILD_DIR}/libmace/
cp ${LIBMACE_SOURCE_DIR}/lib/hexagon/libhexagon_controller.so ${LIBMACE_BUILD_DIR}/libmace/lib
LIBMACE_TEMP_DIR=`mktemp -d -t libmace.XXXX`
# Merge all libraries into one
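# An ar MRI script is generated and fed to `ar -M`, combining libmace, the
# generated codegen archives and each model's static library into a single
# libmace_${PROJECT_NAME}.a.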
echo "create ${LIBMACE_BUILD_DIR}/libmace/lib/libmace_${PROJECT_NAME}.a" > ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib lib/mace/libmace.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib lib/mace/libmace_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
if [ x"TARGET_ABI" = x"host" ]; then
echo "addlib bazel-bin/codegen/libgenerated_opencl_prod.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/codegen/libgenerated_tuning_params.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
else
echo "addlib bazel-bin/codegen/libgenerated_opencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "addlib bazel-bin/codegen/libgenerated_tuning_params.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
fi
for model_output_dir in ${MODEL_OUTPUT_DIRS_ARR[@]}; do
for lib in ${model_output_dir}/*.a; do
echo "addlib ${lib}" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
done
for data_file in ${model_output_dir}/*.data; do
cp ${data_file} ${MODEL_DATA_DIR}
done
for header_file in ${model_output_dir}/*.h; do
cp ${header_file} ${MODEL_HEADER_DIR}
done
done
echo "save" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
echo "end" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
$ANDROID_NDK_HOME/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/bin/aarch64-linux-android-ar \
-M < ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri || exit 1
rm -rf ${LIBMACE_TEMP_DIR}
echo "Libs merged!"
#!/bin/bash
Usage() {
echo "Usage: bash tools/tuning_run.sh model_output_dir round tuning production_mode"
}
if [ $# -lt 4 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
MODEL_OUTPUT_DIR=$1
ROUND=$2
TUNING_OR_NOT=$3
PRODUCTION_MODE=$4
RESTART_ROUND=$5
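# Tuning (MACE_TUNING=1) is only enabled when tuning is requested and
# production_mode is off; production runs rely on previously generated tuning
# parameters instead.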
if [ x"$TARGET_ABI" = x"host" ]; then
MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
${MODEL_OUTPUT_DIR}/mace_run \
--input_node="${INPUT_NODES}" \
--input_shape="${INPUT_SHAPES}"\
--output_node="${OUTPUT_NODES}" \
--output_shape="${OUTPUT_SHAPES}"\
--input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
--output_file=${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} \
--model_data_file=${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data \
--device=${DEVICE_TYPE} \
--round=1 \
--restart_round=1 || exit 1
else
if [[ "${TUNING_OR_NOT}" != "0" && "$PRODUCTION_MODE" != 1 ]];then
tuning_flag=1
else
tuning_flag=0
fi
adb shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
if [ "$PRODUCTION_MODE" = 0 ]; then
adb shell "mkdir -p ${KERNEL_DIR}" || exit 1
fi
IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}"
for NAME in "${INPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${PHONE_DATA_DIR} > /dev/null || exit 1
done
adb </dev/null push ${MODEL_OUTPUT_DIR}/mace_run ${PHONE_DATA_DIR} > /dev/null || exit 1
if [ "$EMBED_MODEL_DATA" = 0 ]; then
adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} > /dev/null || exit 1
fi
adb push lib/hexagon/libhexagon_controller.so ${PHONE_DATA_DIR} > /dev/null || exit 1
mace_adb_output=`adb </dev/null shell \
"LD_LIBRARY_PATH=${PHONE_DATA_DIR} \
MACE_TUNING=${tuning_flag} \
MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
MACE_RUN_PARAMETER_PATH=${PHONE_DATA_DIR}/mace_run.config \
MACE_KERNEL_PATH=$KERNEL_DIR \
MACE_LIMIT_OPENCL_KERNEL_TIME=${LIMIT_OPENCL_KERNEL_TIME} \
${PHONE_DATA_DIR}/mace_run \
--input_node="${INPUT_NODES}" \
--input_shape="${INPUT_SHAPES}"\
--output_node="${OUTPUT_NODES}" \
--output_shape="${OUTPUT_SHAPES}"\
--input_file=${PHONE_DATA_DIR}/${INPUT_FILE_NAME} \
--output_file=${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} \
--model_data_file=${PHONE_DATA_DIR}/${MODEL_TAG}.data \
--device=${DEVICE_TYPE} \
--round=$ROUND \
--restart_round=$RESTART_ROUND; echo \\$?"` || exit 1
echo "$mace_adb_output" | head -n -1
mace_adb_return_code=`echo "$mace_adb_output" | tail -1`
if [ $mace_adb_return_code -ne 0 ]; then
exit 1
fi
fi
import argparse
import sys
import os
import os.path
import numpy as np
import re
from scipy import spatial
# Validation Flow:
# 1. Generate input data
# 2. Use mace_run to run model on phone.
# 3. adb pull the result.
# 4. Compare output data of mace and tf
# python validate.py --model_file tf_model_opt.pb \
# --input_file input_file \
# --mace_out_file output_file \
# --input_node input_node \
# --output_node output_node \
# --input_shape 1,64,64,3 \
# --output_shape 1,64,64,2
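# Shapes are given in NHWC. For Caffe validation the input is transposed to NCHW
# before being fed to the net, and the Caffe output is transposed back to NHWC
# before comparison with the MACE output.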
def load_data(file):
if os.path.isfile(file):
return np.fromfile(file=file, dtype=np.float32)
else:
return np.empty([0])
def format_output_name(name):
return re.sub('[^0-9a-zA-Z]+', '_', name)
def compare_output(output_name, mace_out_value, out_value):
if mace_out_value.size != 0:
similarity = (1 - spatial.distance.cosine(out_value.flat, mace_out_value.flat))
print output_name, 'MACE VS', FLAGS.platform.upper(), 'similarity: ', similarity
if (FLAGS.mace_runtime == "cpu" and similarity > 0.999) or \
(FLAGS.mace_runtime == "gpu" and similarity > 0.995) or \
(FLAGS.mace_runtime == "dsp" and similarity > 0.930):
print '=======================Similarity Test Passed======================'
else:
print '=======================Similarity Test Failed======================'
sys.exit(-1)
else:
print '=======================Skip empty node==================='
sys.exit(-1)
def validate_tf_model(input_names, input_shapes, output_names):
import tensorflow as tf
if not os.path.isfile(FLAGS.model_file):
print("Input graph file '" + FLAGS.model_file + "' does not exist!")
sys.exit(-1)
input_graph_def = tf.GraphDef()
with open(FLAGS.model_file, "rb") as f:
data = f.read()
input_graph_def.ParseFromString(data)
tf.import_graph_def(input_graph_def, name="")
with tf.Session() as session:
with session.graph.as_default() as graph:
tf.import_graph_def(input_graph_def, name="")
input_dict = {}
for i in range(len(input_names)):
input_value = load_data(FLAGS.input_file + "_" + input_names[i])
input_value = input_value.reshape(input_shapes[i])
input_node = graph.get_tensor_by_name(input_names[i] + ':0')
input_dict[input_node] = input_value
output_nodes = []
for name in output_names:
output_nodes.extend([graph.get_tensor_by_name(name + ':0')])
output_values = session.run(output_nodes, feed_dict=input_dict)
for i in range(len(output_names)):
output_file_name = FLAGS.mace_out_file + "_" + format_output_name(output_names[i])
mace_out_value = load_data(output_file_name)
compare_output(output_names[i], mace_out_value, output_values[i])
def validate_caffe_model(input_names, input_shapes, output_names, output_shapes):
os.environ['GLOG_minloglevel'] = '1' # suppress Caffe verbose prints
import caffe
if not os.path.isfile(FLAGS.model_file):
print("Input graph file '" + FLAGS.model_file + "' does not exist!")
sys.exit(-1)
if not os.path.isfile(FLAGS.weight_file):
print("Input weight file '" + FLAGS.weight_file + "' does not exist!")
sys.exit(-1)
caffe.set_mode_cpu()
net = caffe.Net(FLAGS.model_file, caffe.TEST, weights=FLAGS.weight_file)
for i in range(len(input_names)):
input_value = load_data(FLAGS.input_file + "_" + input_names[i])
input_value = input_value.reshape(input_shapes[i]).transpose((0, 3, 1, 2))
net.blobs[input_names[i]].data[0] = input_value
net.forward()
for i in range(len(output_names)):
value = net.blobs[net.top_names[output_names[i]][0]].data[0]
out_shape = output_shapes[i]
out_shape[1], out_shape[2], out_shape[3] = out_shape[3], out_shape[1], out_shape[2]
value = value.reshape(out_shape).transpose((0, 2, 3, 1))
output_file_name = FLAGS.mace_out_file + "_" + format_output_name(output_names[i])
mace_out_value = load_data(output_file_name)
compare_output(output_names[i], mace_out_value, value)
def main(unused_args):
input_names = [name for name in FLAGS.input_node.split(',')]
input_shape_strs = [shape for shape in FLAGS.input_shape.split(':')]
input_shapes = [[int(x) for x in shape.split(',')] for shape in input_shape_strs]
output_names = [name for name in FLAGS.output_node.split(',')]
assert len(input_names) == len(input_shapes)
if FLAGS.platform == 'tensorflow':
validate_tf_model(input_names, input_shapes, output_names)
elif FLAGS.platform == 'caffe':
output_shape_strs = [shape for shape in FLAGS.output_shape.split(':')]
output_shapes = [[int(x) for x in shape.split(',')] for shape in output_shape_strs]
validate_caffe_model(input_names, input_shapes, output_names, output_shapes)
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--platform",
type=str,
default="",
help="Tensorflow or Caffe.")
parser.add_argument(
"--model_file",
type=str,
default="",
help="TensorFlow or Caffe \'GraphDef\' file to load.")
parser.add_argument(
"--weight_file",
type=str,
default="",
help="caffe model file to load.")
parser.add_argument(
"--input_file",
type=str,
default="",
help="input file.")
parser.add_argument(
"--mace_out_file",
type=str,
default="",
help="mace output file to load.")
parser.add_argument(
"--mace_runtime",
type=str,
default="gpu",
help="mace runtime device.")
parser.add_argument(
"--input_shape",
type=str,
default="1,64,64,3",
help="input shape.")
parser.add_argument(
"--output_shape",
type=str,
default="1,64,64,2",
help="output shape.")
parser.add_argument(
"--input_node",
type=str,
default="input_node",
help="input node")
parser.add_argument(
"--output_node",
type=str,
default="output_node",
help="output node")
return parser.parse_known_args()
if __name__ == '__main__':
FLAGS, unparsed = parse_args()
main(unused_args=[sys.argv[0]] + unparsed)
#!/bin/bash
Usage() {
echo "Usage: bash tools/validate_tools.sh model_output_dir generate_data_or_not"
}
if [ $# -lt 2 ]; then
Usage
exit 1
fi
CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh
MODEL_OUTPUT_DIR=$1
GENERATE_DATA_OR_NOT=$2
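# generate_data_or_not=1 only (re)generates random input files and exits;
# otherwise the script pulls the device outputs (for non-host ABIs) and runs
# tools/validate.py, using the mace-caffe docker image for Caffe models.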
IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}"
IFS=',' read -r -a OUTPUT_NAMES <<< "${OUTPUT_NODES}"
echo $MODEL_OUTPUT_DIR
if [ "$GENERATE_DATA_OR_NOT" = 1 ]; then
for NAME in "${INPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
rm -rf ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME}
done
python -u tools/generate_data.py --input_node=${INPUT_NODES} \
--input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
--input_shape="${INPUT_SHAPES}" || exit 1
exit 0
fi
if [ "$PLATFORM" == "tensorflow" ];then
if [[ x"$TARGET_ABI" != x"host" ]]; then
for NAME in "${OUTPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME}
adb pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${MODEL_OUTPUT_DIR} > /dev/null
done
fi
python -u tools/validate.py --platform=tensorflow \
--model_file ${MODEL_FILE_PATH} \
--input_file ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
--mace_out_file ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} \
--mace_runtime ${RUNTIME} \
--input_node ${INPUT_NODES} \
--output_node ${OUTPUT_NODES} \
--input_shape ${INPUT_SHAPES} \
--output_shape ${OUTPUT_SHAPES} || exit 1
elif [ "$PLATFORM" == "caffe" ];then
IMAGE_NAME=mace-caffe:latest
CONTAINER_NAME=mace_caffe_validator
RES_FILE=validation.result
if [[ "$(docker images -q mace-caffe:latest 2> /dev/null)" == "" ]]; then
echo "Build caffe docker"
docker build -t ${IMAGE_NAME} docker/caffe || exit 1
fi
if [ ! "$(docker ps -qa -f name=${CONTAINER_NAME})" ]; then
echo "Run caffe container"
docker run -d -it --name ${CONTAINER_NAME} ${IMAGE_NAME} /bin/bash || exit 1
fi
if [ "$(docker inspect -f {{.State.Running}} ${CONTAINER_NAME})" == "false" ];then
echo "Start caffe container"
docker start ${CONTAINER_NAME}
fi
for NAME in "${INPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
docker cp ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${CONTAINER_NAME}:/mace
done
if [[ x"$TARGET_ABI" != x"host" ]]; then
for NAME in "${OUTPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME}
adb pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${MODEL_OUTPUT_DIR} > /dev/null
done
fi
for NAME in "${OUTPUT_NAMES[@]}";do
FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
docker cp ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${CONTAINER_NAME}:/mace
done
MODEL_FILE_NAME=$(basename ${MODEL_FILE_PATH})
WEIGHT_FILE_NAME=$(basename ${WEIGHT_FILE_PATH})
docker cp tools/validate.py ${CONTAINER_NAME}:/mace
docker cp ${MODEL_FILE_PATH} ${CONTAINER_NAME}:/mace
docker cp ${WEIGHT_FILE_PATH} ${CONTAINER_NAME}:/mace
docker exec -it ${CONTAINER_NAME} python -u /mace/validate.py \
--platform=caffe \
--model_file /mace/${MODEL_FILE_NAME} \
--weight_file /mace/${WEIGHT_FILE_NAME} \
--input_file /mace/${INPUT_FILE_NAME} \
--mace_out_file /mace/${OUTPUT_FILE_NAME} \
--mace_runtime ${RUNTIME} \
--input_node ${INPUT_NODES} \
--output_node ${OUTPUT_NODES} \
--input_shape ${INPUT_SHAPES} \
--output_shape ${OUTPUT_SHAPES} || exit 1
fi