diff --git a/tools/benchmark.sh b/tools/benchmark.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c327825546af73ed8cc8214a94b94b5d82cb46b0
--- /dev/null
+++ b/tools/benchmark.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+Usage() {
+  echo "Usage: bash tools/benchmark.sh model_output_dir"
+}
+
+if [ $# -lt 1 ]; then
+  Usage
+  exit 1
+fi
+
+CURRENT_DIR=`dirname $0`
+source ${CURRENT_DIR}/env.sh
+
+MODEL_OUTPUT_DIR=$1
+
+if [ -f "$MODEL_OUTPUT_DIR/benchmark_model" ]; then
+  rm -rf $MODEL_OUTPUT_DIR/benchmark_model
+fi
+
+if [ "$EMBED_MODEL_DATA" = 0 ]; then
+  cp codegen/models/${MODEL_TAG}/${MODEL_TAG}.data $MODEL_OUTPUT_DIR
+fi
+
+if [ x"$TARGET_ABI" == x"host" ]; then
+  bazel build --verbose_failures -c opt --strip always benchmark:benchmark_model \
+    --copt="-std=c++11" \
+    --copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
+    --copt="-Werror=return-type" \
+    --copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
+    --copt="-O3" \
+    --define openmp=true \
+    --define production=true || exit 1
+
+  cp bazel-bin/benchmark/benchmark_model $MODEL_OUTPUT_DIR
+
+  MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
+  ${MODEL_OUTPUT_DIR}/benchmark_model \
+    --model_data_file=${PHONE_DATA_DIR}/${MODEL_TAG}.data \
+    --device=${DEVICE_TYPE} \
+    --input_node="${INPUT_NODES}" \
+    --input_shape="${INPUT_SHAPES}"\
+    --output_node="${OUTPUT_NODES}" \
+    --output_shape="${OUTPUT_SHAPES}"\
+    --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} || exit 1
+
+else
+  bazel build --verbose_failures -c opt --strip always benchmark:benchmark_model \
+    --crosstool_top=//external:android/crosstool \
+    --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
+    --cpu=${TARGET_ABI} \
+    --copt="-std=c++11" \
+    --copt="-D_GLIBCXX_USE_C99_MATH_TR1" \
+    --copt="-Werror=return-type" \
+    --copt="-DMACE_MODEL_TAG=${MODEL_TAG}" \
+    --copt="-O3" \
+    --define openmp=true \
+    --define production=true || exit 1
+
+  cp bazel-bin/benchmark/benchmark_model $MODEL_OUTPUT_DIR
+
+  adb shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
+  IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}"
+  for NAME in "${INPUT_NAMES[@]}";do
+    FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+    adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${PHONE_DATA_DIR} || exit 1
+  done
+  adb push ${MODEL_OUTPUT_DIR}/benchmark_model ${PHONE_DATA_DIR} || exit 1
+  if [ "$EMBED_MODEL_DATA" = 0 ]; then
+    adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
+  fi
+
+  adb /dev/null
+  adb pull ${PHONE_DATA_DIR}/mace_run.config ${CL_BIN_DIR} > /dev/null
+  fi
+fi
+
+rm -rf ${CL_CODEGEN_DIR}
+mkdir -p ${CL_CODEGEN_DIR}
+rm -rf ${TUNING_CODEGEN_DIR}
+mkdir -p ${TUNING_CODEGEN_DIR}
+
+set -x
+
+python lib/python/tools/opencl_codegen.py \
+  --cl_binary_dirs=${CL_BIN_DIRS} \
+  --output_path=${CL_CODEGEN_DIR}/opencl_compiled_program.cc
+
+python lib/python/tools/binary_codegen.py \
+  --binary_dirs=${CL_BIN_DIRS} \
+  --binary_file_name=mace_run.config \
+  --output_path=${TUNING_CODEGEN_DIR}/tuning_params.cc
diff --git a/tools/mace_tools.py b/tools/mace_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..197e0fea2abf1270d8b67e2f232e855079b075e9
--- /dev/null
+++ b/tools/mace_tools.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+
+# Must run at root dir of libmace project.
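+#
+# It reads a YAML config describing one or more models, exports each model's
+# settings as environment variables, and drives the shell scripts under
+# tools/ according to --mode, e.g.: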
+# python tools/mace_tools.py \
+#     --config=tools/example.yaml \
+#     --round=100 \
+#     --mode=all
+
+import argparse
+import hashlib
+import os
+import shutil
+import subprocess
+import sys
+import urllib
+import yaml
+
+from ConfigParser import ConfigParser
+
+
+def run_command_real_time(command):
+  print("Run command: {}".format(command))
+  process = subprocess.Popen(
+      command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+  while True:
+    std_err = process.stderr.readline()
+    if std_err == '' and process.poll() is not None:
+      break
+    if std_err:
+      print std_err.strip()
+  while True:
+    std_out = process.stdout.readline()
+    if std_out == '' and process.poll() is not None:
+      break
+    if std_out:
+      print std_out.strip()
+  ret_code = process.poll()
+
+  if ret_code != 0:
+    raise Exception("Exit not 0 from bash with code: {}, command: {}".format(
+        ret_code, command))
+
+
+def run_command(command):
+  print("Run command: {}".format(command))
+  result = subprocess.Popen(
+      command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  out, err = result.communicate()
+
+  if out:
+    print("Stdout msg:\n{}".format(out))
+  if err:
+    print("Stderr msg:\n{}".format(err))
+
+  if result.returncode != 0:
+    raise Exception("Exit not 0 from bash with code: {}, command: {}".format(
+        result.returncode, command))
+
+
+def get_libs(target_abi, configs):
+  runtime_list = []
+  for model_name in configs["models"]:
+    model_runtime = configs["models"][model_name]["runtime"]
+    runtime_list.append(model_runtime.lower())
+
+  global_runtime = ""
+  if "dsp" in runtime_list:
+    global_runtime = "dsp"
+  elif "gpu" in runtime_list:
+    global_runtime = "gpu"
+  elif "cpu" in runtime_list:
+    global_runtime = "cpu"
+  else:
+    raise Exception("Not found available RUNTIME in config files!")
+
+  libmace_name = "libmace-{}-{}".format(target_abi, global_runtime)
+
+  command = "bash tools/download_and_link_lib.sh " + libmace_name
+  run_command(command)
+
+  return libmace_name
+
+
+def clear_env():
+  command = "bash tools/clear_env.sh"
+  run_command(command)
+
+
+def generate_random_input(model_output_dir):
+  generate_data_or_not = True
+  command = "bash tools/validate_tools.sh {} {}".format(
+      model_output_dir, int(generate_data_or_not))
+  run_command(command)
+
+
+def generate_model_code():
+  command = "bash tools/generate_model_code.sh"
+  run_command_real_time(command)
+
+
+def build_mace_run(production_mode, model_output_dir, hexagon_mode):
+  command = "bash tools/build_mace_run.sh {} {} {}".format(
+      int(production_mode), model_output_dir, int(hexagon_mode))
+  run_command(command)
+
+
+def tuning_run(model_output_dir, running_round, tuning, production_mode, restart_round):
+  command = "bash tools/tuning_run.sh {} {} {} {} {}".format(
+      model_output_dir, running_round, int(tuning), int(production_mode), restart_round)
+  run_command(command)
+
+
+def benchmark_model(model_output_dir):
+  command = "bash tools/benchmark.sh {}".format(model_output_dir)
+  run_command(command)
+
+
+def run_model(model_output_dir, running_round, restart_round):
+  tuning_run(model_output_dir, running_round, False, False, restart_round)
+
+
+def generate_production_code(model_output_dirs, pull_or_not):
+  cl_bin_dirs = []
+  for d in model_output_dirs:
+    cl_bin_dirs.append(os.path.join(d, "opencl_bin"))
+  cl_bin_dirs_str = ",".join(cl_bin_dirs)
+  command = "bash tools/generate_production_code.sh {} {}".format(
+      cl_bin_dirs_str, int(pull_or_not))
+  run_command(command)
+
+
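+# A production build is a two-pass process: a first, non-production build plus
+# a tuning run collects the OpenCL binaries and tuning parameters (pulled back
+# from the device for non-host ABIs), then a second, production build embeds
+# the generated OpenCL/tuning code into the library.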
"dsp" in libmace_name: + hexagon_mode = True + else: + hexagon_mode = False + + production_or_not = False + build_mace_run(production_or_not, model_output_dir, hexagon_mode) + tuning_run( + model_output_dir, + running_round=0, + tuning=tuning, + production_mode=production_or_not, + restart_round=1) + + production_or_not = True + pull_or_not = True + generate_production_code([model_output_dir], pull_or_not) + build_mace_run(production_or_not, model_output_dir, hexagon_mode) + + +def build_run_throughput_test(run_seconds, merged_lib_file, model_input_dir): + command = "bash tools/build_run_throughput_test.sh {} {} {}".format( + run_seconds, merged_lib_file, model_input_dir) + run_command(command) + + +def validate_model(model_output_dir): + generate_data_or_not = False + command = "bash tools/validate_tools.sh {} {}".format( + model_output_dir, int(generate_data_or_not)) + run_command(command) + + +def build_production_code(): + command = "bash tools/build_production_code.sh" + run_command(command) + + +def merge_libs_and_tuning_results(output_dir, model_output_dirs): + pull_or_not = False + generate_production_code(model_output_dirs, pull_or_not) + build_production_code() + + model_output_dirs_str = ",".join(model_output_dirs) + command = "bash tools/merge_libs.sh {} {}".format(output_dir, + model_output_dirs_str) + run_command(command) + + +def parse_model_configs(): + with open(FLAGS.config) as f: + configs = yaml.load(f) + return configs + + +def parse_args(): + """Parses command line arguments.""" + parser = argparse.ArgumentParser() + parser.register("type", "bool", lambda v: v.lower() == "true") + parser.add_argument( + "--config", + type=str, + default="./tool/config", + help="The global config file of models.") + parser.add_argument( + "--output_dir", type=str, default="build", help="The output dir.") + parser.add_argument( + "--round", type=int, default=1, help="The model running round.") + parser.add_argument("--run_seconds", type=int, default=10, + help="The model throughput test running seconds.") + parser.add_argument( + "--restart_round", type=int, default=1, help="The model restart round.") + parser.add_argument( + "--tuning", type="bool", default="true", help="Tune opencl params.") + parser.add_argument("--mode", type=str, default="all", + help="[build|run|validate|merge|all|throughput_test].") + return parser.parse_known_args() + + +def main(unused_args): + configs = parse_model_configs() + + if FLAGS.mode == "build" or FLAGS.mode == "all": + # Remove previous output dirs + if not os.path.exists(FLAGS.output_dir): + os.makedirs(FLAGS.output_dir) + elif os.path.exists(os.path.join(FLAGS.output_dir, "libmace")): + shutil.rmtree(os.path.join(FLAGS.output_dir, "libmace")) + + if FLAGS.mode == "validate": + FLAGS.round = 1 + FLAGS.restart_round = 1 + + # target_abi = configs["target_abi"] + # libmace_name = get_libs(target_abi, configs) + # Transfer params by environment + # os.environ["TARGET_ABI"] = target_abi + os.environ["EMBED_MODEL_DATA"] = str(configs["embed_model_data"]) + os.environ["VLOG_LEVEL"] = str(configs["vlog_level"]) + os.environ["PROJECT_NAME"] = os.path.splitext(os.path.basename(FLAGS.config))[0] + + for target_abi in configs["target_abis"]: + libmace_name = get_libs(target_abi, configs) + # Transfer params by environment + os.environ["TARGET_ABI"] = target_abi + model_output_dirs = [] + for model_name in configs["models"]: + # Transfer params by environment + os.environ["MODEL_TAG"] = model_name + print '=======================', model_name, 
+      for key in model_config:
+        if key in ['input_nodes', 'output_nodes'] and isinstance(model_config[key], list):
+          os.environ[key.upper()] = ",".join(model_config[key])
+        elif key in ['input_shapes', 'output_shapes'] and isinstance(model_config[key], list):
+          os.environ[key.upper()] = ":".join(model_config[key])
+        else:
+          os.environ[key.upper()] = str(model_config[key])
+
+      md5 = hashlib.md5()
+      md5.update(model_config["model_file_path"])
+      model_path_digest = md5.hexdigest()
+      model_output_dir = "%s/%s/%s/%s" % (FLAGS.output_dir, model_name, model_path_digest, target_abi)
+      model_output_dirs.append(model_output_dir)
+
+      if FLAGS.mode == "build" or FLAGS.mode == "all":
+        if os.path.exists(model_output_dir):
+          shutil.rmtree(model_output_dir)
+        os.makedirs(model_output_dir)
+        clear_env()
+
+      # Support http:// and https://
+      if model_config["model_file_path"].startswith(
+          "http://") or model_config["model_file_path"].startswith("https://"):
+        os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb"
+        urllib.urlretrieve(model_config["model_file_path"], os.environ["MODEL_FILE_PATH"])
+
+      if model_config["platform"] == "caffe" and (model_config["weight_file_path"].startswith(
+          "http://") or model_config["weight_file_path"].startswith("https://")):
+        os.environ["WEIGHT_FILE_PATH"] = model_output_dir + "/model.caffemodel"
+        urllib.urlretrieve(model_config["weight_file_path"], os.environ["WEIGHT_FILE_PATH"])
+
+      if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate"\
+          or FLAGS.mode == "benchmark" or FLAGS.mode == "all":
+        generate_random_input(model_output_dir)
+
+      if FLAGS.mode == "build" or FLAGS.mode == "all":
+        generate_model_code()
+        build_mace_run_prod(model_output_dir, FLAGS.tuning, libmace_name)
+
+      if FLAGS.mode == "run" or FLAGS.mode == "validate" or FLAGS.mode == "all":
+        run_model(model_output_dir, FLAGS.round, FLAGS.restart_round)
+
+      if FLAGS.mode == "benchmark":
+        benchmark_model(model_output_dir)
+
+      if FLAGS.mode == "validate" or FLAGS.mode == "all":
+        validate_model(model_output_dir)
+
+    if FLAGS.mode == "build" or FLAGS.mode == "merge" or FLAGS.mode == "all":
+      merge_libs_and_tuning_results(FLAGS.output_dir + "/" + target_abi,
+                                    model_output_dirs)
+
+  if FLAGS.mode == "throughput_test":
+    merged_lib_file = FLAGS.output_dir + "/%s/libmace/lib/libmace_%s.a" % \
+        (configs["target_abis"][0], os.environ["PROJECT_NAME"])
+    generate_random_input(FLAGS.output_dir)
+    for model_name in configs["models"]:
+      runtime = configs["models"][model_name]["runtime"]
+      os.environ["%s_MODEL_TAG" % runtime.upper()] = model_name
+    build_run_throughput_test(FLAGS.run_seconds, merged_lib_file, FLAGS.output_dir)
+
+
+if __name__ == "__main__":
+  FLAGS, unparsed = parse_args()
+  main(unused_args=[sys.argv[0]] + unparsed)
diff --git a/tools/merge_libs.sh b/tools/merge_libs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a60521acab33d9f1df1d2a89364a51d7df2effc3
--- /dev/null
+++ b/tools/merge_libs.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+Usage() {
+  echo "Usage: bash tools/merge_libs.sh libmace_output_dir model_output_dirs"
+}
+
+if [ $# -lt 2 ]; then
+  Usage
+  exit 1
+fi
+
+CURRENT_DIR=`dirname $0`
+source ${CURRENT_DIR}/env.sh
+
+LIBMACE_BUILD_DIR=$1
+MODEL_OUTPUT_DIRS=$2
+MODEL_OUTPUT_DIRS_ARR=(${MODEL_OUTPUT_DIRS//,/ })
+MODEL_HEADER_DIR=${LIBMACE_BUILD_DIR}/libmace/include/mace/public
+MODEL_DATA_DIR=${LIBMACE_BUILD_DIR}/libmace/data
+
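+# Output layout: headers go to libmace/include/mace/public, merged static libs
+# to libmace/lib, and model data files to libmace/data.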
+rm -rf ${LIBMACE_BUILD_DIR}/libmace
+mkdir -p ${LIBMACE_BUILD_DIR}/libmace/lib
+mkdir -p ${MODEL_DATA_DIR}
+cp -rf ${LIBMACE_SOURCE_DIR}/include ${LIBMACE_BUILD_DIR}/libmace/
+cp ${LIBMACE_SOURCE_DIR}/lib/hexagon/libhexagon_controller.so ${LIBMACE_BUILD_DIR}/libmace/lib
+
+LIBMACE_TEMP_DIR=`mktemp -d -t libmace.XXXX`
+
+# Merge all libraries into one
+echo "create ${LIBMACE_BUILD_DIR}/libmace/lib/libmace_${PROJECT_NAME}.a" > ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+echo "addlib lib/mace/libmace.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+echo "addlib lib/mace/libmace_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+if [ x"$TARGET_ABI" = x"host" ]; then
+  echo "addlib bazel-bin/codegen/libgenerated_opencl_prod.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+  echo "addlib bazel-bin/codegen/libgenerated_tuning_params.pic.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+else
+  echo "addlib bazel-bin/codegen/libgenerated_opencl_prod.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+  echo "addlib bazel-bin/codegen/libgenerated_tuning_params.a" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+fi
+for model_output_dir in ${MODEL_OUTPUT_DIRS_ARR[@]}; do
+  for lib in ${model_output_dir}/*.a; do
+    echo "addlib ${lib}" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+  done
+  for data_file in ${model_output_dir}/*.data; do
+    cp ${data_file} ${MODEL_DATA_DIR}
+  done
+  for header_file in ${model_output_dir}/*.h; do
+    cp ${header_file} ${MODEL_HEADER_DIR}
+  done
+done
+echo "save" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+echo "end" >> ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri
+$ANDROID_NDK_HOME/toolchains/aarch64-linux-android-4.9/prebuilt/linux-x86_64/bin/aarch64-linux-android-ar \
+  -M < ${LIBMACE_TEMP_DIR}/libmace_${PROJECT_NAME}.mri || exit 1
+
+rm -rf ${LIBMACE_TEMP_DIR}
+
+echo "Libs merged!"
diff --git a/tools/tuning_run.sh b/tools/tuning_run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b4d57691b372a1c2e7a65936f869747faaa191b3
--- /dev/null
+++ b/tools/tuning_run.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+Usage() {
+  echo "Usage: bash tools/tuning_run.sh model_output_dir round tuning production_mode restart_round"
+}
+
+if [ $# -lt 4 ]; then
+  Usage
+  exit 1
+fi
+
+CURRENT_DIR=`dirname $0`
+source ${CURRENT_DIR}/env.sh
+
+MODEL_OUTPUT_DIR=$1
+ROUND=$2
+TUNING_OR_NOT=$3
+PRODUCTION_MODE=$4
+RESTART_ROUND=$5
+
+if [ x"$TARGET_ABI" = x"host" ]; then
+  MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
+  ${MODEL_OUTPUT_DIR}/mace_run \
+    --input_node="${INPUT_NODES}" \
+    --input_shape="${INPUT_SHAPES}"\
+    --output_node="${OUTPUT_NODES}" \
+    --output_shape="${OUTPUT_SHAPES}"\
+    --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
+    --output_file=${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} \
+    --model_data_file=${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data \
+    --device=${DEVICE_TYPE} \
+    --round=1 \
+    --restart_round=1 || exit 1
+else
+  if [[ "${TUNING_OR_NOT}" != "0" && "$PRODUCTION_MODE" != 1 ]];then
+    tuning_flag=1
+  else
+    tuning_flag=0
+  fi
+
+  adb shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
+  if [ "$PRODUCTION_MODE" = 0 ]; then
+    adb shell "mkdir -p ${KERNEL_DIR}" || exit 1
+  fi
+
+  IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}"
+  for NAME in "${INPUT_NAMES[@]}";do
+    FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+    adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${PHONE_DATA_DIR} > /dev/null || exit 1
+  done
+
+  adb /dev/null || exit 1
+  if [ "$EMBED_MODEL_DATA" = 0 ]; then
+    adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} > /dev/null || exit 1
+  fi
+  adb push lib/hexagon/libhexagon_controller.so ${PHONE_DATA_DIR} > /dev/null || exit 1
+
+  mace_adb_output=`adb 0.999) or \
+        (FLAGS.mace_runtime == "gpu" and similarity > 0.995) or \
+        (FLAGS.mace_runtime == "dsp" and similarity > 0.930):
+      print '=======================Similarity Test Passed======================'
+    else:
+      print '=======================Similarity Test Failed======================'
+      sys.exit(-1)
+  else:
+    print '=======================Skip empty node==================='
+    sys.exit(-1)
+
+
+def validate_tf_model(input_names, input_shapes, output_names):
+  import tensorflow as tf
+  if not os.path.isfile(FLAGS.model_file):
+    print("Input graph file '" + FLAGS.model_file + "' does not exist!")
+    sys.exit(-1)
+
+  input_graph_def = tf.GraphDef()
+  with open(FLAGS.model_file, "rb") as f:
+    data = f.read()
+    input_graph_def.ParseFromString(data)
+    tf.import_graph_def(input_graph_def, name="")
+
+    with tf.Session() as session:
+      with session.graph.as_default() as graph:
+        tf.import_graph_def(input_graph_def, name="")
+        input_dict = {}
+        for i in range(len(input_names)):
+          input_value = load_data(FLAGS.input_file + "_" + input_names[i])
+          input_value = input_value.reshape(input_shapes[i])
+          input_node = graph.get_tensor_by_name(input_names[i] + ':0')
+          input_dict[input_node] = input_value
+
+        output_nodes = []
+        for name in output_names:
+          output_nodes.extend([graph.get_tensor_by_name(name + ':0')])
+        output_values = session.run(output_nodes, feed_dict=input_dict)
+        for i in range(len(output_names)):
+          output_file_name = FLAGS.mace_out_file + "_" + format_output_name(output_names[i])
+          mace_out_value = load_data(output_file_name)
+          compare_output(output_names[i], mace_out_value, output_values[i])
+
+
+def validate_caffe_model(input_names, input_shapes, output_names, output_shapes):
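+  # MACE tensors are NHWC while Caffe uses NCHW, so inputs are transposed to
+  # NCHW before the forward pass and Caffe outputs back to NHWC before being
+  # compared with the MACE output.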
+  os.environ['GLOG_minloglevel'] = '1'  # suppress Caffe verbose prints
+  import caffe
+  if not os.path.isfile(FLAGS.model_file):
+    print("Input graph file '" + FLAGS.model_file + "' does not exist!")
+    sys.exit(-1)
+  if not os.path.isfile(FLAGS.weight_file):
+    print("Input weight file '" + FLAGS.weight_file + "' does not exist!")
+    sys.exit(-1)
+
+  caffe.set_mode_cpu()
+
+  net = caffe.Net(FLAGS.model_file, caffe.TEST, weights=FLAGS.weight_file)
+
+  for i in range(len(input_names)):
+    input_value = load_data(FLAGS.input_file + "_" + input_names[i])
+    input_value = input_value.reshape(input_shapes[i]).transpose((0, 3, 1, 2))
+    net.blobs[input_names[i]].data[0] = input_value
+
+  net.forward()
+
+  for i in range(len(output_names)):
+    value = net.blobs[net.top_names[output_names[i]][0]].data[0]
+    out_shape = output_shapes[i]
+    out_shape[1], out_shape[2], out_shape[3] = out_shape[3], out_shape[1], out_shape[2]
+    value = value.reshape(out_shape).transpose((0, 2, 3, 1))
+    output_file_name = FLAGS.mace_out_file + "_" + format_output_name(output_names[i])
+    mace_out_value = load_data(output_file_name)
+    compare_output(output_names[i], mace_out_value, value)
+
+
+def main(unused_args):
+  input_names = [name for name in FLAGS.input_node.split(',')]
+  input_shape_strs = [shape for shape in FLAGS.input_shape.split(':')]
+  input_shapes = [[int(x) for x in shape.split(',')] for shape in input_shape_strs]
+  output_names = [name for name in FLAGS.output_node.split(',')]
+  assert len(input_names) == len(input_shapes)
+
+  if FLAGS.platform == 'tensorflow':
+    validate_tf_model(input_names, input_shapes, output_names)
+  elif FLAGS.platform == 'caffe':
+    output_shape_strs = [shape for shape in FLAGS.output_shape.split(':')]
+    output_shapes = [[int(x) for x in shape.split(',')] for shape in output_shape_strs]
+    validate_caffe_model(input_names, input_shapes, output_names, output_shapes)
+
+
+def parse_args():
+  """Parses command line arguments."""
+  parser = argparse.ArgumentParser()
+  parser.register("type", "bool", lambda v: v.lower() == "true")
+  parser.add_argument(
+      "--platform",
+      type=str,
+      default="",
+      help="Tensorflow or Caffe.")
+  parser.add_argument(
+      "--model_file",
+      type=str,
+      default="",
+      help="TensorFlow or Caffe \'GraphDef\' file to load.")
+  parser.add_argument(
+      "--weight_file",
+      type=str,
+      default="",
+      help="caffe model file to load.")
+  parser.add_argument(
+      "--input_file",
+      type=str,
+      default="",
+      help="input file.")
+  parser.add_argument(
+      "--mace_out_file",
+      type=str,
+      default="",
+      help="mace output file to load.")
+  parser.add_argument(
+      "--mace_runtime",
+      type=str,
+      default="gpu",
+      help="mace runtime device.")
+  parser.add_argument(
+      "--input_shape",
+      type=str,
+      default="1,64,64,3",
+      help="input shape.")
+  parser.add_argument(
+      "--output_shape",
+      type=str,
+      default="1,64,64,2",
+      help="output shape.")
+  parser.add_argument(
+      "--input_node",
+      type=str,
+      default="input_node",
+      help="input node")
+  parser.add_argument(
+      "--output_node",
+      type=str,
+      default="output_node",
+      help="output node")
+
+  return parser.parse_known_args()
+
+
+if __name__ == '__main__':
+  FLAGS, unparsed = parse_args()
+  main(unused_args=[sys.argv[0]] + unparsed)
+
diff --git a/tools/validate_tools.sh b/tools/validate_tools.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6a458676dcaced33e878da197acc545123e0aeb8
--- /dev/null
+++ b/tools/validate_tools.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+Usage() {
+  echo "Usage: bash tools/validate_tools.sh model_output_dir generate_data_or_not"
+}
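+
+# With generate_data_or_not=1 the script only generates random input data;
+# otherwise it pulls the MACE outputs (for non-host ABIs) and compares them
+# against the TensorFlow or Caffe reference, the latter running inside a
+# mace-caffe docker container.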
generate_data_or_not" +} + +if [ $# -lt 2 ]; then + Usage + exit 1 +fi + +CURRENT_DIR=`dirname $0` +source ${CURRENT_DIR}/env.sh + +MODEL_OUTPUT_DIR=$1 +GENERATE_DATA_OR_NOT=$2 + +IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODES}" +IFS=',' read -r -a OUTPUT_NAMES <<< "${OUTPUT_NODES}" + +echo $MODEL_OUTPUT_DIR +if [ "$GENERATE_DATA_OR_NOT" = 1 ]; then + for NAME in "${INPUT_NAMES[@]}";do + FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME}) + rm -rf ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} + done + python -u tools/generate_data.py --input_node=${INPUT_NODES} \ + --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \ + --input_shape="${INPUT_SHAPES}" || exit 1 + exit 0 +fi + +if [ "$PLATFORM" == "tensorflow" ];then + if [[ x"$TARGET_ABI" != x"host" ]]; then + for NAME in "${OUTPUT_NAMES[@]}";do + FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME}) + rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} + adb pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${MODEL_OUTPUT_DIR} > /dev/null + done + fi + python -u tools/validate.py --platform=tensorflow \ + --model_file ${MODEL_FILE_PATH} \ + --input_file ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \ + --mace_out_file ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} \ + --mace_runtime ${RUNTIME} \ + --input_node ${INPUT_NODES} \ + --output_node ${OUTPUT_NODES} \ + --input_shape ${INPUT_SHAPES} \ + --output_shape ${OUTPUT_SHAPES} || exit 1 + +elif [ "$PLATFORM" == "caffe" ];then + IMAGE_NAME=mace-caffe:latest + CONTAINER_NAME=mace_caffe_validator + RES_FILE=validation.result + + if [[ "$(docker images -q mace-caffe:latest 2> /dev/null)" == "" ]]; then + echo "Build caffe docker" + docker build -t ${IMAGE_NAME} docker/caffe || exit 1 + fi + + if [ ! "$(docker ps -qa -f name=${CONTAINER_NAME})" ]; then + echo "Run caffe container" + docker run -d -it --name ${CONTAINER_NAME} ${IMAGE_NAME} /bin/bash || exit 1 + fi + + if [ "$(docker inspect -f {{.State.Running}} ${CONTAINER_NAME})" == "false" ];then + echo "Start caffe container" + docker start ${CONTAINER_NAME} + fi + + for NAME in "${INPUT_NAMES[@]}";do + FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME}) + docker cp ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${CONTAINER_NAME}:/mace + done + + if [[ x"$TARGET_ABI" != x"host" ]]; then + for NAME in "${OUTPUT_NAMES[@]}";do + FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME}) + rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} + adb pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${MODEL_OUTPUT_DIR} > /dev/null + done + fi + for NAME in "${OUTPUT_NAMES[@]}";do + FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME}) + docker cp ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${CONTAINER_NAME}:/mace + done + + MODEL_FILE_NAME=$(basename ${MODEL_FILE_PATH}) + WEIGHT_FILE_NAME=$(basename ${WEIGHT_FILE_PATH}) + docker cp tools/validate.py ${CONTAINER_NAME}:/mace + docker cp ${MODEL_FILE_PATH} ${CONTAINER_NAME}:/mace + docker cp ${WEIGHT_FILE_PATH} ${CONTAINER_NAME}:/mace + docker exec -it ${CONTAINER_NAME} python -u /mace/validate.py \ + --platform=caffe \ + --model_file /mace/${MODEL_FILE_NAME} \ + --weight_file /mace/${WEIGHT_FILE_NAME} \ + --input_file /mace/${INPUT_FILE_NAME} \ + --mace_out_file /mace/${OUTPUT_FILE_NAME} \ + --mace_runtime ${RUNTIME} \ + --input_node ${INPUT_NODES} \ + --output_node ${OUTPUT_NODES} \ + --input_shape ${INPUT_SHAPES} \ + --output_shape ${OUTPUT_SHAPES} || exit 1 + +fi