Commit 43111426 authored by liuqi

Support multiple input or output API.

Parent 01225056
@@ -58,7 +58,11 @@ else
     cp bazel-bin/benchmark/benchmark_model $MODEL_OUTPUT_DIR
     adb shell "mkdir -p ${PHONE_DATA_DIR}" || exit 1
-    adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} ${PHONE_DATA_DIR} || exit 1
+    IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODE}"
+    for NAME in "${INPUT_NAMES[@]}";do
+      FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+      adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${PHONE_DATA_DIR} || exit 1
+    done
     adb push ${MODEL_OUTPUT_DIR}/benchmark_model ${PHONE_DATA_DIR} || exit 1
     if [ "$EMBED_MODEL_DATA" = 0 ]; then
       adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
@@ -73,7 +77,9 @@ else
     ${PHONE_DATA_DIR}/benchmark_model \
       --model_data_file=${PHONE_DATA_DIR}/${MODEL_TAG}.data \
       --device=${DEVICE_TYPE} \
+      --input_node="${INPUT_NODE}" \
       --input_shape="${INPUT_SHAPE}"\
+      --output_node="${OUTPUT_NODE}" \
       --output_shape="${OUTPUT_SHAPE}"\
       --input_file=${PHONE_DATA_DIR}/${INPUT_FILE_NAME} || exit 1
   fi
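Illustration (not part of the diff): each input node now gets its own data file; the suffix is the node name with non-alphanumeric characters mapped to underscores (sed in the shell loops above, re.sub in the Python tools). A minimal Python sketch with a made-up node name:

import re

def format_name(name):
  # mirrors re.sub('[^0-9a-zA-Z]+', '_', name) used in tools/generate_data.py / tools/validate.py
  return re.sub('[^0-9a-zA-Z]+', '_', name)

# hypothetical node name, only for illustration
print format_name('input/node:0')                        # input_node_0
print "model_input" + "_" + format_name('input_node0')   # model_input_input_node0

Note the shell sed expression replaces every non-alphanumeric character one-for-one, while the Python re.sub collapses runs into a single underscore; for the simple node names used here the results are the same.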
@@ -3,8 +3,7 @@ LIBMACE_TAG=`git describe --abbrev=0 --tags`
 LIBMACE_SOURCE_DIR=`/bin/pwd`
 INPUT_FILE_NAME="model_input"
-OUTPUT_FILE_NAME="model.out"
-OUTPUT_LIST_FILE="model.list"
+OUTPUT_FILE_NAME="model_out"
 PHONE_DATA_DIR="/data/local/tmp/mace_run"
 KERNEL_DIR="${PHONE_DATA_DIR}/cl/"
 CODEGEN_DIR=${LIBMACE_SOURCE_DIR}/codegen
......
@@ -25,10 +25,10 @@ models:
     weight_file_path: path/to/weight.caffemodel
     model_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
     weight_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
-    input_node: input_node
-    output_node: output_node
-    input_shape: 1,256,256,3
-    output_shape: 1,256,256,2
+    input_node: input_node0,input_node1
+    output_node: output_node0,output_node1
+    input_shape: 1,256,256,3:1,128,128,3
+    output_shape: 1,256,256,2:1,1,1,2
     runtime: cpu
     limit_opencl_kernel_time: 1
     dsp_mode: 0
......
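Illustration (not part of the diff): multiple nodes are written as a comma-separated list and their shapes as a colon-separated list in matching order. A minimal Python sketch of how these strings pair up, mirroring main() in tools/validate.py; the values are the sample ones from this yaml:

input_node = "input_node0,input_node1"
input_shape = "1,256,256,3:1,128,128,3"
names = input_node.split(',')
shapes = [[int(x) for x in s.split(',')] for s in input_shape.split(':')]
assert len(names) == len(shapes)
for name, shape in zip(names, shapes):
  print name, shape   # input_node0 [1, 256, 256, 3], then input_node1 [1, 128, 128, 3]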
tools/generate_data.py (new file):

import argparse
import sys
import os
import os.path
import numpy as np
import re
from scipy import spatial

# Validation Flow:
# 1. Generate input data
#    python generate_data.py \
#        --input_node input_node \
#        --input_shape 1,64,64,3 \
#        --input_file input_file
#

def generate_data(name, shape):
  np.random.seed()
  data = np.random.random(shape) * 2 - 1
  input_file_name = FLAGS.input_file + "_" + re.sub('[^0-9a-zA-Z]+', '_', name)
  print 'Generate input file: ', input_file_name
  data.astype(np.float32).tofile(input_file_name)

def main(unused_args):
  input_names = [name for name in FLAGS.input_node.split(',')]
  input_shapes = [shape for shape in FLAGS.input_shape.split(':')]
  assert len(input_names) == len(input_shapes)
  for i in range(len(input_names)):
    shape = [int(x) for x in input_shapes[i].split(',')]
    generate_data(input_names[i], shape)
  print "Generate input file done."

def parse_args():
  """Parses command line arguments."""
  parser = argparse.ArgumentParser()
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--input_file",
      type=str,
      default="",
      help="input file.")
  parser.add_argument(
      "--input_node",
      type=str,
      default="input_node",
      help="input node")
  parser.add_argument(
      "--input_shape",
      type=str,
      default="1,64,64,3",
      help="input shape.")
  return parser.parse_known_args()

if __name__ == '__main__':
  FLAGS, unparsed = parse_args()
  main(unused_args=[sys.argv[0]] + unparsed)
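Illustration (not part of the diff): for the sample yaml above, this script would be run roughly as python tools/generate_data.py --input_node=input_node0,input_node1 --input_shape=1,256,256,3:1,128,128,3 --input_file=path/to/model_input, producing one random float32 file per node, here model_input_input_node0 and model_input_input_node1 (the node names, shapes, and path are the sample values, not real model nodes).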
@@ -20,13 +20,15 @@ PRODUCTION_MODE=$4
 if [ x"$TARGET_ABI" = x"host" ]; then
   MACE_CPP_MIN_VLOG_LEVEL=$VLOG_LEVEL \
   ${MODEL_OUTPUT_DIR}/mace_run \
+    --input_node="${INPUT_NODE}" \
     --input_shape="${INPUT_SHAPE}"\
+    --output_node="${OUTPUT_NODE}" \
     --output_shape="${OUTPUT_SHAPE}"\
     --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
     --output_file=${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} \
     --model_data_file=${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data \
     --device=${DEVICE_TYPE} \
     --round=1 || exit 1
 else
   if [[ "${TUNING_OR_NOT}" != "0" && "$PRODUCTION_MODE" != 1 ]];then
     tuning_flag=1
@@ -38,7 +40,14 @@ else
   if [ "$PRODUCTION_MODE" = 0 ]; then
     adb shell "mkdir -p ${KERNEL_DIR}" || exit 1
   fi
-  adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} ${PHONE_DATA_DIR} || exit 1
+  IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODE}"
+  for NAME in "${INPUT_NAMES[@]}";do
+    FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+    echo $FORMATTED_NAME
+    adb push ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${PHONE_DATA_DIR} || exit 1
+  done
   adb push ${MODEL_OUTPUT_DIR}/mace_run ${PHONE_DATA_DIR} || exit 1
   if [ "$EMBED_MODEL_DATA" = 0 ]; then
     adb push ${MODEL_OUTPUT_DIR}/${MODEL_TAG}.data ${PHONE_DATA_DIR} || exit 1
@@ -53,8 +62,10 @@ else
   MACE_KERNEL_PATH=$KERNEL_DIR \
   MACE_LIMIT_OPENCL_KERNEL_TIME=${LIMIT_OPENCL_KERNEL_TIME} \
   ${PHONE_DATA_DIR}/mace_run \
-    --input_shape=${INPUT_SHAPE}\
-    --output_shape=${OUTPUT_SHAPE}\
+    --input_node="${INPUT_NODE}" \
+    --input_shape="${INPUT_SHAPE}"\
+    --output_node="${OUTPUT_NODE}" \
+    --output_shape="${OUTPUT_SHAPE}"\
     --input_file=${PHONE_DATA_DIR}/${INPUT_FILE_NAME} \
     --output_file=${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} \
     --model_data_file=${PHONE_DATA_DIR}/${MODEL_TAG}.data \
......
@@ -2,18 +2,12 @@ import argparse
 import sys
 import os
 import os.path
-import tensorflow as tf
 import numpy as np
+import re
 from scipy import spatial
-from tensorflow import gfile
 # Validation Flow:
 # 1. Generate input data
-#    python validate.py --generate_data true \
-#        --input_file input_file
-#        --input_shape 1,64,64,3
-#
 # 2. Use mace_run to run model on phone.
 # 3. adb pull the result.
 # 4. Compare output data of mace and tf
@@ -25,23 +19,18 @@ from tensorflow import gfile
 #        --input_shape 1,64,64,3 \
 #        --output_shape 1,64,64,2
-def generate_data(shape):
-  np.random.seed()
-  data = np.random.random(shape) * 2 - 1
-  print FLAGS.input_file
-  data.astype(np.float32).tofile(FLAGS.input_file)
-  print "Generate input file done."
 def load_data(file):
   if os.path.isfile(file):
     return np.fromfile(file=file, dtype=np.float32)
   else:
     return np.empty([0])
-def valid_output(out_shape, mace_out_file, tf_out_value):
-  mace_out_value = load_data(mace_out_file)
+def format_output_name(name):
+  return re.sub('[^0-9a-zA-Z]+', '_', name)
+def compare_output(mace_out_value, out_value):
   if mace_out_value.size != 0:
-    similarity = (1 - spatial.distance.cosine(tf_out_value.flat, mace_out_value))
+    similarity = (1 - spatial.distance.cosine(out_value.flat, mace_out_value.flat))
     print 'MACE VS TF similarity: ', similarity
     if (FLAGS.mace_runtime == "cpu" and similarity > 0.999) or \
        (FLAGS.mace_runtime == "gpu" and similarity > 0.995) or \
@@ -53,13 +42,14 @@ def valid_output(out_shape, mace_out_file, tf_out_value):
     print '=======================Skip empty node==================='
-def run_model(input_shape):
-  if not gfile.Exists(FLAGS.model_file):
+def validate_tf_model(input_names, input_shapes, output_names):
+  import tensorflow as tf
+  if not os.path.isfile(FLAGS.model_file):
     print("Input graph file '" + FLAGS.model_file + "' does not exist!")
     sys.exit(-1)
   input_graph_def = tf.GraphDef()
-  with gfile.Open(FLAGS.model_file, "rb") as f:
+  with open(FLAGS.model_file, "rb") as f:
     data = f.read()
     input_graph_def.ParseFromString(data)
     tf.import_graph_def(input_graph_def, name="")
@@ -67,35 +57,85 @@ def run_model(input_shape):
   with tf.Session() as session:
     with session.graph.as_default() as graph:
       tf.import_graph_def(input_graph_def, name="")
-      input_node = graph.get_tensor_by_name(FLAGS.input_node + ':0')
-      output_node = graph.get_tensor_by_name(FLAGS.output_node + ':0')
-      input_value = load_data(FLAGS.input_file)
-      input_value = input_value.reshape(input_shape)
-      output_value = session.run(output_node, feed_dict={input_node: input_value})
-      output_value.astype(np.float32).tofile( os.path.dirname(FLAGS.input_file) + '/tf_out')
-      return output_value
-def main(unused_args):
-  input_shape = [int(x) for x in FLAGS.input_shape.split(',')]
-  output_shape = [int(x) for x in FLAGS.output_shape.split(',')]
-  if FLAGS.generate_data:
-    generate_data(input_shape)
-  else:
-    output_value = run_model(input_shape)
-    valid_output(output_shape, FLAGS.mace_out_file, output_value)
+      input_dict = {}
+      for i in range(len(input_names)):
+        input_value = load_data(FLAGS.input_file + "_" + input_names[i])
+        input_value = input_value.reshape(input_shapes[i])
+        input_node = graph.get_tensor_by_name(input_names[i] + ':0')
+        input_dict[input_node] = input_value
+      output_nodes = []
+      for name in output_names:
+        output_nodes.extend([graph.get_tensor_by_name(name + ':0')])
+      output_values = session.run(output_nodes, feed_dict=input_dict)
+      for i in range(len(output_names)):
+        output_file_name = FLAGS.mace_out_file + "_" + format_output_name(output_names[i])
+        mace_out_value = load_data(output_file_name)
+        compare_output(mace_out_value, output_values[i])
+def validate_caffe_model(input_names, input_shapes, output_names, output_shapes):
+  os.environ['GLOG_minloglevel'] = '1'  # suppress Caffe verbose prints
+  import caffe
+  if not os.path.isfile(FLAGS.model_file):
+    print("Input graph file '" + FLAGS.model_file + "' does not exist!")
+    sys.exit(-1)
+  if not os.path.isfile(FLAGS.weight_file):
+    print("Input weight file '" + FLAGS.weight_file + "' does not exist!")
+    sys.exit(-1)
+  caffe.set_mode_cpu()
+  net = caffe.Net(FLAGS.model_file, caffe.TEST, weights=FLAGS.weight_file)
+  for i in range(len(input_names)):
+    input_value = load_data(FLAGS.input_file + "_" + input_names[i])
+    input_value = input_value.reshape(input_shapes[i]).transpose((0, 3, 1, 2))
+    net.blobs[input_names[i]].data[0] = input_value
+  net.forward()
+  for i in range(len(output_names)):
+    value = net.blobs[output_names[i]].data[0]
+    out_shape = output_shapes[i]
+    out_shape[1], out_shape[2], out_shape[3] = out_shape[3], out_shape[1], out_shape[2]
+    value = value.reshape(out_shape).transpose((0, 2, 3, 1))
+    output_file_name = FLAGS.mace_out_file + "_" + format_output_name(output_names[i])
+    mace_out_value = load_data(output_file_name)
+    compare_output(mace_out_value, value)
+def main(unused_args):
+  input_names = [name for name in FLAGS.input_node.split(',')]
+  input_shape_strs = [shape for shape in FLAGS.input_shape.split(':')]
+  input_shapes = [[int(x) for x in shape.split(',')] for shape in input_shape_strs]
+  output_names = [name for name in FLAGS.output_node.split(',')]
+  assert len(input_names) == len(input_shapes)
+  if FLAGS.platform == 'tensorflow':
+    validate_tf_model(input_names, input_shapes, output_names)
+  elif FLAGS.platform == 'caffe':
+    output_shape_strs = [shape for shape in FLAGS.output_shape.split(':')]
+    output_shapes = [[int(x) for x in shape.split(',')] for shape in output_shape_strs]
+    validate_caffe_model(input_names, input_shapes, output_names, output_shapes)
 def parse_args():
   """Parses command line arguments."""
   parser = argparse.ArgumentParser()
   parser.register("type", "bool", lambda v: v.lower() == "true")
+  parser.add_argument(
+      "--platform",
+      type=str,
+      default="",
+      help="Tensorflow or Caffe.")
   parser.add_argument(
       "--model_file",
       type=str,
       default="",
-      help="TensorFlow \'GraphDef\' file to load.")
+      help="TensorFlow or Caffe \'GraphDef\' file to load.")
+  parser.add_argument(
+      "--weight_file",
+      type=str,
+      default="",
+      help="caffe model file to load.")
   parser.add_argument(
       "--input_file",
       type=str,
@@ -131,11 +171,6 @@ def parse_args():
       type=str,
       default="output_node",
       help="output node")
-  parser.add_argument(
-      "--generate_data",
-      type='bool',
-      default="false",
-      help="Generate data or not.")
   return parser.parse_known_args()
......
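Illustration (not part of the diff): validate_caffe_model compares the MACE output files (NHWC layout) against Caffe blobs (NCHW layout) by permuting the yaml output shape in place and transposing the blob back to NHWC before the cosine-similarity check. A minimal sketch of that layout conversion with made-up shapes and values:

import numpy as np

# Hypothetical shapes/values, only to illustrate the NCHW -> NHWC conversion.
nhwc_shape = [1, 4, 4, 2]                      # output_shape as written in the yaml (N,H,W,C)
caffe_value = np.arange(32, dtype=np.float32).reshape((1, 2, 4, 4))  # Caffe blob layout is NCHW
nchw_shape = [nhwc_shape[0], nhwc_shape[3], nhwc_shape[1], nhwc_shape[2]]
nhwc_value = caffe_value.reshape(nchw_shape).transpose((0, 2, 3, 1))
assert list(nhwc_value.shape) == nhwc_shape    # now directly comparable to the MACE output file

A typical invocation, built from the sample yaml values and the flags defined in parse_args above, would look roughly like: python tools/validate.py --platform=caffe --model_file model.prototxt --weight_file weight.caffemodel --input_node=input_node0,input_node1 --input_shape=1,256,256,3:1,128,128,3 --output_node=output_node0,output_node1 --output_shape=1,256,256,2:1,1,1,2 --input_file=model_input --mace_out_file=model_out; the validation shell script additionally passes --mace_runtime.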
tools/validate_caffe.py (deleted file):

import argparse
import sys
import os
import os.path
import numpy as np
from scipy import spatial

os.environ['GLOG_minloglevel'] = '1'  # suprress Caffe verbose prints
import caffe

# Validation Flow:
# 1. Generate input data
#    python validate.py --generate_data true \
#        --input_file input_file \
#        --input_shape 1,64,64,3
#
# 2. Use mace_run to run model on phone.
# 3. adb pull the result.
# 4. Compare output data of mace and tf
#    python validate.py --model_file tf_model_opt.pb \
#        --input_file input_file \
#        --mace_out_file output_file \
#        --input_node input_node \
#        --output_node output_node \
#        --input_shape 1,64,64,3 \
#        --output_shape 1,64,64,2

def generate_data(shape):
  np.random.seed()
  data = np.random.random(shape) * 2 - 1
  print FLAGS.input_file
  data.astype(np.float32).tofile(FLAGS.input_file)
  print "Generate input file done."

def load_data(file):
  if os.path.isfile(file):
    return np.fromfile(file=file, dtype=np.float32)
  else:
    return np.empty([0])

def valid_output(out_shape, mace_out_file, out_value):
  mace_out_value = load_data(mace_out_file)
  if mace_out_value.size != 0:
    mace_out_value = mace_out_value.reshape(out_shape)
    out_shape[1], out_shape[2], out_shape[3] = out_shape[3], out_shape[1], out_shape[2]
    out_value = out_value.reshape(out_shape).transpose((0, 2, 3, 1))
    similarity = (1 - spatial.distance.cosine(out_value.flat, mace_out_value.flat))
    print 'MACE VS Caffe similarity: ', similarity
    if (FLAGS.mace_runtime == "cpu" and similarity > 0.999) or \
       (FLAGS.mace_runtime == "gpu" and similarity > 0.995) or \
       (FLAGS.mace_runtime == "dsp" and similarity > 0.930):
      print '=======================Similarity Test Passed======================'
    else:
      print '=======================Similarity Test Failed======================'
  else:
    print '=======================Skip empty node==================='

def run_model(input_shape):
  if not os.path.isfile(FLAGS.model_file):
    print("Input graph file '" + FLAGS.model_file + "' does not exist!")
    sys.exit(-1)
  if not os.path.isfile(FLAGS.weight_file):
    print("Input weight file '" + FLAGS.weight_file + "' does not exist!")
    sys.exit(-1)
  caffe.set_mode_cpu()
  net = caffe.Net(FLAGS.model_file, caffe.TEST, weights=FLAGS.weight_file)
  input_value = load_data(FLAGS.input_file)
  input_value = input_value.reshape(input_shape).transpose((0, 3, 1, 2))
  net.blobs[FLAGS.input_node].data[0] = input_value
  net.forward(start=FLAGS.input_node, end=FLAGS.output_node)
  result = net.blobs[FLAGS.output_node].data[0]
  return result

def main(unused_args):
  input_shape = [int(x) for x in FLAGS.input_shape.split(',')]
  output_shape = [int(x) for x in FLAGS.output_shape.split(',')]
  if FLAGS.generate_data:
    generate_data(input_shape)
  else:
    output_value = run_model(input_shape)
    valid_output(output_shape, FLAGS.mace_out_file, output_value)

def parse_args():
  """Parses command line arguments."""
  parser = argparse.ArgumentParser()
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--model_file",
      type=str,
      default="",
      help="caffe prototxt file to load.")
  parser.add_argument(
      "--weight_file",
      type=str,
      default="",
      help="caffe model file to load.")
  parser.add_argument(
      "--input_file",
      type=str,
      default="",
      help="input file.")
  parser.add_argument(
      "--mace_out_file",
      type=str,
      default="",
      help="mace output file to load.")
  parser.add_argument(
      "--mace_runtime",
      type=str,
      default="gpu",
      help="mace runtime device.")
  parser.add_argument(
      "--input_shape",
      type=str,
      default="1,64,64,3",
      help="input shape.")
  parser.add_argument(
      "--output_shape",
      type=str,
      default="1,64,64,2",
      help="output shape.")
  parser.add_argument(
      "--input_node",
      type=str,
      default="input_node",
      help="input node")
  parser.add_argument(
      "--output_node",
      type=str,
      default="output_node",
      help="output node")
  parser.add_argument(
      "--generate_data",
      type='bool',
      default="false",
      help="Generate data or not.")
  return parser.parse_known_args()

if __name__ == '__main__':
  FLAGS, unparsed = parse_args()
  main(unused_args=[sys.argv[0]] + unparsed)
@@ -15,21 +15,31 @@ source ${CURRENT_DIR}/env.sh
 MODEL_OUTPUT_DIR=$1
 GENERATE_DATA_OR_NOT=$2
+IFS=',' read -r -a INPUT_NAMES <<< "${INPUT_NODE}"
+IFS=',' read -r -a OUTPUT_NAMES <<< "${OUTPUT_NODE}"
 echo $MODEL_OUTPUT_DIR
 if [ "$GENERATE_DATA_OR_NOT" = 1 ]; then
-  rm -rf ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}
-  python tools/validate.py --generate_data true \
-    --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
-    --input_shape="${INPUT_SHAPE}" || exit 1
+  for NAME in "${INPUT_NAMES[@]}";do
+    FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+    rm -rf ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME}
+  done
+  python tools/generate_data.py --input_node=${INPUT_NODE} \
+    --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
+    --input_shape="${INPUT_SHAPE}" || exit 1
   exit 0
 fi
 if [ "$PLATFORM" == "tensorflow" ];then
   if [[ x"$TARGET_ABI" -ne x"host" ]]; then
-    rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}
-    adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_OUTPUT_DIR}
+    for NAME in "${OUTPUT_NAMES[@]}";do
+      FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+      rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME}
+      adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${MODEL_OUTPUT_DIR}
+    done
   fi
-  python tools/validate.py --model_file ${MODEL_FILE_PATH} \
+  python tools/validate.py --platform=tensorflow \
+    --model_file ${MODEL_FILE_PATH} \
     --input_file ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
     --mace_out_file ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} \
     --mace_runtime ${RUNTIME} \
@@ -58,19 +68,27 @@ elif [ "$PLATFORM" == "caffe" ];then
     docker start ${CONTAINER_NAME}
   fi
+  for NAME in "${INPUT_NAMES[@]}";do
+    FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+    docker cp ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}_${FORMATTED_NAME} ${CONTAINER_NAME}:/mace
+  done
   if [[ x"$TARGET_ABI" -ne x"host" ]]; then
-    rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}
-    adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_OUTPUT_DIR}
+    for NAME in "${OUTPUT_NAMES[@]}";do
+      FORMATTED_NAME=$(sed s/[^[:alnum:]]/_/g <<< ${NAME})
+      rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME}
+      adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${MODEL_OUTPUT_DIR}
+      docker cp ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}_${FORMATTED_NAME} ${CONTAINER_NAME}:/mace
+    done
   fi
   MODEL_FILE_NAME=$(basename ${MODEL_FILE_PATH})
   WEIGHT_FILE_NAME=$(basename ${WEIGHT_FILE_PATH})
-  docker cp tools/validate_caffe.py ${CONTAINER_NAME}:/mace
-  docker cp ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} ${CONTAINER_NAME}:/mace
-  docker cp ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} ${CONTAINER_NAME}:/mace
+  docker cp tools/validate.py ${CONTAINER_NAME}:/mace
   docker cp ${MODEL_FILE_PATH} ${CONTAINER_NAME}:/mace
   docker cp ${WEIGHT_FILE_PATH} ${CONTAINER_NAME}:/mace
-  docker exec -it ${CONTAINER_NAME} python /mace/validate_caffe.py --model_file /mace/${MODEL_FILE_NAME} \
+  docker exec -it ${CONTAINER_NAME} python /mace/validate.py --platform=caffe \
+    --model_file /mace/${MODEL_FILE_NAME} \
     --weight_file /mace/${WEIGHT_FILE_NAME} \
     --input_file /mace/${INPUT_FILE_NAME} \
     --mace_out_file /mace/${OUTPUT_FILE_NAME} \
......
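Illustration (not part of the diff): after these changes, every input and output tensor gets its own file whose suffix is the node name with non-alphanumeric characters replaced by underscores. For the sample yaml above, mace_run on the device would presumably read /data/local/tmp/mace_run/model_input_input_node0 and model_input_input_node1 and write model_out_output_node0 and model_out_output_node1, which the validation script then pulls back and compares per node (file-name prefixes taken from INPUT_FILE_NAME and OUTPUT_FILE_NAME in env.sh; the node names are the sample yaml values).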