提交 199d53ed 编写于 作者: L Liangliang He

Merge branch 'caffe' into 'master'

Support caffe model

See merge request !45
...@@ -7,7 +7,9 @@ embed_model_data: 1 ...@@ -7,7 +7,9 @@ embed_model_data: 1
vlog_level: 0 vlog_level: 0
models: models:
preview_net: preview_net:
platform: tensorflow
model_file_path: path/to/model64.pb # also support http:// and https:// model_file_path: path/to/model64.pb # also support http:// and https://
model_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
input_node: input_node input_node: input_node
output_node: output_node output_node: output_node
input_shape: 1,64,64,3 input_shape: 1,64,64,3
...@@ -15,12 +17,18 @@ models: ...@@ -15,12 +17,18 @@ models:
runtime: gpu runtime: gpu
limit_opencl_kernel_time: 0 limit_opencl_kernel_time: 0
dsp_mode: 0 dsp_mode: 0
obfuscate: 1
capture_net: capture_net:
model_file_path: path/to/model256.pb platform: caffe
model_file_path: path/to/model.prototxt
weight_file_path: path/to/weight.caffemodel
model_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
weight_sha256_checksum: 05d92625809dc9edd6484882335c48c043397aed450a168d75eb8b538e86881a
input_node: input_node input_node: input_node
output_node: output_node output_node: output_node
input_shape: 1,256,256,3 input_shape: 1,256,256,3
output_shape: 1,256,256,2 output_shape: 1,256,256,2
runtime: gpu runtime: cpu
limit_opencl_kernel_time: 1 limit_opencl_kernel_time: 1
dsp_mode: 0 dsp_mode: 0
obfuscate: 1
...@@ -3,29 +3,26 @@ ...@@ -3,29 +3,26 @@
CURRENT_DIR=`dirname $0` CURRENT_DIR=`dirname $0`
source ${CURRENT_DIR}/env.sh source ${CURRENT_DIR}/env.sh
bazel build //lib/python/tools:tf_converter || exit 1 bazel build //lib/python/tools:converter || exit 1
rm -rf ${MODEL_CODEGEN_DIR} rm -rf ${MODEL_CODEGEN_DIR}
mkdir -p ${MODEL_CODEGEN_DIR} mkdir -p ${MODEL_CODEGEN_DIR}
if [ ${DSP_MODE} ]; then if [ ${DSP_MODE} ]; then
DSP_MODE_FLAG="--dsp_mode=${DSP_MODE}" DSP_MODE_FLAG="--dsp_mode=${DSP_MODE}"
fi fi
OBFUSCATE=True bazel-bin/lib/python/tools/converter --platform=${PLATFORM} \
if [ "${BENCHMARK_FLAG}" = "1" ]; then --model_file=${MODEL_FILE_PATH} \
OBFUSCATE=False --weight_file=${WEIGHT_FILE_PATH} \
fi --model_checksum=${MODEL_SHA256_CHECKSUM} \
--output=${MODEL_CODEGEN_DIR}/model.cc \
bazel-bin/lib/python/tools/tf_converter --input=${MODEL_FILE_PATH} \ --input_node=${INPUT_NODE} \
--model_checksum=${MODEL_SHA256_CHECKSUM} \ --output_node=${OUTPUT_NODE} \
--output=${MODEL_CODEGEN_DIR}/model.cc \ --data_type=${DATA_TYPE} \
--input_node=${INPUT_NODE} \ --runtime=${RUNTIME} \
--output_node=${OUTPUT_NODE} \ --output_type=source \
--data_type=${DATA_TYPE} \ --template=${LIBMACE_SOURCE_DIR}/lib/python/tools/model.template \
--runtime=${RUNTIME} \ --model_tag=${MODEL_TAG} \
--output_type=source \ --input_shape=${INPUT_SHAPE} \
--template=${LIBMACE_SOURCE_DIR}/lib/python/tools/model.template \ ${DSP_MODE_FLAG} \
--model_tag=${MODEL_TAG} \ --embed_model_data=${EMBED_MODEL_DATA} \
--input_shape=${INPUT_SHAPE} \ --obfuscate=${OBFUSCATE} || exit 1
${DSP_MODE_FLAG} \
--embed_model_data=${EMBED_MODEL_DATA} \
--obfuscate=${OBFUSCATE} || exit 1
...@@ -17,7 +17,6 @@ import yaml ...@@ -17,7 +17,6 @@ import yaml
from ConfigParser import ConfigParser from ConfigParser import ConfigParser
def run_command(command): def run_command(command):
print("Run command: {}".format(command)) print("Run command: {}".format(command))
result = subprocess.Popen( result = subprocess.Popen(
...@@ -226,6 +225,11 @@ def main(unused_args): ...@@ -226,6 +225,11 @@ def main(unused_args):
os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb" os.environ["MODEL_FILE_PATH"] = model_output_dir + "/model.pb"
urllib.urlretrieve(model_config["model_file_path"], os.environ["MODEL_FILE_PATH"]) urllib.urlretrieve(model_config["model_file_path"], os.environ["MODEL_FILE_PATH"])
if model_config["platform"] == "caffe" and (model_config["weight_file_path"].startswith(
"http://") or model_config["weight_file_path"].startswith("https://")):
os.environ["WEIGHT_FILE_PATH"] = model_output_dir + "/model.caffemodel"
urllib.urlretrieve(model_config["weight_file_path"], os.environ["WEIGHT_FILE_PATH"])
if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate" or FLAGS.mode == "all": if FLAGS.mode == "build" or FLAGS.mode == "run" or FLAGS.mode == "validate" or FLAGS.mode == "all":
generate_random_input(model_output_dir) generate_random_input(model_output_dir)
......
...@@ -56,7 +56,7 @@ def valid_output(out_shape, mace_out_file, tf_out_value): ...@@ -56,7 +56,7 @@ def valid_output(out_shape, mace_out_file, tf_out_value):
def run_model(input_shape): def run_model(input_shape):
if not gfile.Exists(FLAGS.model_file): if not gfile.Exists(FLAGS.model_file):
print("Input graph file '" + FLAGS.model_file + "' does not exist!") print("Input graph file '" + FLAGS.model_file + "' does not exist!")
return -1 sys.exit(-1)
input_graph_def = tf.GraphDef() input_graph_def = tf.GraphDef()
with gfile.Open(FLAGS.model_file, "rb") as f: with gfile.Open(FLAGS.model_file, "rb") as f:
......
import argparse
import sys
import os
import os.path
import numpy as np
from scipy import spatial
# Must be set before `import caffe` so caffe's native (glog) logger
# presumably picks it up at load time — level 1 hides INFO messages.
os.environ['GLOG_minloglevel'] = '1'  # suppress Caffe verbose prints
import caffe
# Validation Flow:
# 1. Generate input data
#    python validate_caffe.py --generate_data true \
#        --input_file input_file \
#        --input_shape 1,64,64,3
#
# 2. Use mace_run to run the model on the phone.
# 3. adb pull the result.
# 4. Compare the output data of MACE and Caffe:
#    python validate_caffe.py --model_file model.prototxt \
#        --weight_file weight.caffemodel \
#        --input_file input_file \
#        --mace_out_file output_file \
#        --input_node input_node \
#        --output_node output_node \
#        --input_shape 1,64,64,3 \
#        --output_shape 1,64,64,2
def generate_data(shape):
    """Write a random float32 tensor of `shape`, valued in [-1, 1), to FLAGS.input_file.

    Uses `print(...)` call form (the py2-only statement form broke Python 3
    and was inconsistent with the call form already used in run_model).
    Assumes module-level FLAGS has been populated by parse_args().
    """
    np.random.seed()
    data = np.random.random(shape) * 2 - 1
    print(FLAGS.input_file)
    data.astype(np.float32).tofile(FLAGS.input_file)
    print("Generate input file done.")
def load_data(file):
    """Read a flat float32 array from `file`; return an empty array if it is missing."""
    if not os.path.isfile(file):
        return np.empty([0])
    return np.fromfile(file, dtype=np.float32)
def valid_output(out_shape, mace_out_file, out_value):
    """Compare the caffe reference output against MACE's output file.

    out_shape is the NHWC output shape; the caffe result `out_value` (NCHW)
    is transposed to NHWC before cosine-similarity comparison. The pass
    threshold depends on FLAGS.mace_runtime (cpu/gpu/dsp).

    Fixes vs. original: py2-only `print` statements converted to call form,
    and `out_shape` is no longer reordered in place (the original mutated
    the caller's list as a side effect).
    """
    mace_out_value = load_data(mace_out_file)
    if mace_out_value.size == 0:
        print('=======================Skip empty node===================')
        return
    mace_out_value = mace_out_value.reshape(out_shape)
    # Build the NCHW view of the shape locally instead of mutating out_shape.
    nchw_shape = [out_shape[0], out_shape[3], out_shape[1], out_shape[2]]
    out_value = out_value.reshape(nchw_shape).transpose((0, 2, 3, 1))
    similarity = 1 - spatial.distance.cosine(out_value.flat, mace_out_value.flat)
    print('MACE VS Caffe similarity: ', similarity)
    # Looser thresholds for lower-precision runtimes; unknown runtimes fail.
    threshold = {"cpu": 0.999, "gpu": 0.995, "dsp": 0.930}.get(FLAGS.mace_runtime)
    if threshold is not None and similarity > threshold:
        print('=======================Similarity Test Passed======================')
    else:
        print('=======================Similarity Test Failed======================')
def run_model(input_shape):
    """Run the caffe network on the saved input file and return the output blob.

    Exits the process if either the prototxt or the caffemodel file is missing.
    """
    checks = (("Input graph file '", FLAGS.model_file),
              ("Input weight file '", FLAGS.weight_file))
    for prefix, path in checks:
        if not os.path.isfile(path):
            print(prefix + path + "' does not exist!")
            sys.exit(-1)
    caffe.set_mode_cpu()
    net = caffe.Net(FLAGS.model_file, caffe.TEST, weights=FLAGS.weight_file)
    # Input on disk is NHWC; caffe blobs are NCHW, so transpose before feeding.
    nhwc_input = load_data(FLAGS.input_file).reshape(input_shape)
    net.blobs[FLAGS.input_node].data[0] = nhwc_input.transpose((0, 3, 1, 2))
    net.forward(start=FLAGS.input_node, end=FLAGS.output_node)
    return net.blobs[FLAGS.output_node].data[0]
def main(unused_args):
    """Either generate random input data or run the caffe model and validate MACE's output."""
    in_shape = [int(dim) for dim in FLAGS.input_shape.split(',')]
    out_shape = [int(dim) for dim in FLAGS.output_shape.split(',')]
    if FLAGS.generate_data:
        generate_data(in_shape)
        return
    valid_output(out_shape, FLAGS.mace_out_file, run_model(in_shape))
def parse_args():
    """Parses command line arguments."""
    parser = argparse.ArgumentParser()
    # 'bool' is a registered string->bool converter so "--flag true"/"false" works.
    parser.register("type", "bool", lambda v: v.lower() == "true")
    # (flag, type, default, help) — one row per option.
    options = [
        ("--model_file", str, "", "caffe prototxt file to load."),
        ("--weight_file", str, "", "caffe model file to load."),
        ("--input_file", str, "", "input file."),
        ("--mace_out_file", str, "", "mace output file to load."),
        ("--mace_runtime", str, "gpu", "mace runtime device."),
        ("--input_shape", str, "1,64,64,3", "input shape."),
        ("--output_shape", str, "1,64,64,2", "output shape."),
        ("--input_node", str, "input_node", "input node"),
        ("--output_node", str, "output_node", "output node"),
        ("--generate_data", "bool", "false", "Generate data or not."),
    ]
    for flag, arg_type, default, help_text in options:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser.parse_known_args()
# Script entry point: populate module-level FLAGS, then forward argv[0]
# plus any unrecognized arguments to main() (mirrors tf.app.run's convention).
if __name__ == '__main__':
    FLAGS, unparsed = parse_args()
    main(unused_args=[sys.argv[0]] + unparsed)
...@@ -15,12 +15,17 @@ source ${CURRENT_DIR}/env.sh ...@@ -15,12 +15,17 @@ source ${CURRENT_DIR}/env.sh
MODEL_OUTPUT_DIR=$1 MODEL_OUTPUT_DIR=$1
GENERATE_DATA_OR_NOT=$2 GENERATE_DATA_OR_NOT=$2
echo $MODEL_OUTPUT_DIR
if [ "$GENERATE_DATA_OR_NOT" = 1 ]; then if [ "$GENERATE_DATA_OR_NOT" = 1 ]; then
rm -rf ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} rm -rf ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME}
python tools/validate.py --generate_data true \ python tools/validate.py --generate_data true \
--input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \ --input_file=${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} \
--input_shape="${INPUT_SHAPE}" || exit 1 --input_shape="${INPUT_SHAPE}" || exit 1
else exit 0
fi
if [ "$PLATFORM" == "tensorflow" ];then
rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}
adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_OUTPUT_DIR} adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_OUTPUT_DIR}
python tools/validate.py --model_file ${MODEL_FILE_PATH} \ python tools/validate.py --model_file ${MODEL_FILE_PATH} \
...@@ -30,5 +35,46 @@ else ...@@ -30,5 +35,46 @@ else
--input_node ${INPUT_NODE} \ --input_node ${INPUT_NODE} \
--output_node ${OUTPUT_NODE} \ --output_node ${OUTPUT_NODE} \
--input_shape ${INPUT_SHAPE} \ --input_shape ${INPUT_SHAPE} \
--output_shape ${OUTPUT_SHAPE} --output_shape ${OUTPUT_SHAPE} || exit 1
elif [ "$PLATFORM" == "caffe" ];then
IMAGE_NAME=mace-caffe:latest
CONTAINER_NAME=mace_caffe_validator
RES_FILE=validation.result
if [[ "$(docker images -q mace-caffe:latest 2> /dev/null)" == "" ]]; then
echo "Build caffe docker"
docker build -t ${IMAGE_NAME} docker/caffe || exit 1
fi
if [ ! "$(docker ps -qa -f name=${CONTAINER_NAME})" ]; then
echo "Run caffe container"
docker run -d -it --name ${CONTAINER_NAME} ${IMAGE_NAME} /bin/bash || exit 1
fi
if [ "$(docker inspect -f {{.State.Running}} ${CONTAINER_NAME})" == "false" ];then
echo "Start caffe container"
docker start ${CONTAINER_NAME}
fi
rm -rf ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME}
adb </dev/null pull ${PHONE_DATA_DIR}/${OUTPUT_FILE_NAME} ${MODEL_OUTPUT_DIR}
MODEL_FILE_NAME=$(basename ${MODEL_FILE_PATH})
WEIGHT_FILE_NAME=$(basename ${WEIGHT_FILE_PATH})
docker cp tools/validate_caffe.py ${CONTAINER_NAME}:/mace
docker cp ${MODEL_OUTPUT_DIR}/${INPUT_FILE_NAME} ${CONTAINER_NAME}:/mace
docker cp ${MODEL_OUTPUT_DIR}/${OUTPUT_FILE_NAME} ${CONTAINER_NAME}:/mace
docker cp ${MODEL_FILE_PATH} ${CONTAINER_NAME}:/mace
docker cp ${WEIGHT_FILE_PATH} ${CONTAINER_NAME}:/mace
docker exec -it ${CONTAINER_NAME} python /mace/validate_caffe.py --model_file /mace/${MODEL_FILE_NAME} \
--weight_file /mace/${WEIGHT_FILE_NAME} \
--input_file /mace/${INPUT_FILE_NAME} \
--mace_out_file /mace/${OUTPUT_FILE_NAME} \
--mace_runtime ${RUNTIME} \
--input_node ${INPUT_NODE} \
--output_node ${OUTPUT_NODE} \
--input_shape ${INPUT_SHAPE} \
--output_shape ${OUTPUT_SHAPE}
fi fi
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册