Unverified commit 22aa3e83, authored by Yanzhan Yang, committed by GitHub

add ios auto testing functionality (#1730)

Parent: 1818652e
@@ -140,7 +140,8 @@ enum MemoryOptimizationLevel {
 struct PaddleMobileConfigInternal {
   bool load_when_predict = false;
-  MemoryOptimizationLevel memory_optimization_level = FullMemoryOptimization;
+  MemoryOptimizationLevel memory_optimization_level =
+      MemoryOptimizationWithoutFeeds;
   std::string model_obfuscate_key = "";
 };
@@ -241,12 +241,26 @@ def save_all_op_output(feed_kv=None):
     for i in range(len(ops)):
         op = ops[i]
         var_name = None
-        for name in op.output_arg_names:
-            var_name = name
-            if "tmp" in name:
+        var_name_index = -1
+        for index in range(len(op.output_names)):
+            if op.output_names[index] in ["Y", "Out", "Output"]:
+                var_name_index = index
                 break
-        if "sequence_pool" in var_name:
-            continue
+        if var_name_index != -1:
+            var_name = op.output_arg_names[var_name_index]
+        else:
+            for name in op.output_arg_names:
+                var_name = name
+                if "tmp" in name:
+                    break
+        # real_var_name = None
+        # if op.type == "fetch":
+        #     for name in op.input_arg_names:
+        #         real_var_name = name
+        #         if "tmp" in name:
+        #             break
+        # else:
+        #     real_var_name = var_name
         if fast_check:
             if var_name not in fetch_names and var_name not in feed_names:
                 continue
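For context: in fluid, op.output_names lists an operator's formal output slots (e.g. "Y", "Out", "Output"), while op.output_arg_names lists the concrete variables bound to those slots. The new code prefers the canonical slot and only falls back to the previous "tmp"-substring heuristic when no such slot exists. A minimal standalone sketch mirroring the committed selection logic, assuming op comes from prog.current_block().ops as elsewhere in this file:

def pick_output_var(op):
    # prefer the operator's canonical output slot when it has one
    for index in range(len(op.output_names)):
        if op.output_names[index] in ["Y", "Out", "Output"]:
            return op.output_arg_names[index]
    # fall back to the old heuristic: the first "tmp" name, else the last name
    var_name = None
    for name in op.output_arg_names:
        var_name = name
        if "tmp" in name:
            break
    return var_name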
@@ -281,7 +295,7 @@ def check_mobile_results(args, fuse, mem_opt):
     args = "{} {} {}".format("1" if fuse else "0", "1" if mem_opt else "0", args)
     res = sh("adb shell \"cd {} && export LD_LIBRARY_PATH=. && ./test-net {}\"".format(mobile_exec_root, args))
     lines = res.split("\n")
-    print(lines)
+    # print(lines)
     for line in lines:
         if line.startswith("auto-test-debug"):
             print(line)
0
1
images
__pycache__
# -*- coding: utf-8 -*-
import os
import sys
import math
import struct
import subprocess
import numpy as np
import paddle.fluid as fluid

fast_check = False

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

ops = None
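# check_model makes two passes over a model directory: the first loads the
# original "model"/"params" pair, repairs any mis-declared variable shapes and
# (optionally) saves a corrected "model-checked"/"params-checked" pair; the
# second reloads the checked model, collects per-variable shape info and
# optionally dumps every op's output data for the mobile side to compare.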
def check_model(model_path, dump_data_and_model):
    check_model_impl(model_path, dump_data_and_model, True)
    return check_model_impl(model_path, dump_data_and_model, False)

def check_model_impl(model_path, dump_data_and_model, need_check):
    global ops
    if need_check:
        prog, feeds, fetches = fluid.io.load_inference_model(dirname=model_path, executor=exe, model_filename="model", params_filename="params")
    else:
        prog, feeds, fetches = fluid.io.load_inference_model(dirname=model_path, executor=exe, model_filename="model-checked", params_filename="params-checked")
    ops = prog.current_block().ops
    vars = prog.current_block().vars
    # get the shape of a variable, mapping unknown (-1) dims to 1
    def get_var_shape(var_name):
        vars = prog.current_block().vars
        shape = vars[var_name].desc.shape()
        for i in range(len(shape)):
            dim = shape[i]
            if dim == -1:
                shape[i] = 1
        return shape
    # get the shape of a feed variable
    def get_feed_var_shape(var_name):
        # to hard-code the input shape, uncomment the following line
        # return [1, 3, 224, 224]
        return get_var_shape(var_name)
    # generate a dict of random feed data keyed by feed name
    def gen_feed_kv():
        feed_kv = {}
        for feed_name in feeds:
            feed_shape = get_feed_var_shape(feed_name)
            data = np.random.random(feed_shape).astype("float32")
            feed_kv[feed_name] = data
        return feed_kv
    feed_kv = gen_feed_kv()
    # run the model
    def run_model(feed_kv=None):
        if feed_kv is None:
            feed_kv = gen_feed_kv()
        outputs = exe.run(prog, feed=feed_kv, fetch_list=fetches, return_numpy=False)
        results = []
        for output in outputs:
            results.append(np.array(output))
        return results
    # get the data of a variable
    def get_var_data(var_name, feed_kv=None):
        # force the variable to be persistable
        v = fluid.framework._get_var(var_name, prog)
        persistable = v.persistable
        if not persistable:
            v.persistable = True
        # outputs = run_model(feed_kv=feed_kv)
        output = np.array(fluid.global_scope().find_var(var_name).get_tensor())
        # restore the variable's persistable attribute
        v.persistable = persistable
        return output
    # force all variables to be persistable
    p_names = []
    for name in vars:
        name = str(name)
        v = fluid.framework._get_var(name, prog)
        if not v.persistable:
            v.persistable = True
            p_names.append(name)
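    # forcing persistable keeps each variable's tensor alive in the global
    # scope after exe.run below, so get_var_data can read it back when dumping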
    outputs = run_model(feed_kv=feed_kv)
    has_found_wrong_shape = False
    # fix the shape of each variable
    for name in vars:
        name = str(name)
        v = vars[name]
        if v.persistable:
            v1 = fluid.global_scope().find_var(name)
            try:
                t1 = v1.get_tensor()
                shape = t1.shape()
            except:
                continue
            if v.desc.shape() != shape:
                has_found_wrong_shape = True
                v.desc.set_shape(shape)
    # restore the persistable attribute of each variable
    for name in p_names:
        v = fluid.framework._get_var(name, prog)
        v.persistable = False
    if need_check and dump_data_and_model:
        fluid.io.save_inference_model(dirname=model_path, feeded_var_names=feeds, target_vars=fetches, executor=exe, main_program=prog, model_filename="model-checked", params_filename="params-checked")
        return
    var_cache = {}
    # save the output data of every op
    def save_all_op_output(feed_kv=None):
        output_path = "{}/data".format(model_path)
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        ops = prog.current_block().ops
        fetch_names = []
        for fetch in fetches:
            fetch_names.append(fetch.name)
        feed_names = feeds
        for i in range(len(ops)):
            op = ops[i]
            var_name = None
            for name in op.output_arg_names:
                var_name = name
                if "tmp" in name:
                    break
            # for fetch ops the interesting data is the input variable
            real_var_name = None
            if op.type == "fetch":
                for name in op.input_arg_names:
                    real_var_name = name
                    if "tmp" in name:
                        break
            else:
                real_var_name = var_name
            if fast_check:
                if var_name not in fetch_names and var_name not in feed_names:
                    continue
            try:
                shape = get_var_shape(var_name)
                var_cache[var_name] = shape
            except:
                pass
            if not dump_data_and_model:
                continue
            try:
                np_data = get_var_data(real_var_name, feed_kv=feed_kv)
                index = -1
                for i in range(len(fetch_names)):
                    if real_var_name == fetch_names[i]:
                        index = i
                        break
                if index != -1:
                    np_data = outputs[index]
                data = np_data.flatten().tolist()
                file_name = var_name.replace("/", "_")
                var_path = output_path + "/" + file_name
                np_data.tofile(var_path)
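                # tofile writes the array's raw bytes (float32 for these
                # models) in native byte order, with no header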
                # out_file = open(var_path, "wb")
                # if var_name in feed_names:
                #     for item in data:
                #         out_file.write(struct.pack("d", item))
                # else:
                #     for item in data:
                #         out_file.write(struct.pack("d", item))
                # out_file.close()
            except:
                print("dump {} {} failed".format(op.type, var_name))
                pass
    save_all_op_output()
    return var_cache

if __name__ == "__main__":
    model_path = "./1/mobilenet"
    check_model(model_path, True)
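A dumped variable can be read back for spot-checking with numpy; a minimal sketch, assuming check_model has been run as in the __main__ block above, with "fc7.tmp_2" standing in as a hypothetical dumped variable name:

import numpy as np

# the file name is the variable name with "/" replaced by "_"
data = np.fromfile("./1/mobilenet/data/fc7.tmp_2", dtype=np.float32)
print(data.size, data[:8])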
# -*- coding: utf-8 -*-
import os
import sys
import math
import qrcode
import subprocess
import numpy as np
import paddle.fluid as fluid
from flask import Flask, request, send_from_directory, jsonify, make_response

# sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# from fluidtools import run
from fluidtools import check_model

dump_data_and_model = False

def get_ip_address():
    handle = os.popen("ifconfig | grep 172 | grep inet | grep netmask | grep broadcast | cut -d \" \" -f2")
    ip = handle.read()
    ip = ip.strip()
    return ip
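# NOTE: the ifconfig pipeline above is macOS-specific and assumes the dev
# machine's LAN address starts with 172; adjust the grep pattern for other
# networks.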
app = Flask(__name__, static_url_path='')

param_precisions = [1]  # 0 for float16, 1 for float32

def process_model(precision, name):
    model_dir = "./{}/{}".format(precision, name)
    os.chdir(model_dir)
    os.chdir("../..")
    var_info = check_model(model_dir, dump_data_and_model)
    return var_info
def get_model_info(precision, name):
    # fuller configuration kept for reference:
    # model_info = {
    #     "name": name,
    #     "params_precision": [precision],
    #     "fusion": [True, False],
    #     "reuse_texture": [True, False],
    #     "use_mps": [True, False],
    #     "test_performance": True,
    #     "diff_precision": 0.01,
    #     "vars_dic": {
    #     }
    # }
    model_info = {
        "name": name,
        "params_precision": [precision],
        "fusion": [True],
        "reuse_texture": [True],
        "use_mps": [True, False],
        "test_performance": False,
        "diff_precision": 0.01,
        "vars_dic": {
        }
    }
    var_info = process_model(precision, name)
    model_info["vars_dic"] = var_info
    return model_info
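# Each model_info entry presumably tells the iOS client which configurations
# to exercise (fusion, texture reuse, MPS on/off), the tolerance for comparing
# outputs (diff_precision) and the expected shape of every variable (vars_dic).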
model_list = []

def process_models():
    for precision in param_precisions:
        model_names = os.listdir("./{}".format(precision))
        for name in model_names:
            model_info = get_model_info(precision, name)
            model_list.append(model_info)
@app.route('/images/<path:path>')
def send_image(path):
    return send_from_directory('images', path)

@app.route('/getFile/<name>/model')
def send_model(name):
    precision = 1
    return send_from_directory("{}/{}".format(precision, name), "model-checked")

@app.route('/getFile/<name>/params/<precision>')
def send_params(name, precision):
    return send_from_directory("{}/{}".format(precision, name), "params-checked")

@app.route('/getFile/<name>/data/<var>')
def send_data(name, var):
    precision = 1
    return send_from_directory("{}/{}/data".format(precision, name), var)

@app.route('/getTestInfo', methods=['GET'])
def test_info():
    info = {"model_list": model_list}
    return make_response(jsonify(info), 200)
test_result = None

@app.route('/putTestResult', methods=['POST'])
def put_test_result():
    global test_result
    test_result = request.get_json()
    success = True
    for item in test_result["results"]:
        result = item["isResultEqual"]
        if not result:
            success = False
            break
    # the "aaa-" prefix presumably keeps the flag first when JSON keys are sorted
    test_result["aaa-success"] = success
    os.popen("open -a \"/Applications/Google Chrome.app\" \"{}/showTestResult\"".format(host))
    return make_response(jsonify({"msg": "ok"}), 200)

@app.route('/showTestResult', methods=['GET'])
def show_test_result():
    global test_result
    return make_response(jsonify(test_result), 200)

@app.route('/', methods=['GET'])
def home():
    return "<html><body><img src=\"images/qrcode.png\"/></body></html>"
host = None

if __name__ == "__main__":
    process_models()
    host = "http://{}:8080".format(get_ip_address())
    # render the server URL as a QR code so the iOS device can scan it
    image = qrcode.make(host)
    if not os.path.isdir("images"):
        os.mkdir("images")
    image.save("images/qrcode.png")
    os.popen("open -a \"/Applications/Google Chrome.app\" \"{}\"".format(host))
    app.run(host="0.0.0.0", port=8080)
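The iOS client's expected round trip against this server can be exercised from any HTTP client; a minimal sketch using the requests library, with a hypothetical host address standing in for the one encoded in the QR code:

import requests

host = "http://172.16.0.10:8080"  # hypothetical; the QR code encodes the real URL

# 1. fetch the list of models and test configurations
info = requests.get("{}/getTestInfo".format(host)).json()
model = info["model_list"][0]["name"]

# 2. download the checked model and params (precision 1 = float32)
open("model-checked", "wb").write(requests.get("{}/getFile/{}/model".format(host, model)).content)
open("params-checked", "wb").write(requests.get("{}/getFile/{}/params/1".format(host, model)).content)

# 3. report results; the server then opens /showTestResult in Chrome
requests.post("{}/putTestResult".format(host), json={"results": [{"isResultEqual": True}]})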