Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
Paddle-Lite
提交
22aa3e83
P
Paddle-Lite
项目概览
PaddlePaddle
/
Paddle-Lite
通知
332
Star
4
Fork
1
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
271
列表
看板
标记
里程碑
合并请求
78
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle-Lite
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
271
Issue
271
列表
看板
标记
里程碑
合并请求
78
合并请求
78
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
22aa3e83
编写于
7月 05, 2019
作者:
Y
Yanzhan Yang
提交者:
GitHub
7月 05, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add ios auto testing functionality (#1730)
上级
1818652e
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
328 additions
and
7 deletions
+328
-7
src/common/types.h
src/common/types.h
+2
-1
tools/python/fluidtools/run.py
tools/python/fluidtools/run.py
+20
-6
tools/python/misc/.gitignore
tools/python/misc/.gitignore
+4
-0
tools/python/misc/fluidtools.py
tools/python/misc/fluidtools.py
+175
-0
tools/python/misc/ios-test-server.py
tools/python/misc/ios-test-server.py
+127
-0
未找到文件。
src/common/types.h
浏览文件 @
22aa3e83
...
...
@@ -140,7 +140,8 @@ enum MemoryOptimizationLevel {
struct
PaddleMobileConfigInternal
{
bool
load_when_predict
=
false
;
MemoryOptimizationLevel
memory_optimization_level
=
FullMemoryOptimization
;
MemoryOptimizationLevel
memory_optimization_level
=
MemoryOptimizationWithoutFeeds
;
std
::
string
model_obfuscate_key
=
""
;
};
...
...
tools/python/fluidtools/run.py
浏览文件 @
22aa3e83
...
...
@@ -241,12 +241,26 @@ def save_all_op_output(feed_kv=None):
for
i
in
range
(
len
(
ops
)):
op
=
ops
[
i
]
var_name
=
None
for
name
in
op
.
output_arg_names
:
var_name
=
name
if
"tmp"
in
name
:
var_name_index
=
-
1
for
index
in
range
(
len
(
op
.
output_names
)):
if
op
.
output_names
[
index
]
in
[
"Y"
,
"Out"
,
"Output"
]:
var_name_index
=
index
break
if
"sequence_pool"
in
var_name
:
continue
if
var_name_index
!=
-
1
:
var_name
=
op
.
output_arg_names
[
var_name_index
]
else
:
for
name
in
op
.
output_arg_names
:
var_name
=
name
if
"tmp"
in
name
:
break
# real_var_name = None
# if op.type == "fetch":
# for name in op.input_arg_names:
# real_var_name = name
# if "tmp" in name:
# break
# else:
# real_var_name = var_name
if
fast_check
:
if
var_name
not
in
fetch_names
and
var_name
not
in
feed_names
:
continue
...
...
@@ -281,7 +295,7 @@ def check_mobile_results(args, fuse, mem_opt):
args
=
"{} {} {}"
.
format
(
"1"
if
fuse
else
"0"
,
"1"
if
mem_opt
else
"0"
,
args
)
res
=
sh
(
"adb shell
\"
cd {} && export LD_LIBRARY_PATH=. && ./test-net {}
\"
"
.
format
(
mobile_exec_root
,
args
))
lines
=
res
.
split
(
"
\n
"
)
print
(
lines
)
#
print(lines)
for
line
in
lines
:
if
line
.
startswith
(
"auto-test-debug"
):
print
(
line
)
...
...
tools/python/misc/.gitignore
0 → 100644
浏览文件 @
22aa3e83
0
1
images
__pycache__
tools/python/misc/fluidtools.py
0 → 100644
浏览文件 @
22aa3e83
# -*- coding: utf-8 -*-
import
os
import
sys
import
math
import
struct
import
subprocess
import
numpy
as
np
import
paddle.fluid
as
fluid
# When True, the dump helpers only consider feed/fetch variables.
fast_check = False

# One shared CPU executor: every model in this module is checked on it.
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# Filled by check_model_impl() with the ops of the most recently loaded program.
ops = None
def check_model(model_path, dump_data_and_model):
    """Check a fluid model in two passes and return its variable info.

    The first pass loads the raw "model"/"params" files (and, when
    dump_data_and_model is set, writes the shape-fixed "*-checked" copies);
    the second pass re-loads the checked copies and returns the var cache
    produced from them.
    """
    check_model_impl(model_path, dump_data_and_model, True)
    return check_model_impl(model_path, dump_data_and_model, False)
def check_model_impl(model_path, dump_data_and_model, need_check):
    """Load a fluid inference model, fix var shapes, and optionally dump data.

    Args:
        model_path: directory containing the model/params files.
        dump_data_and_model: when True, write per-op output data (and, in the
            checking pass, the shape-fixed "*-checked" model files).
        need_check: True loads the raw "model"/"params" files; False loads
            the previously written "model-checked"/"params-checked" files.

    Returns:
        A dict mapping var name -> concrete shape, or None in the pass that
        only rewrites the checked model files.
    """
    global ops
    if need_check:
        prog, feeds, fetches = fluid.io.load_inference_model(
            dirname=model_path, executor=exe,
            model_filename="model", params_filename="params")
    else:
        prog, feeds, fetches = fluid.io.load_inference_model(
            dirname=model_path, executor=exe,
            model_filename="model-checked", params_filename="params-checked")
    ops = prog.current_block().ops
    # Renamed from `vars` to avoid shadowing the builtin.
    program_vars = prog.current_block().vars

    def get_var_shape(var_name):
        # Look the block vars up fresh so shape fixes below are reflected.
        block_vars = prog.current_block().vars
        shape = block_vars[var_name].desc.shape()
        # Replace dynamic (-1) dimensions with 1 so the shape is concrete.
        for i in range(len(shape)):
            if shape[i] == -1:
                shape[i] = 1
        return shape

    def get_feed_var_shape(var_name):
        # To hard-code the input shape, return it here instead, e.g.:
        # return [1, 3, 224, 224]
        return get_var_shape(var_name)

    def gen_feed_kv():
        # Random float32 data for every feed variable.
        feed_kv = {}
        for feed_name in feeds:
            feed_shape = get_feed_var_shape(feed_name)
            feed_kv[feed_name] = np.random.random(feed_shape).astype("float32")
        return feed_kv

    feed_kv = gen_feed_kv()

    def run_model(feed_kv=None):
        # Run the whole program and return the fetched tensors as ndarrays.
        if feed_kv is None:
            feed_kv = gen_feed_kv()
        outputs = exe.run(prog, feed=feed_kv, fetch_list=fetches,
                          return_numpy=False)
        return [np.array(output) for output in outputs]

    def get_var_data(var_name, feed_kv=None):
        # Temporarily force the var persistable so its tensor is kept in
        # the global scope, then restore the original flag.
        v = fluid.framework._get_var(var_name, prog)
        persistable = v.persistable
        if not persistable:
            v.persistable = True
        output = np.array(fluid.global_scope().find_var(var_name).get_tensor())
        v.persistable = persistable
        return output

    # Force every var persistable so run_model() keeps all intermediates;
    # remember which ones we flipped so we can restore them afterwards.
    p_names = []
    for name in program_vars:
        name = str(name)
        v = fluid.framework._get_var(name, prog)
        if not v.persistable:
            v.persistable = True
            p_names.append(name)
    outputs = run_model(feed_kv=feed_kv)
    has_found_wrong_shape = False
    # Correct each var's declared shape from its actual runtime tensor.
    for name in program_vars:
        name = str(name)
        v = program_vars[name]
        if v.persistable:
            v1 = fluid.global_scope().find_var(name)
            try:
                t1 = v1.get_tensor()
                shape = t1.shape()
            except Exception:
                # Var has no tensor in the scope; nothing to compare against.
                continue
            if v.desc.shape() != shape:
                has_found_wrong_shape = True
            v.desc.set_shape(shape)
    # Restore the persistable flag on the vars we flipped above.
    for name in p_names:
        fluid.framework._get_var(name, prog).persistable = False
    if need_check and dump_data_and_model:
        # Checking pass: persist the shape-fixed model and stop here.
        fluid.io.save_inference_model(
            dirname=model_path, feeded_var_names=feeds, target_vars=fetches,
            executor=exe, main_program=prog,
            model_filename="model-checked", params_filename="params-checked")
        return
    var_cache = {}

    def save_all_op_output(feed_kv=None):
        # Record each op's output shape in var_cache and, when requested,
        # dump the raw data of every op output under <model_path>/data.
        output_path = "{}/data".format(model_path)
        if not os.path.exists(output_path):
            os.mkdir(output_path)
        ops = prog.current_block().ops
        fetch_names = [fetch.name for fetch in fetches]
        feed_names = feeds
        for i in range(len(ops)):
            op = ops[i]
            var_name = None
            for name in op.output_arg_names:
                var_name = name
                if "tmp" in name:
                    break
            real_var_name = None
            if op.type == "fetch":
                for name in op.input_arg_names:
                    real_var_name = name
                    if "tmp" in name:
                        break
            else:
                real_var_name = var_name
            if fast_check:
                if var_name not in fetch_names and var_name not in feed_names:
                    continue
            try:
                var_cache[var_name] = get_var_shape(var_name)
            except Exception:
                pass
            if not dump_data_and_model:
                continue
            try:
                np_data = get_var_data(real_var_name, feed_kv=feed_kv)
                # Prefer the already-fetched output for fetch targets.
                # (Inner index renamed to j: the original reused i, shadowing
                # the op loop index.)
                index = -1
                for j in range(len(fetch_names)):
                    if real_var_name == fetch_names[j]:
                        index = j
                        break
                if index != -1:
                    np_data = outputs[index]
                file_name = var_name.replace("/", "_")
                var_path = output_path + "/" + file_name
                np_data.tofile(var_path)
            except Exception:
                print("dump {} {} failed".format(op.type, var_name))

    save_all_op_output()
    return var_cache
if __name__ == "__main__":
    # Manual entry point: check the bundled mobilenet model and dump its data.
    check_model("./1/mobilenet", True)
tools/python/misc/ios-test-server.py
0 → 100644
浏览文件 @
22aa3e83
# -*- coding: utf-8 -*
import
os
import
sys
import
math
import
qrcode
import
subprocess
import
numpy
as
np
import
paddle.fluid
as
fluid
from
flask
import
Flask
,
request
,
send_from_directory
,
jsonify
,
make_response
# sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# from fluidtools import run
from
fluidtools
import
check_model
# Whether check_model() should also dump per-op data and checked model files.
dump_data_and_model = False


def get_ip_address():
    """Return this machine's 172.* LAN address as reported by ifconfig."""
    # NOTE(review): assumes the LAN address starts with 172 — confirm on
    # networks using 10.* or 192.168.* ranges.
    pipe = os.popen(
        "ifconfig | grep 172 | grep inet | grep netmask | grep broadcast"
        " | cut -d \" \" -f2")
    return pipe.read().strip()
# Flask app serving model files and test endpoints; static files at the root.
app = Flask(__name__, static_url_path='')

# Parameter precisions to test: 0 for float16, 1 for float32.
param_precisions = [1]
def process_model(precision, name):
    """Run check_model on ./<precision>/<name> and return its var info dict."""
    model_dir = "./{}/{}".format(precision, name)
    # NOTE(review): the chdir round-trip assumes model_dir is exactly two
    # levels deep — confirm before changing the directory layout.
    os.chdir(model_dir)
    os.chdir("../..")
    return check_model(model_dir, dump_data_and_model)
def get_model_info(precision, name):
    """Build the test descriptor the iOS client consumes for one model."""
    info = {
        "name": name,
        "params_precision": [precision],
        "fusion": [True],
        "reuse_texture": [True],
        "use_mps": [True, False],
        "test_performance": False,
        "diff_precision": 0.01,
        "vars_dic": {},
    }
    # Attach the per-variable shape info computed by the checker.
    info["vars_dic"] = process_model(precision, name)
    return info
# Descriptors of every model found on disk, in the order discovered.
model_list = []


def process_models():
    """Populate model_list with an info dict for every model on disk."""
    for precision in param_precisions:
        for name in os.listdir("./{}".format(precision)):
            model_list.append(get_model_info(precision, name))
@app.route('/images/<path:path>')
def send_image(path):
    """Serve generated images (e.g. the QR code) from the images directory."""
    return send_from_directory('images', path)
@app.route('/getFile/<name>/model')
def send_model(name):
    """Serve the checked model file; precision is fixed at 1 (float32)."""
    precision = 1
    directory = "{}/{}".format(precision, name)
    return send_from_directory(directory, "model-checked")
@app.route('/getFile/<name>/params/<precision>')
def send_params(name, precision):
    """Serve the checked params file for the requested precision."""
    directory = "{}/{}".format(precision, name)
    return send_from_directory(directory, "params-checked")
@app.route('/getFile/<name>/data/<var>')
def send_data(name, var):
    """Serve one dumped variable's raw data; precision fixed at 1 (float32)."""
    precision = 1
    directory = "{}/{}/data".format(precision, name)
    return send_from_directory(directory, var)
@app.route('/getTestInfo', methods=['GET'])
def test_info():
    """Return the list of models the iOS client should test."""
    return make_response(jsonify({"model_list": model_list}), 200)
# Last result payload posted by the iOS client; rendered by /showTestResult.
test_result = None


@app.route('/putTestResult', methods=['POST'])
def put_test_result():
    """Store posted test results, flag overall success, and open the report."""
    global test_result
    test_result = request.get_json()
    # Overall success means every individual comparison matched.
    success = True
    for item in test_result["results"]:
        if not item["isResultEqual"]:
            success = False
            break
    # Presumably the "aaa-" prefix makes the summary key sort first — verify.
    test_result["aaa-success"] = success
    os.popen("open -a \"/Applications/Google Chrome.app\""
             " \"{}/showTestResult\"".format(host))
    return make_response(jsonify({"msg": "ok"}), 200)
@app.route('/showTestResult', methods=['GET'])
def show_test_result():
    """Return the most recently posted test result as JSON."""
    global test_result
    return make_response(jsonify(test_result), 200)
@app.route('/', methods=['GET'])
def home():
    """Landing page showing the QR code that encodes the server URL."""
    return "<html><body><img src=\"images/qrcode.png\" /></body></html>"
# Server base URL; filled in at startup and read by put_test_result().
host = None

if __name__ == "__main__":
    process_models()
    host = "http://{}:8080".format(get_ip_address())
    # Render the server URL as a QR code so a device can open it by scanning.
    image = qrcode.make(host)
    if not os.path.isdir("images"):
        os.mkdir("images")
    image.save("images/qrcode.png")
    os.popen("open -a \"/Applications/Google Chrome.app\" \"{}\"".format(host))
    app.run(host="0.0.0.0", port=8080)
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录