PaddlePaddle / Paddle-Lite
Commit 8fca2857
Authored by Yanzhan Yang on Jun 21, 2019; committed via GitHub on Jun 21, 2019.
enhance auto debug tool (#1698)
Parent: a94978bc
Showing 5 changed files with 123 additions and 45 deletions (+123 -45)
src/common/types.h (+1 -0)
src/framework/executor.cpp (+3 -1)
test/net/test_net.cpp (+29 -17)
tools/python/caffetools/run.py (+30 -0)
tools/python/fluidtools/run.py (+60 -27)
src/common/types.h
@@ -133,6 +133,7 @@ enum PowerMode {

 struct PaddleMobileConfigInternal {
   bool load_when_predict = false;
+  bool enable_memory_optimization = true;
 };

 extern const char *G_OP_TYPE_CONV;
src/framework/executor.cpp
@@ -65,7 +65,9 @@ Executor<Device, T>::Executor(const Program<Device> &program,
                        "program_desc_ should not be nullptr");
 #if !defined(PADDLE_MOBILE_FPGA) && !defined(PADDLE_MOBILE_FPGA_KD) && \
     !defined(PADDLE_MOBILE_CL)
-  pass::MemoryOptPass()(program_desc_.get(), program_.scope.get());
+  if (config_.enable_memory_optimization) {
+    pass::MemoryOptPass()(program_desc_.get(), program_.scope.get());
+  }
 #endif
   // resize feed and fetch list
   // should init feed and fetch variables before infer shape
test/net/test_net.cpp
@@ -17,39 +17,51 @@ limitations under the License. */
 #include "../test_helper.h"
 #include "../test_include.h"
 
-void test(int argc, char *argv[], bool fuse);
+void test(int argc, char *argv[]);
 
 int main(int argc, char *argv[]) {
-  test(argc, argv, false);
-  test(argc, argv, true);
+  test(argc, argv);
   return 0;
 }
 
-void test(int argc, char *argv[], bool fuse) {
-  paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
+void test(int argc, char *argv[]) {
+  int arg_index = 1;
+  bool fuse = std::stoi(argv[arg_index]) == 1;
+  arg_index++;
+  bool enable_memory_optimization = std::stoi(argv[arg_index]) == 1;
+  arg_index++;
+  paddle_mobile::PaddleMobileConfigInternal config;
+  config.enable_memory_optimization = enable_memory_optimization;
+  paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile(config);
   paddle_mobile.SetThreadNum(1);
   std::string tag = fuse ? "-fuse" : "";
 
-  int dim_count = std::stoi(argv[1]);
+  int dim_count = std::stoi(argv[arg_index]);
+  arg_index++;
   int size = 1;
   std::vector<int64_t> dims;
   for (int i = 0; i < dim_count; i++) {
-    int64_t dim = std::stoi(argv[2 + i]);
+    int64_t dim = std::stoi(argv[arg_index + i]);
     size *= dim;
     dims.push_back(dim);
   }
+  arg_index += dim_count;
 
-  int var_count = std::stoi(argv[1 + dim_count]);
+  int var_count = std::stoi(argv[arg_index]);
+  arg_index++;
+  int sample_step = std::stoi(argv[arg_index]);
+  arg_index++;
   std::vector<std::string> var_names;
   for (int i = 0; i < var_count; i++) {
-    std::string var_name = argv[1 + dim_count + 1 + 1 + i];
+    std::string var_name = argv[arg_index + i];
     var_names.push_back(var_name);
   }
+  arg_index += var_count;
 
   auto time1 = time();
   if (paddle_mobile.Load("./checked_model/model", "./checked_model/params",
                          fuse, false, 1, true)) {
     auto time2 = time();
-    std::cout << "auto-test" << tag
+    std::cout << "auto-test"
               << " load-time-cost :" << time_diff(time1, time1) << "ms"
               << std::endl;

@@ -73,8 +85,9 @@ void test(int argc, char *argv[], bool fuse) {
       auto out = paddle_mobile.Predict(input_data, dims);
     }
     auto time4 = time();
-    std::cout << "auto-test" << tag << " predict-time-cost "
-              << time_diff(time3, time4) / 50 << "ms" << std::endl;
+    std::cout << "auto-test"
+              << " predict-time-cost " << time_diff(time3, time4) / 50 << "ms"
+              << std::endl;
 
     // check correctness
     auto out = paddle_mobile.Predict(input_data, dims);

@@ -88,13 +101,12 @@ void test(int argc, char *argv[], bool fuse) {
         continue;
       }
       auto data = out->data<float>();
-      int step = len / 20;
       std::string sample = "";
-      for (int i = 0; i < len; i += step) {
+      for (int i = 0; i < len; i += sample_step) {
        sample += " " + std::to_string(data[i]);
       }
-      std::cout << "auto-test" << tag << " var " << var_name << sample
-                << std::endl;
+      std::cout << "auto-test"
                << " var " << var_name << sample << std::endl;
     }
     std::cout << std::endl;
   }
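With this change, test-net no longer hard-codes the fuse mode; each run is configured entirely from the command line, in the order: fuse flag, memory-optimization flag, dim count, dims, var count, sample step, output variable names. Below is a minimal Python sketch (not part of the commit) of composing such an invocation the way tools/python/fluidtools/run.py drives the binary over adb; the dims, variable name, and sample step are illustrative placeholders, and adb plus a deployed test-net binary and ./checked_model are assumed.

# Sketch: build the argument string in the order the new test-net parser expects,
# then run it on a device. Illustrative values only.
import subprocess

mobile_exec_root = "/data/local/tmp/bin"   # same constant as in fluidtools/run.py

def build_test_net_args(fuse, mem_opt, dims, var_names, sample_step):
    parts = ["1" if fuse else "0",        # argv[1]: fuse
             "1" if mem_opt else "0",     # argv[2]: enable_memory_optimization
             str(len(dims))]              # argv[3]: dim_count
    parts += [str(d) for d in dims]       # feed dims
    parts += [str(len(var_names))]        # var_count
    parts += [str(sample_step)]           # sample_step used for the "auto-test var" lines
    parts += list(var_names)              # output variable names to sample
    return " ".join(parts)

args = build_test_net_args(fuse=True, mem_opt=False,
                           dims=[1, 3, 64, 64],
                           var_names=["fetch_var_0"],   # hypothetical variable name
                           sample_step=10)
cmd = 'adb shell "cd {} && export LD_LIBRARY_PATH=. && ./test-net {}"'.format(
    mobile_exec_root, args)
print(subprocess.check_output(cmd, shell=True).decode())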
tools/python/caffetools/run.py (new file, mode 100644)
import caffe
import numpy as np

prototxt_path = ""
caffemodel_path = ""
input_path = "input.txt"
input_name = ""
output_name = ""
shape = (1, 3, 64, 64)

data = np.loadtxt(input_path).astype("float32").reshape(shape)

net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)

# view inputs blob names
print(net.inputs)

# view outputs blob names
print(net.outputs)

# set input data
net.blobs[input_name].reshape(*shape)
net.blobs[input_name].data[...] = data

# predict
net.forward()

# view output data
print(net.blobs[output_name].data)
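The Caffe reference script above reads its input as a flat text file and reshapes it, so the same feed can be compared against a paddle-mobile run. A small sketch (not part of the commit) of producing a compatible input.txt; the shape mirrors the script's default and the random data is just a placeholder.

# Sketch: write a flat input.txt that the script above can np.loadtxt back into `shape`.
import numpy as np

shape = (1, 3, 64, 64)                   # matches the default shape in run.py
data = np.random.rand(*shape).astype("float32")
np.savetxt("input.txt", data.flatten())  # one value per line; loadtxt(...).reshape(shape) restores it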
tools/python/fluidtools/run.py
 # -*- coding: utf-8 -*
 import os
 import sys
 import math

@@ -9,6 +10,9 @@ model_path = "model"
 checked_model_path = "checked_model"
 feed_path = "feeds"
 output_path = "outputs"
+diff_threshold = 0.01
+np.set_printoptions(linewidth=150)
 mobile_exec_root = "/data/local/tmp/bin"
 mobile_src_root = os.path.abspath("../../../")

@@ -16,11 +20,11 @@ if mobile_src_root.endswith("/"):
     mobile_src_root = mobile_src_root[:-1]
 
 dot = "•"
-black = lambda x: "\033[30m" + str(x)
-red = lambda x: "\033[31m" + str(x)
-green = lambda x: "\033[32m" + str(x)
-reset = lambda x: "\033[0m" + str(x)
-yellow = lambda x: "\033[33m" + str(x)
+black = lambda x: "\033[30m" + str(x) + "\033[0m"
+red = lambda x: "\033[31m" + str(x) + "\033[0m"
+green = lambda x: "\033[32m" + str(x) + "\033[0m"
+yellow = lambda x: "\033[33m" + str(x) + "\033[0m"
 
 def pp_tab(x, level=0):
     header = ""

@@ -61,6 +65,7 @@ def resave_model():
     # force every var to be persistable
     p_names = []
     for name in vars:
+        name = str(name)
         v = fluid.framework._get_var(name, prog)
         if not v.persistable:
             v.persistable = True

@@ -69,6 +74,7 @@ def resave_model():
     has_found_wrong_shape = False
     # fix the shape of every var
     for name in vars:
+        name = str(name)
         v = vars[name]
         if v.persistable:
             v1 = fluid.global_scope().find_var(name)

@@ -117,6 +123,8 @@ last_feed_var_name = None
 last_feed_file_name = None
 # load the feed key-value pairs
 def load_feed_kv():
+    if not os.path.exists(feed_path):
+        return None
     global last_feed_var_name
     global last_feed_file_name
     feed_kv = {}

@@ -128,7 +136,16 @@ def load_feed_kv():
             file_name = feed_name.replace("/", "_")
             last_feed_var_name = feed_name
             last_feed_file_name = file_name
-            data = np.loadtxt(feed_path + "/" + file_name).reshape(feed_shape).astype("float32")
+            feed_file_path = feed_path + "/" + file_name
+            if not os.path.exists(feed_file_path):
+                return None
+            data = np.loadtxt(feed_file_path)
+            expected_len = 1
+            for dim in feed_shape:
+                expected_len *= dim
+            if len(data) != expected_len:
+                return None
+            data = data.reshape(feed_shape).astype("float32")
             feed_kv[feed_name] = data
     return feed_kv

@@ -166,10 +183,11 @@ def get_var_data(var_name, feed_kv=None):
     return output
 output_var_cache = {}
+sample_step = 1
 def tensor_sample(tensor):
-    step = math.floor(len(tensor) / 20)
+    # step = math.floor(len(tensor) / 20)
     sample = []
-    for i in range(0, len(tensor), step):
+    for i in range(0, len(tensor), sample_step):
         sample.append(tensor[i])
     return sample
 op_cache = {}

@@ -209,19 +227,21 @@ for op in ops:
     op_types.add(op.type)
 pp_tab("op types : {}".format(op_types), 1)
 
-def check_mobile_results(lines, fuse):
-    pp_yellow(dot + dot + " checking {} paddle mobile results".format("fusion" if fuse else "non fusion"))
+def check_mobile_results(args, fuse, mem_opt):
+    args = "{} {} {}".format("1" if fuse else "0", "1" if mem_opt else "0", args)
+    res = sh("adb shell \"cd {} && export LD_LIBRARY_PATH=. && ./test-net {}\"".format(mobile_exec_root, args))
+    lines = res.split("\n")
+    for line in lines:
+        if line.startswith("auto-test-debug"):
+            print(line)
+    pp_yellow(dot + dot + " checking paddle mobile results for {} -- {} ".format(green("【fusion】" if fuse else "【non fusion】"), green("【memory-optimization】" if mem_opt else "【non-memory-optimization】")))
     mobile_var_cache = {}
     for line in lines:
         parts = line.split(" ")
-        if len(parts) <= 0:
+        if len(parts) < 2:
             continue
-        if fuse:
-            if "auto-test-fuse" != parts[0]:
-                continue
-        else:
-            if "auto-test" != parts[0]:
-                continue
+        if "auto-test" != parts[0]:
+            continue
         if parts[1] == "load-time-cost":
             pp_green("load time cost : {}".format(parts[2]), 1)
         elif parts[1] == "predict-time-cost":

@@ -235,6 +255,14 @@ def check_mobile_results(lines, fuse):
     error_values2 = None
     for index in op_cache:
         op_output_var_name, op = op_cache[index]
+        if mem_opt:
+            found_in_fetch = False
+            for fetch in fetches:
+                if op_output_var_name == fetch.name:
+                    found_in_fetch = True
+                    break
+            if not found_in_fetch:
+                continue
         if not op_output_var_name in output_var_cache:
             continue
         if not op_output_var_name in mobile_var_cache:

@@ -247,7 +275,7 @@ def check_mobile_results(lines, fuse):
         for i in range(len(values1)):
             v1 = values1[i]
             v2 = values2[i]
-            if abs(v1 - v2) > 0.01:
+            if abs(v1 - v2) > diff_threshold:
                 error_index = index
                 break
     if error_index != None:

@@ -257,19 +285,23 @@ def check_mobile_results(lines, fuse):
     if error_index == None:
         pp_green("outputs are all correct", 1)
     else:
+        error_values1 = np.array(error_values1)
+        error_values2 = np.array(error_values2)
         pp_red("{} op's output is not correct, op's type is {}".format(error_index, op_cache[error_index][1].type), 1)
-        pp_red("fluid results are : {}".format(error_values1), 1)
-        pp_red("paddle mobile results are : {}".format(error_values2), 1)
+        pp_red("fluid results are : ", 1)
+        pp_red(str(error_values1).replace("\n", "\n" + "\t" * 1), 1)
+        pp_red("paddle mobile results are : ", 1)
+        pp_red(str(error_values2).replace("\n", "\n" + "\t" * 1), 1)
     # print(output_var_cache)
     # print(mobile_var_cache)
 
 def main():
+    # if feed_path does not exist, generate and save the feed key-value pairs
+    if not os.path.exists(feed_path):
+        feed_kv = gen_feed_kv()
+        save_feed_kv(feed_kv)
     # load kv
     feed_kv = load_feed_kv()
     if feed_kv == None:
         feed_kv = gen_feed_kv()
         save_feed_kv(feed_kv)
         feed_kv = load_feed_kv()
     pp_yellow(dot + dot + " checking fetch info")
     for fetch in fetches:
         pp_tab("fetch var name : {}".format(fetch.name), 1)

@@ -297,12 +329,13 @@ def main():
     for dim in last_feed_var_shape:
         args += " " + str(dim)
     args += " " + str(len(output_var_cache))
+    args += " " + str(sample_step)
     for var_name in output_var_cache.keys():
         args += " " + var_name
-    res = sh("adb shell \"cd {} && export LD_LIBRARY_PATH=. && ./test-net {}\"".format(mobile_exec_root, args))
-    lines = res.split("\n")
-    check_mobile_results(lines, False)
-    check_mobile_results(lines, True)
+    check_mobile_results(args, False, False)
+    check_mobile_results(args, False, True)
+    check_mobile_results(args, True, False)
+    check_mobile_results(args, True, True)
 
 if __name__ == "__main__":
     main()
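Taken together, the tool now runs test-net once per (fusion, memory-optimization) combination, samples every sample_step-th element of each output variable on both the fluid side (tensor_sample) and the mobile side (the "auto-test var" lines), and reports the first op whose values differ by more than diff_threshold. A self-contained sketch of that comparison idea; the helper and data below are illustrative, not the script itself.

# Sketch of the sampling + threshold check performed by check_mobile_results.
sample_step = 1        # same default as in run.py
diff_threshold = 0.01  # same default as in run.py

def tensor_sample(tensor):
    # keep every sample_step-th element, matching the C++ side's sampled output
    return [tensor[i] for i in range(0, len(tensor), sample_step)]

def first_mismatch(fluid_values, mobile_values):
    # index of the first sampled value pair that differs beyond the threshold, else None
    for i, (v1, v2) in enumerate(zip(tensor_sample(fluid_values),
                                     tensor_sample(mobile_values))):
        if abs(v1 - v2) > diff_threshold:
            return i
    return None

print(first_mismatch([0.10, 0.20, 0.30], [0.10, 0.205, 0.30]))  # None: within threshold
print(first_mismatch([0.10, 0.20, 0.30], [0.10, 0.25, 0.30]))   # 1: differs by ~0.05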