PaddlePaddle / Paddle
Commit c8deaaa9
Authored on Jun 23, 2019 by Chunwei
Merge branch 'chunwei/refine-light-api' into 'incubate/lite'
Chunwei/refine light api

See merge request inference/paddlelite!46
Parents: 55471868 and d0f1670e
Showing 6 changed files with 73 additions and 39 deletions (+73 −39)
paddle/fluid/lite/api/CMakeLists.txt   +6  −8
paddle/fluid/lite/api/apis_test.cc     +22 −5
paddle/fluid/lite/api/cxx_api.cc       +21 −4
paddle/fluid/lite/api/cxx_api.h        +2  −16
paddle/fluid/lite/tools/build.sh       +17 −1
paddle/fluid/lite/utils/io.h           +5  −5
paddle/fluid/lite/api/CMakeLists.txt
@@ -74,18 +74,16 @@ endif()
# These tests needs CLI arguments, and is not supported in ARM CI.
# TODO(Superjomn) support latter.
-lite_cc_test(test_light_api SRCS light_api_test.cc
+lite_cc_test(test_light_api_lite SRCS light_api_test.cc
    DEPS light_api_lite program_lite mir_passes
    ARGS --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
if (NOT LITE_ON_MOBILE)
    lite_cc_test(test_apis_lite SRCS apis_test.cc
        DEPS cxx_api_lite light_api_lite ${ops_lite}
        X86_DEPS ${x86_kernels} operator
        ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
             --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
endif()
#lite_cc_binary(cxx_api_lite_bin SRCS cxx_api_bin.cc
#X86_DEPS operator
paddle/fluid/lite/api/apis_test.cc
@@ -46,16 +46,34 @@ bool CompareTensors(const std::string& name, const Predictor& cxx_api,
  return TensorCompareWith(*a, *b);
}

#ifndef LITE_WITH_LIGHT_WEIGHT_FRAMEWORK
+TEST(CXXApi_LightApi, optim_model) {
+  lite::Predictor cxx_api;
+  std::vector<Place> valid_places({
+      Place{TARGET(kHost), PRECISION(kFloat)},
+      Place{TARGET(kX86), PRECISION(kFloat)},
+      Place{TARGET(kARM), PRECISION(kFloat)},  // Both works on X86 and ARM
+  });
+  // On ARM devices, the preferred X86 target not works, but it can still
+  // select ARM kernels.
+  cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)},
+                valid_places);
+  cxx_api.SaveModel(FLAGS_optimized_model);
+}
+
TEST(CXXApi_LightApi, save_and_load_model) {
  lite::Predictor cxx_api;
  lite::LightPredictor light_api(FLAGS_optimized_model);

  // CXXAPi
  {
-    std::vector<Place> valid_places({Place{TARGET(kHost), PRECISION(kFloat)},
-                                     Place{TARGET(kX86), PRECISION(kFloat)}});
-    cxx_api.Build(FLAGS_model_dir, Place{TARGET(kCUDA), PRECISION(kFloat)},
+    std::vector<Place> valid_places({
+        Place{TARGET(kHost), PRECISION(kFloat)},
+        Place{TARGET(kX86), PRECISION(kFloat)},
+        Place{TARGET(kARM), PRECISION(kFloat)},  // Both works on X86 and ARM
+    });
+    // On ARM devices, the preferred X86 target not works, but it can still
+    // select ARM kernels.
+    cxx_api.Build(FLAGS_model_dir, Place{TARGET(kX86), PRECISION(kFloat)},
                  valid_places);

    auto* x = cxx_api.GetInput(0);

@@ -87,7 +105,6 @@ TEST(CXXApi_LightApi, save_and_load_model) {
    ASSERT_TRUE(CompareTensors(tensor_name, cxx_api, light_api));
  }
}
#endif  // LITE_WITH_LIGHT_WEIGHT_FRAMEWORK

}  // namespace lite
}  // namespace paddle
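Taken together, these two tests exercise the hand-off between the full CXX API and the light API: build and optimize a model with lite::Predictor, persist it with SaveModel, then reload the optimized copy with lite::LightPredictor. Below is a minimal sketch of that round trip outside the test harness; it only uses calls visible in this diff, while the RoundTrip helper name and the light_api.h header path are assumptions made for illustration.

#include <string>
#include <vector>

#include "paddle/fluid/lite/api/cxx_api.h"    // lite::Predictor (path as in this commit)
#include "paddle/fluid/lite/api/light_api.h"  // lite::LightPredictor (assumed header name)

namespace paddle {
namespace lite {

// Hypothetical helper mirroring apis_test.cc: optimize with the CXX API,
// then hand the result to the light-weight API.
void RoundTrip(const std::string& model_dir, const std::string& optimized_dir) {
  Predictor cxx_api;
  std::vector<Place> valid_places({
      Place{TARGET(kHost), PRECISION(kFloat)},
      Place{TARGET(kX86), PRECISION(kFloat)},
      Place{TARGET(kARM), PRECISION(kFloat)},  // ignored where no ARM kernels exist
  });

  // Build with a preferred place plus fallbacks, then persist the optimized program.
  cxx_api.Build(model_dir, Place{TARGET(kX86), PRECISION(kFloat)}, valid_places);
  cxx_api.SaveModel(optimized_dir);

  // The light predictor only needs the already-optimized model directory.
  LightPredictor light_api(optimized_dir);
}

}  // namespace lite
}  // namespace paddle

The property the tests check is that the directory written by SaveModel is the only artifact the light predictor needs at deployment time.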
paddle/fluid/lite/api/cxx_api.cc
@@ -24,13 +24,11 @@ namespace lite {

void Predictor::SaveModel(const std::string &dir) {
-#ifndef LITE_WITH_ARM
-  LOG(INFO) << "Save model to " << dir;
  MkDirRecur(dir);
-  program_->PersistModel(dir, program_desc_);
-#else
-  LOG(INFO) << "Save model to ./";
-  program_->PersistModel("./", program_desc_);
-#endif
+  program_->PersistModel(dir, program_desc_);
+  LOG(INFO) << "Save model to " << dir;
}

lite::Tensor* Predictor::GetInput(size_t offset) {

@@ -61,5 +59,24 @@ const framework::proto::ProgramDesc &Predictor::program_desc() const {
  return program_desc_;
}

+void Predictor::Build(const framework::proto::ProgramDesc& desc,
+                      const Place& prefer_place,
+                      const std::vector<Place>& valid_places) {
+  program_desc_ = desc;
+  Program program(desc, scope_, valid_places);
+
+  optimizer_.KernelPickPreferPlace(prefer_place);
+  core::KernelPickFactor factor;
+  factor.ConsiderTarget();
+  factor.ConsiderPrecision();
+  optimizer_.Run(std::move(program), valid_places, factor);
+  program_ = optimizer_.GenRuntimeProgram();
+}
+
+const lite::Tensor* Predictor::GetTensor(const std::string& name) const {
+  auto* var = program_->exec_scope()->FindVar(name);
+  return &var->Get<lite::Tensor>();
+}
+
}  // namespace lite
}  // namespace paddle
paddle/fluid/lite/api/cxx_api.h
@@ -42,18 +42,7 @@ class Predictor {
             const std::vector<Place>& valid_places);

  void Build(const framework::proto::ProgramDesc& desc,
-             const Place& prefer_place,
-             const std::vector<Place>& valid_places) {
-    program_desc_ = desc;
-    Program program(desc, scope_, valid_places);
-
-    optimizer_.KernelPickPreferPlace(prefer_place);
-    core::KernelPickFactor factor;
-    factor.ConsiderTarget();
-    factor.ConsiderPrecision();
-    optimizer_.Run(std::move(program), valid_places, factor);
-    program_ = optimizer_.GenRuntimeProgram();
-  }
+             const Place& prefer_place,
+             const std::vector<Place>& valid_places);

  // Run the predictor for a single batch of data.
  void Run() { program_->Run(); }

@@ -66,10 +55,7 @@ class Predictor {
  // Return the program desc for debug.
  const framework::proto::ProgramDesc& program_desc() const;

-  const lite::Tensor* GetTensor(const std::string& name) const {
-    auto* var = program_->exec_scope()->FindVar(name);
-    return &var->Get<lite::Tensor>();
-  }
+  const lite::Tensor* GetTensor(const std::string& name) const;

  // This method is disabled in mobile, for unnecessary dependencies required.
  void SaveModel(const std::string& dir);
paddle/fluid/lite/tools/build.sh
@@ -116,7 +116,7 @@ function test_arm_android {
    echo "test name: ${test_name}"
    adb_work_dir="/data/local/tmp"
-    skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api")
+    skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api_lite" "test_apis_lite")
    for skip_name in ${skip_list[@]} ; do
        [[ $skip_name =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && echo "skip $test_name" && return
    done

@@ -368,6 +368,22 @@ function build_test_arm_subtask_model {
    echo "Done"
}

+# this test load a model, optimize it and check the prediction result of both cxx and light APIS.
+function test_arm_predict_apis {
+    local port=$1
+    local workspace=$2
+    local naive_model_path=$3
+    local api_test_path=$(find . -name "test_apis_lite")
+    # the model is pushed to ./lite_naive_model
+    adb -s emulator-${port} push ${naive_model_path} ${workspace}
+    adb -s emulator-${port} push $api_test_path ${workspace}
+
+    # test cxx_api first to store the optimized model.
+    adb -s emulator-${port} shell ./test_apis_lite --model_dir ./lite_naive_model --optimized_model ./lite_naive_model_opt
+}
+
# Build the code and run lite arm tests. This is executed in the CI system.
function build_test_arm {
    ########################################################################
paddle/fluid/lite/utils/io.h
@@ -14,9 +14,7 @@
#pragma once
#ifndef LITE_WITH_ARM
#include <bits/stdc++.h>
#endif
#include <sys/stat.h>
#include <fstream>
#include <string>
#include "paddle/fluid/lite/utils/cp_logging.h"

@@ -35,12 +33,14 @@ static bool IsFileExists(const std::string& path) {
}

// ARM mobile not support mkdir in C++
-#ifndef LITE_WITH_ARM
static void MkDirRecur(const std::string& path) {
+#ifndef LITE_WITH_ARM
  CHECK_EQ(system(string_format("mkdir -p %s", path.c_str()).c_str()), 0)
      << "Cann't mkdir " << path;
-}
-#endif
+#else   // On ARM
+  CHECK_NE(mkdir(path.c_str(), S_IRWXU), -1) << "Cann't mkdir " << path;
+#endif
+}

}  // namespace lite
}  // namespace paddle
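The io.h change moves the LITE_WITH_ARM switch inside MkDirRecur, so callers such as Predictor::SaveModel no longer need their own #ifdef: non-ARM builds shell out to "mkdir -p", while ARM builds call mkdir(2) directly. Below is a self-contained sketch of the same pattern; since it stands outside the lite codebase, CHECK_* and string_format are replaced with standard-library equivalents, the function name MkDirRecurDemo and the example path are made up for illustration, and the macro name is reused only to mirror the diff.

#include <sys/stat.h>   // mkdir, S_IRWXU

#include <cstdio>
#include <cstdlib>      // std::system
#include <string>

// Same idea as MkDirRecur in paddle/fluid/lite/utils/io.h: one entry point,
// with the platform switch hidden behind the preprocessor.
static void MkDirRecurDemo(const std::string& path) {
#ifndef LITE_WITH_ARM
  // Desktop/server builds: let the shell create intermediate directories too.
  const std::string cmd = "mkdir -p " + path;
  if (std::system(cmd.c_str()) != 0) {
    std::fprintf(stderr, "Can't mkdir %s\n", path.c_str());
  }
#else  // On ARM (Android), shelling out is unavailable/undesirable; use mkdir(2).
  // Note: unlike "mkdir -p", this creates only the last path component.
  if (mkdir(path.c_str(), S_IRWXU) == -1) {
    std::fprintf(stderr, "Can't mkdir %s\n", path.c_str());
  }
#endif
}

int main() {
  MkDirRecurDemo("/tmp/lite_demo_dir");  // example path, not from the commit
  return 0;
}

One caveat visible in the diff itself: the ARM branch creates only the leaf directory, so it is not a full recursive replacement for "mkdir -p".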