机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit e9961bc3 (unverified)
Authored Dec 01, 2020 by Wilber; committed via GitHub on Dec 01, 2020.
update lite api. (#29225)
Parent: 7e8e3bab
Showing 5 changed files with 22 additions and 7 deletions (+22 −7).
cmake/external/lite.cmake                            +7  −3
paddle/fluid/inference/lite/engine.cc                +1  −2
paddle/fluid/inference/lite/test_engine.cc           +2  −1
paddle/fluid/operators/lite/lite_engine_op_test.cc   +2  −1
paddle/fluid/operators/lite/ut_helper.h              +10 −0
cmake/external/lite.cmake

@@ -36,7 +36,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
   set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)
   if(NOT LITE_GIT_TAG)
-    set(LITE_GIT_TAG 42ab4d559f6659edfc35040fb30fdcec3dc3f8aa)
+    set(LITE_GIT_TAG release/v2.7)
   endif()
   if(NOT CUDA_ARCH_NAME)
@@ -84,10 +84,8 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
       ${EXTERNAL_OPTIONAL_ARGS}
       ${LITE_OPTIONAL_ARGS}
     )
-    set(LITE_OUTPUT_BIN_DIR inference_lite_lib.armlinux.armv8)
   else()
     set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
-    set(LITE_OUTPUT_BIN_DIR inference_lite_lib)
     set(LITE_OPTIONAL_ARGS -DWITH_MKL=ON
                            -DLITE_WITH_CUDA=${WITH_GPU}
                            -DWITH_MKLDNN=OFF
@@ -135,6 +133,12 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
 endif()

+if(WITH_ARM)
+  set(LITE_OUTPUT_BIN_DIR inference_lite_lib.armlinux.armv8)
+else()
+  set(LITE_OUTPUT_BIN_DIR inference_lite_lib)
+endif()
+
 message(STATUS "Paddle-lite BINARY_DIR: ${LITE_BINARY_DIR}")
 message(STATUS "Paddle-lite SOURCE_DIR: ${LITE_SOURCE_DIR}")
 include_directories(${LITE_SOURCE_DIR})
paddle/fluid/inference/lite/engine.cc

@@ -55,8 +55,7 @@ paddle::lite_api::PaddlePredictor* EngineManager::Create(
 #ifdef PADDLE_WITH_ARM
   set_threads.set_threads(cfg.cpu_math_library_num_threads);
 #else
-  lite_cxx_config.set_x86_math_library_num_threads(
-      cfg.cpu_math_library_num_threads);
+  lite_cxx_config.set_x86_math_num_threads(cfg.cpu_math_library_num_threads);
 #endif
 #ifdef LITE_SUBGRAPH_WITH_XPU
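Note on this hunk: the x86 thread-count setter on the Lite CxxConfig is renamed in the Paddle-Lite release/v2.7 API pinned by lite.cmake above, from set_x86_math_library_num_threads to set_x86_math_num_threads, and the call now fits on one line. Below is a minimal sketch of the new call site only, not the real EngineManager::Create; the header path and the literal 4 (standing in for cfg.cpu_math_library_num_threads) are assumptions for illustration.

// Hedged sketch of the renamed Paddle-Lite v2.7 setter.
#include "lite/api/paddle_api.h"  // assumed header exposing paddle::lite_api::CxxConfig

void configure_x86_math_threads() {
  paddle::lite_api::CxxConfig lite_cxx_config;
#ifndef PADDLE_WITH_ARM
  // Old name: set_x86_math_library_num_threads(...). The argument mirrors
  // cfg.cpu_math_library_num_threads in the diff above; 4 is a placeholder.
  lite_cxx_config.set_x86_math_num_threads(4);
#endif
}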
paddle/fluid/inference/lite/test_engine.cc

@@ -28,6 +28,7 @@ namespace inference {
 namespace lite {

 using inference::lite::AddTensorToBlockDesc;
+using paddle::inference::lite::AddFetchListToBlockDesc;
 using inference::lite::CreateTensor;
 using inference::lite::serialize_params;
@@ -64,7 +65,7 @@ void make_fake_model(std::string* model, std::string* param) {
   AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
-  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);
+  AddFetchListToBlockDesc(block_, "out");
   *block_->add_ops() = *feed0->Proto();
   *block_->add_ops() = *feed1->Proto();
paddle/fluid/operators/lite/lite_engine_op_test.cc

@@ -25,6 +25,7 @@
 USE_NO_KERNEL_OP(lite_engine)
 using paddle::inference::lite::AddTensorToBlockDesc;
+using paddle::inference::lite::AddFetchListToBlockDesc;
 using paddle::inference::lite::CreateTensor;
 using paddle::inference::lite::serialize_params;

 namespace paddle {
@@ -60,7 +61,7 @@ TEST(LiteEngineOp, engine_op) {
   AddTensorToBlockDesc(block_, "x", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "y", std::vector<int64_t>({2, 4}), true);
   AddTensorToBlockDesc(block_, "z", std::vector<int64_t>({2, 4}), false);
-  AddTensorToBlockDesc(block_, "out", std::vector<int64_t>({2, 4}), false);
+  AddFetchListToBlockDesc(block_, "out");
   *block_->add_ops() = *feed1->Proto();
   *block_->add_ops() = *feed0->Proto();
   *block_->add_ops() = *elt_add->Proto();
paddle/fluid/operators/lite/ut_helper.h

@@ -41,6 +41,16 @@ void AddTensorToBlockDesc(framework::proto::BlockDesc* block,
   desc.SetPersistable(persistable);
   *var = *desc.Proto();
 }
+
+void AddFetchListToBlockDesc(framework::proto::BlockDesc* block,
+                             const std::string& name) {
+  using framework::proto::VarType;
+  auto* var = block->add_vars();
+  framework::VarDesc desc(name);
+  desc.SetType(VarType::FETCH_LIST);
+  *var = *desc.Proto();
+}
+
 void serialize_params(std::string* str, framework::Scope* scope,
                       const std::vector<std::string>& params) {
   std::ostringstream os;
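Note on this hunk: AddFetchListToBlockDesc is the helper that test_engine.cc and lite_engine_op_test.cc now call for the "out" variable, registering it as a VarType::FETCH_LIST var instead of a non-persistable {2, 4} tensor. The sketch below shows that usage in a self-contained form; the names, shapes, and helper calls are taken from the hunks above, while the ProgramDesc plumbing is an assumption about how the tests obtain block_.

#include <cstdint>
#include <vector>

#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/operators/lite/ut_helper.h"

// Hedged sketch only: build a block with one feed tensor and a fetch-list
// target, mirroring the calls shown in the test diffs above.
void build_fake_block() {
  paddle::framework::ProgramDesc program;
  // Assumption: a freshly constructed ProgramDesc already contains root block 0.
  auto* block_ = program.Proto()->mutable_blocks(0);
  paddle::inference::lite::AddTensorToBlockDesc(
      block_, "x", std::vector<int64_t>({2, 4}), true);
  // "out" is now declared as VarType::FETCH_LIST rather than a {2, 4} tensor.
  paddle::inference::lite::AddFetchListToBlockDesc(block_, "out");
}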