Commit 3066636d (unverified)
Authored by Jason on Jul 06, 2020; committed by GitHub on Jul 06, 2020

Merge pull request #189 from joey12300/develop

add use_ir_optim cmd args

Parents: a3f3e830, 755d1a87
Showing 6 changed files with 25 additions and 8 deletions (+25, -8).
deploy/cpp/demo/classifier.cpp          +3 -1
deploy/cpp/demo/detector.cpp            +3 -1
deploy/cpp/demo/segmenter.cpp           +3 -1
deploy/cpp/include/paddlex/paddlex.h    +6 -3
deploy/cpp/include/paddlex/visualize.h  +6 -1
deploy/cpp/src/paddlex.cpp              +4 -1
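In short, the change threads one boolean from a new gflags flag in each demo (classifier, detector, segmenter) through Model::Init and Model::create_predictor down to paddle::AnalysisConfig::SwitchIrOptim, so graph (IR) optimization can be switched off from the command line. visualize.h additionally picks up an unrelated ARM include fix.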
deploy/cpp/demo/classifier.cpp

@@ -37,6 +37,7 @@ DEFINE_int32(batch_size, 1, "Batch size of infering");
 DEFINE_int32(thread_num,
              omp_get_num_procs(),
              "Number of preprocessing threads");
+DEFINE_bool(use_ir_optim, true, "use ir optimization");
 
 int main(int argc, char** argv) {
   // Parsing command-line
@@ -57,7 +58,8 @@ int main(int argc, char** argv) {
              FLAGS_use_gpu,
              FLAGS_use_trt,
              FLAGS_gpu_id,
-             FLAGS_key);
+             FLAGS_key,
+             FLAGS_use_ir_optim);
   // Run inference
   double total_running_time_s = 0.0;
...
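The new flag is a standard gflags boolean, so it can be flipped at launch time, e.g. ./classifier ... --use_ir_optim=false. A minimal sketch of how such a flag is declared and parsed, assuming gflags as used by these demos (the DEFINE_bool macro is the one visible in the diff above):

#include <gflags/gflags.h>

DEFINE_bool(use_ir_optim, true, "use ir optimization");

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  // FLAGS_use_ir_optim is now true unless the binary was started
  // with --use_ir_optim=false (or gflags' boolean shorthand --nouse_ir_optim).
  return 0;
}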
deploy/cpp/demo/detector.cpp

@@ -43,6 +43,7 @@ DEFINE_double(threshold,
 DEFINE_int32(thread_num,
              omp_get_num_procs(),
              "Number of preprocessing threads");
+DEFINE_bool(use_ir_optim, true, "use ir optimization");
 
 int main(int argc, char** argv) {
   // Parse command-line arguments
@@ -62,7 +63,8 @@ int main(int argc, char** argv) {
              FLAGS_use_gpu,
              FLAGS_use_trt,
              FLAGS_gpu_id,
-             FLAGS_key);
+             FLAGS_key,
+             FLAGS_use_ir_optim);
   double total_running_time_s = 0.0;
   double total_imread_time_s = 0.0;
...
deploy/cpp/demo/segmenter.cpp

@@ -39,6 +39,7 @@ DEFINE_int32(batch_size, 1, "Batch size of infering");
 DEFINE_int32(thread_num,
              omp_get_num_procs(),
              "Number of preprocessing threads");
+DEFINE_bool(use_ir_optim, false, "use ir optimization");
 
 int main(int argc, char** argv) {
   // Parse command-line arguments
@@ -59,7 +60,8 @@ int main(int argc, char** argv) {
              FLAGS_use_gpu,
              FLAGS_use_trt,
              FLAGS_gpu_id,
-             FLAGS_key);
+             FLAGS_key,
+             FLAGS_use_ir_optim);
   double total_running_time_s = 0.0;
   double total_imread_time_s = 0.0;
...
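Note that this demo defaults use_ir_optim to false, whereas classifier.cpp and detector.cpp default it to true. The commit does not explain the asymmetry; a plausible reading (an assumption, not stated in the source) is that some IR fusion passes are unsafe for certain segmentation graphs, so segmentation opts in via --use_ir_optim=true rather than out.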
deploy/cpp/include/paddlex/paddlex.h

@@ -72,20 +72,23 @@ class Model {
   * @param use_trt: use Tensor RT or not when infering
   * @param gpu_id: the id of gpu when infering with using gpu
   * @param key: the key of encryption when using encrypted model
+  * @param use_ir_optim: use ir optimization when infering
   * */
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            int gpu_id = 0,
-           std::string key = "") {
-    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key);
+           std::string key = "",
+           bool use_ir_optim = true) {
+    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, use_ir_optim);
  }

  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        int gpu_id = 0,
-                       std::string key = "");
+                       std::string key = "",
+                       bool use_ir_optim = true);

 /*
  * @brief
...
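Because the new parameter is appended last with a default (use_ir_optim = true), all existing five-argument call sites keep compiling unchanged. A minimal caller sketch, assuming the PaddleX namespace this header declares and an illustrative model path (both assumptions, not shown in this diff):

#include "paddlex/paddlex.h"

int main() {
  PaddleX::Model model;
  // The sixth argument is new; omit it to keep the old behavior
  // (IR optimization enabled).
  model.Init("./inference_model",
             /*use_gpu=*/false,
             /*use_trt=*/false,
             /*gpu_id=*/0,
             /*key=*/"",
             /*use_ir_optim=*/false);
  return 0;
}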
deploy/cpp/include/paddlex/visualize.h

@@ -22,9 +22,14 @@
 #include <io.h>
 #else  // Linux/Unix
 #include <dirent.h>
-#include <sys/io.h>
+// #include <sys/io.h>
+#ifdef __arm__  // for arm
+#include <aarch64-linux-gnu/sys/stat.h>
+#include <aarch64-linux-gnu/sys/types.h>
+#else
 #include <sys/stat.h>
 #include <sys/types.h>
+#endif
 #include <unistd.h>
 #endif
 #include <string>
...
deploy/cpp/src/paddlex.cpp

@@ -22,7 +22,8 @@ void Model::create_predictor(const std::string& model_dir,
                              bool use_gpu,
                              bool use_trt,
                              int gpu_id,
-                             std::string key) {
+                             std::string key,
+                             bool use_ir_optim) {
   paddle::AnalysisConfig config;
   std::string model_file = model_dir + OS_PATH_SEP + "__model__";
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
@@ -63,6 +64,8 @@ void Model::create_predictor(const std::string& model_dir,
   }
   config.SwitchUseFeedFetchOps(false);
   config.SwitchSpecifyInputNames(true);
+  // Enable graph (IR) optimization
+  config.SwitchIrOptim(use_ir_optim);
   // Enable memory optimization
   config.EnableMemoryOptim();
   if (use_trt) {
...
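SwitchIrOptim toggles Paddle Inference's IR pass pipeline (operator fusion and similar graph rewrites) before the predictor is built, which is what the new command-line flag ultimately controls. A minimal standalone sketch of the same configuration sequence, assuming the Paddle Inference C++ API this file already uses (the header path is illustrative; the __model__/__params__ file names follow the conventions visible above):

#include <memory>
#include <string>

#include "paddle_inference_api.h"  // paddle::AnalysisConfig, paddle::CreatePaddlePredictor

std::unique_ptr<paddle::PaddlePredictor> build_predictor(
    const std::string& model_dir, bool use_ir_optim) {
  paddle::AnalysisConfig config;
  // PaddleX-exported models ship a __model__ program file and __params__ weights.
  config.SetModel(model_dir + "/__model__", model_dir + "/__params__");
  config.SwitchUseFeedFetchOps(false);   // feed/fetch through ZeroCopyTensor
  config.SwitchSpecifyInputNames(true);  // address inputs by name
  config.SwitchIrOptim(use_ir_optim);    // enable/disable graph (IR) passes
  config.EnableMemoryOptim();            // reuse activation buffers
  return paddle::CreatePaddlePredictor(config);
}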