Commit 4801beb1 · 机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Authored September 19, 2018 by nhzlx
Parent: 202e0a1e

add arguments for trt config

Showing 5 changed files with 22 additions and 6 deletions (+22 -6)
paddle/fluid/inference/analysis/analyzer_tester.cc                 +11 -3
paddle/fluid/inference/analysis/subgraph_splitter.cc                +1 -1
paddle/fluid/inference/analysis/subgraph_splitter_tester.cc         +1 -1
paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc    +8 -0
paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc          +1 -1
paddle/fluid/inference/analysis/analyzer_tester.cc

@@ -37,12 +37,20 @@ TEST(Analyzer, analysis_without_tensorrt) {
 TEST(Analyzer, analysis_with_tensorrt) {
   FLAGS_IA_enable_tensorrt_subgraph_engine = true;
   Argument argument;
+  int *minimum_subgraph_size = new int(0);
+  int *max_batch_size = new int(3);
+  int *workspace_size = new int(1 << 20);
+  std::string *precision_mode = new std::string("FP32");
+  argument.Set<int>("minimum_subgraph_size", minimum_subgraph_size);
+  argument.Set<int>("max_batch_size", max_batch_size);
+  argument.Set<int>("workspace_size", workspace_size);
+  argument.Set<std::string>("precision_mode", precision_mode);
   argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir));
   Analyzer analyser;
   analyser.Run(&argument);
 }

 void TestWord2vecPrediction(const std::string &model_path) {
   NativeConfig config;
   config.model_dir = model_path;
   config.use_gpu = false;

@@ -73,8 +81,8 @@ void TestWord2vecPrediction(const std::string &model_path) {
   // The outputs' buffers are in CPU memory.
   for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
     LOG(INFO) << "data: "
               << static_cast<float *>(outputs.front().data.data())[i];
     PADDLE_ENFORCE(static_cast<float *>(outputs.front().data.data())[i],
                    result[i]);
   }
 }
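Every file in this commit uses the same pattern: each TensorRT setting is heap-allocated and registered on the analysis Argument under a string key (argument.Set<int>("max_batch_size", ...)), and the consuming pass later looks it up by that same key (argument_->Get<int>(...) in subgraph_splitter.cc below). The sketch that follows is not Paddle's Argument class; it is a minimal, self-contained stand-in, assuming Set<T> stores an owned pointer under the key and Get<T> returns it by key, written only to show why the one-character key fixes in this commit matter: a misspelled key is simply a different entry.

// KeyedArgs is a hypothetical stand-in for analysis::Argument, not Paddle code.
#include <cassert>
#include <map>
#include <memory>
#include <string>

class KeyedArgs {
 public:
  template <typename T>
  void Set(const std::string &key, T *value) {
    // Take ownership of the heap-allocated value and file it under the key.
    store_[key] = std::shared_ptr<void>(value);
  }

  template <typename T>
  T *Get(const std::string &key) const {
    auto it = store_.find(key);
    return it == store_.end() ? nullptr : static_cast<T *>(it->second.get());
  }

 private:
  std::map<std::string, std::shared_ptr<void>> store_;
};

int main() {
  KeyedArgs args;
  args.Set<int>("minimum_subgraph_size", new int(3));
  assert(args.Get<int>("minimum_subgraph_size") != nullptr);  // exact key: found
  assert(args.Get<int>("minimun_subgraph_size") == nullptr);  // typo'd key: silently absent
  return 0;
}

Paddle's real Argument may enforce or assert instead of returning null on a missing key; the point here is only that the lookup is by exact string, so Set and Get sides must agree on the spelling.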
paddle/fluid/inference/analysis/subgraph_splitter.cc

@@ -309,7 +309,7 @@ void SubGraphFuse::operator()() { ReplaceNodesWithSubGraphs(); }
 void SubGraphFuse::ReplaceNodesWithSubGraphs() {
   auto subgraphs = SubGraphSplitter(graph_, node_inside_subgraph_teller_)();
   for (auto &subgraph : subgraphs) {
-    if (subgraph.size() <= argument_->Get<int>("minimun_subgraph_size"))
+    if (subgraph.size() <= argument_->Get<int>("minimum_subgraph_size"))
       continue;
     std::unordered_set<Node *> subgraph_uniq(subgraph.begin(), subgraph.end());
     // replace this sub-graph with the first node. Two steps: 1. Create a Block
paddle/fluid/inference/analysis/subgraph_splitter_tester.cc

@@ -68,7 +68,7 @@ TEST(SubGraphSplitter, Fuse) {
   auto dfg = ProgramDescToDFG(desc);
   Argument argument;
   int *minmum_subgraph_size = new int(3);
-  argument.Set<int>("minmum_subgraph_size", minmum_subgraph_size);
+  argument.Set<int>("minimum_subgraph_size", minmum_subgraph_size);
   size_t count0 = dfg.nodes.size();
paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc

@@ -36,6 +36,14 @@ TEST(TensorRTSubGraphPass, main) {
   };
   Argument argument(FLAGS_inference_model_dir);
+  int *minimum_subgraph_size = new int(0);
+  int *max_batch_size = new int(3);
+  int *workspace_size = new int(1 << 20);
+  std::string *precision_mode = new std::string("FP32");
+  argument.Set<int>("minimun_subgraph_size", minimum_subgraph_size);
+  argument.Set<int>("max_batch_size", max_batch_size);
+  argument.Set<int>("workspace_size", workspace_size);
+  argument.Set<std::string>("precision_mode", precision_mode);
   DFG_GraphvizDrawPass::Config config{FLAGS_dot_dir, "origin"};
   DFG_GraphvizDrawPass::Config config1{FLAGS_dot_dir, "fusion"};
paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc

@@ -94,7 +94,7 @@ class TensorRTSubgraphPredictor : public NativePaddlePredictor {
   int *max_batch_size = new int(config_.max_batch_size);
   int *workspace_size = new int(config_.workspace_size);
   std::string *precision_mode = new std::string(config_.precision_mode);
-  argument.Set<int>("minimun_subgraph_size", minimum_subgraph_size);
+  argument.Set<int>("minimum_subgraph_size", minimum_subgraph_size);
   argument.Set<int>("max_batch_size", max_batch_size);
   argument.Set<int>("workspace_size", workspace_size);
   argument.Set<std::string>("precision_mode", precision_mode);
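For context, the predictor in this hunk pulls the values from its own configuration object (config_.max_batch_size, config_.workspace_size, config_.precision_mode, plus a minimum-subgraph-size value) and forwards them into the Argument before analysis runs. Below is a hedged sketch of that forwarding step, using a hypothetical TrtConfig struct whose field names mirror the config_ members referenced above rather than Paddle's actual config type.

#include <string>

// Hypothetical struct; field names mirror the config_ members used above,
// but this is not Paddle's real TensorRT config type.
struct TrtConfig {
  int minimum_subgraph_size = 0;
  int max_batch_size = 1;
  int workspace_size = 1 << 20;  // 1 << 20 bytes = 1 MiB of TensorRT workspace
  std::string precision_mode = "FP32";
};

// Args can be any type exposing Set<T>(key, T*) as in the diff above
// (for example, the KeyedArgs stand-in sketched earlier).
template <typename Args>
void ForwardTrtConfig(const TrtConfig &cfg, Args *argument) {
  argument->template Set<int>("minimum_subgraph_size",
                              new int(cfg.minimum_subgraph_size));
  argument->template Set<int>("max_batch_size", new int(cfg.max_batch_size));
  argument->template Set<int>("workspace_size", new int(cfg.workspace_size));
  argument->template Set<std::string>("precision_mode",
                                      new std::string(cfg.precision_mode));
}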