Commit 02019804 (unverified)
Authored on Dec 28, 2022 by Yuanle Liu; committed by GitHub on Dec 28, 2022.

update some trt log (#49330)

Parent: e2b2f7d0
Showing 4 changed files with 11 additions and 4 deletions (+11 -4):
paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc  +2 -2
paddle/fluid/inference/api/analysis_predictor.h                      +7 -0
paddle/fluid/inference/tensorrt/engine.cc                            +0 -1
paddle/fluid/inference/tensorrt/engine.h                             +2 -1
paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc

@@ -272,7 +272,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
   if (x->Var()->GetDataType() == framework::proto::VarType::INT64) {
     std::string tmp_name = x->Name() + "_cast_to_INT32";
-    LOG(WARNING) << "tensorrt_subgraph's input named " << tmp_name
+    LOG(WARNING) << "tensorrt_subgraph's input named " << x->Name()
                  << " having int64 dtype in pdmodel description, we will cast them to "
                     "int32 dtype to feed them into paddle-trt.";
     /*
...
@@ -395,7 +395,7 @@ void TensorRtSubgraphPass::CreateTensorRTOp(
             map_origin_outputs_dtype[name]) ==
         framework::proto::VarType::INT64) {
     std::string tmp_name = name + "_cast_to_INT64";
-    LOG(WARNING) << "tensorrt_subgraph's output named " << tmp_name
+    LOG(WARNING) << "tensorrt_subgraph's output named " << name
                  << " having int64 dtype in pdmodel description, but in fact "
                     "it is int32 "
                     "dtype after executing this tensorrt_subgraph, so we "
...
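Both hunks make the same cosmetic fix: the warning should name the variable the user actually declared, not the internal "_cast_to_INT32"/"_cast_to_INT64" twin that the pass creates. A minimal standalone sketch of the renaming convention, with Paddle's graph types and LOG macro replaced by plain C++ (the input name "input_ids" is hypothetical):

#include <iostream>
#include <string>

// The pass derives the internal int32 variable's name from the original by
// appending a fixed suffix; only the original name is meaningful to users.
std::string CastedName(const std::string& var_name) {
  return var_name + "_cast_to_INT32";
}

int main() {
  std::string name = "input_ids";           // hypothetical pdmodel input
  std::string tmp_name = CastedName(name);  // "input_ids_cast_to_INT32"
  // After this commit the warning reports `name`, not `tmp_name`:
  std::cerr << "tensorrt_subgraph's input named " << name
            << " having int64 dtype in pdmodel description, we will cast them to "
               "int32 dtype to feed them into paddle-trt." << std::endl;
  return 0;
}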
paddle/fluid/inference/api/analysis_predictor.h

@@ -105,7 +105,14 @@ class AnalysisPredictor : public PaddlePredictor {
   }
   auto trt_identifier = config_.trt_engine_memory_sharing_identifier_;
   if (trt_identifier > 0) {
+    // NOTE(liuyuanle): For convenience, we set the id of the predictor to
+    // negative sharing_identifier directly. In the future, this may affect
+    // the meaning of negative predictor id.
     predictor_id_ = -trt_identifier;
+    LOG(WARNING)
+        << "Since the engine context memory of multiple predictors "
+           "is enabled in Paddle-TRT, we set the id of current predictor to "
+           "negative sharing_identifier you specified.";
   } else {
     predictor_id_ = inference::GetUniqueId();
   }
...
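The surrounding code decides a predictor's id once, at construction time: a negative id encodes a memory-sharing group, a positive id is globally unique. A hedged sketch of just that decision, with inference::GetUniqueId() replaced by a local counter (AssignPredictorId and the counter are invented names, not Paddle's):

#include <atomic>
#include <cassert>

static std::atomic<int> g_next_id{0};
// Stand-in for inference::GetUniqueId(); the real helper lives in Paddle.
int GetUniqueIdMock() { return ++g_next_id; }

int AssignPredictorId(int trt_identifier) {
  if (trt_identifier > 0) {
    // Memory sharing enabled: all predictors in the same sharing group get
    // the same negative id, so TRTEngineManager can pool their context memory.
    return -trt_identifier;
  }
  return GetUniqueIdMock();  // sharing disabled: unique positive id
}

int main() {
  assert(AssignPredictorId(5) == -5);  // two predictors in group 5 ...
  assert(AssignPredictorId(5) == -5);  // ... deliberately share one id
  assert(AssignPredictorId(0) != AssignPredictorId(0));  // otherwise unique
  return 0;
}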
paddle/fluid/inference/tensorrt/engine.cc

@@ -75,7 +75,6 @@ void TensorRTEngine::InitNetwork() {
   }
   infer_builder_config_.reset(infer_builder_->createBuilderConfig());
-  // optim_profile_ = infer_builder_->createOptimizationProfile();
   optim_profiles_.resize(max_profile_num_);
   for (int i = 0; i < max_profile_num_; i++)
     optim_profiles_[i] = infer_builder_->createOptimizationProfile();
...
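For context, the loop this hunk touches builds one TensorRT optimization profile per slot, so a single engine can cover several dynamic-shape ranges; the deleted line was only a stale comment from the single-profile era. A hedged sketch of that pattern against the public TensorRT API (compiles against NvInfer.h; MakeProfiles and max_profile_num are invented names):

#include <NvInfer.h>

#include <vector>

// One IOptimizationProfile per slot; each profile is later given
// kMIN/kOPT/kMAX shapes via setDimensions() and registered on the builder
// config with addOptimizationProfile(). Profiles are owned by the builder.
std::vector<nvinfer1::IOptimizationProfile*> MakeProfiles(
    nvinfer1::IBuilder* builder, int max_profile_num) {
  std::vector<nvinfer1::IOptimizationProfile*> profiles(max_profile_num);
  for (int i = 0; i < max_profile_num; i++) {
    profiles[i] = builder->createOptimizationProfile();
  }
  return profiles;
}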
paddle/fluid/inference/tensorrt/engine.h

@@ -802,6 +802,8 @@ class TRTEngineManager {
   }
   void updateContextMemorySize(size_t mem_size, PredictorID predictor_id) {
+    VLOG(3) << "TensorRT engine context memory size is " << mem_size
+            << " in predictor id " << predictor_id;
     bool size_updated{false};
     {
...
@@ -825,7 +827,6 @@ class TRTEngineManager {
     if (context_memorys_.count(predictor_id) == 0) {
       auto context_memory =
           memory::Alloc(place, max_ctx_mem_size_ + alignment, stream);
-      // context_memory_[predictor_id].reset(context_memory.release());
       context_memorys_[predictor_id] = std::move(context_memory);
     }
     return getAlignedMemory(context_memorys_[predictor_id]->ptr(), alignment);
...
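The allocation in the second hunk over-sizes the buffer by `alignment` bytes so that getAlignedMemory() can round the raw pointer up to an aligned address that still leaves the full requested size inside the buffer. A standalone sketch of that arithmetic, with Paddle's allocator swapped for malloc (all names here are mocks of the real helpers):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

void* getAlignedMemoryMock(void* ptr, uintptr_t alignment) {
  // Round the address up to the next multiple of alignment (a power of two).
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  return reinterpret_cast<void*>((addr + alignment - 1) & ~(alignment - 1));
}

int main() {
  const uintptr_t alignment = 512;  // e.g. a GPU-friendly boundary
  const size_t mem_size = 4096;     // stands in for max_ctx_mem_size_
  // Allocating mem_size + alignment guarantees an aligned block of mem_size
  // bytes exists inside the buffer, wherever malloc happened to place it.
  void* raw = std::malloc(mem_size + alignment);
  void* aligned = getAlignedMemoryMock(raw, alignment);
  assert(reinterpret_cast<uintptr_t>(aligned) % alignment == 0);
  assert(reinterpret_cast<uintptr_t>(aligned) -
             reinterpret_cast<uintptr_t>(raw) < alignment);
  std::free(raw);
  return 0;
}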