BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle)
Commit 1bf48365 (unverified)
Authored by Wilber on Nov 11, 2020; committed via GitHub on Nov 11, 2020
[Inference] Add TryShrinkMemory interface. (#28409)
Parent: 26d292b1

Showing 7 changed files with 93 additions and 4 deletions (+93 / -4)
paddle/fluid/inference/api/analysis_predictor.cc         +13 -2
paddle/fluid/inference/api/analysis_predictor.h          +11 -0
paddle/fluid/inference/api/analysis_predictor_tester.cc  +44 -2
paddle/fluid/inference/api/api_tester.cc                  +1 -0
paddle/fluid/inference/api/paddle_api.h                  +11 -0
paddle/fluid/inference/api/paddle_inference_api.h        +11 -0
paddle/fluid/pybind/inference_api.cc                      +2 -0
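Taken together, the diff gives every predictor a TryShrinkMemory() method: it clears intermediate tensors and releases unoccupied memory-pool chunks, returning the number of bytes freed. A minimal C++ usage sketch in the style of the tests added below (the model path is a placeholder; error handling is omitted):

#include <cstdint>

#include "paddle_inference_api.h"

int main() {
  paddle_infer::Config config;
  config.SetModel("/path/to/model");  // placeholder model directory

  auto predictor = paddle_infer::CreatePredictor(config);
  // ... feed inputs and call predictor->Run(), as in the tests below ...

  // After a run, hand idle memory-pool chunks back to the system.
  uint64_t released = predictor->TryShrinkMemory();
  (void)released;  // may undercount: memory outside the pool is not tracked
  return 0;
}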
paddle/fluid/inference/api/analysis_predictor.cc
@@ -175,7 +175,10 @@ bool AnalysisPredictor::PrepareScope(
     status_is_cloned_ = true;
   } else {
     paddle::framework::InitDevices(false);
-    scope_.reset(new paddle::framework::Scope());
+    scope_.reset(new paddle::framework::Scope(), [&](framework::Scope *scope) {
+      delete scope;
+      memory::Release(place_);
+    });
     status_is_cloned_ = false;
   }
   sub_scope_ = &scope_->NewScope();
@@ -591,7 +594,6 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     gflags.push_back("--allocator_strategy=thread_local");
     process_level_allocator_enabled = false;
   } else {
     gflags.push_back("--allocator_strategy=naive_best_fit");
     process_level_allocator_enabled = true;
   }
@@ -890,6 +892,11 @@ bool AnalysisPredictor::LoadParameters() {
   return true;
 }
 
+uint64_t AnalysisPredictor::TryShrinkMemory() {
+  ClearIntermediateTensor();
+  return paddle::memory::Release(place_);
+}
+
 void AnalysisPredictor::ClearIntermediateTensor() {
   PADDLE_ENFORCE_NOT_NULL(inference_program_.get(),
                           platform::errors::PreconditionNotMet(
@@ -985,6 +992,8 @@ AnalysisPredictor::~AnalysisPredictor() {
     mkldnn_quantizer_ = nullptr;
   }
 #endif
+
+  memory::Release(place_);
 }
 
 std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
@@ -1142,6 +1151,8 @@ void Predictor::ClearIntermediateTensor() {
   predictor_->ClearIntermediateTensor();
 }
 
+uint64_t Predictor::TryShrinkMemory() { return predictor_->TryShrinkMemory(); }
+
 int GetNumBytesOfDataType(DataType dtype) {
   switch (dtype) {
     case DataType::FLOAT32:
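One detail worth flagging in the first hunk of this file: scope_ is reset with the two-argument shared_ptr::reset, so a custom deleter runs when the scope is destroyed, and teardown now also returns idle pool chunks via memory::Release(place_). A standalone sketch of that pattern, with Scope and ReleasePool as stand-ins rather than Paddle code:

#include <cstdio>
#include <memory>

struct Scope {};  // stand-in for paddle::framework::Scope

// Stand-in for memory::Release(place_): frees idle pool chunks.
void ReleasePool() { std::puts("pool chunks released"); }

int main() {
  std::shared_ptr<Scope> scope;
  // The two-argument reset attaches a custom deleter: when the last
  // reference drops, the Scope is destroyed and the pool is shrunk
  // in one place.
  scope.reset(new Scope, [](Scope *s) {
    delete s;
    ReleasePool();
  });
  scope.reset();  // deleter fires here
  return 0;
}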
paddle/fluid/inference/api/analysis_predictor.h
@@ -193,6 +193,17 @@ class AnalysisPredictor : public PaddlePredictor {
   ///
   void ClearIntermediateTensor();
 
+  ///
+  /// \brief Release all tmp tensor to compress the size of the memory pool.
+  /// The memory pool is considered to be composed of a list of chunks, if
+  /// the chunk is not occupied, it can be released.
+  ///
+  /// \return Number of bytes released. It may be smaller than the actual
+  /// released memory, because part of the memory is not managed by the
+  /// MemoryPool.
+  ///
+  uint64_t TryShrinkMemory() override;
+
   ///
   /// \brief Get the argument used by predictor
   ///
paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -135,6 +135,7 @@ TEST(AnalysisPredictor, ZeroCopy) {
   auto *out_data = out->data<float>(&place, &size);
   LOG(INFO) << "output size: " << size / sizeof(float);
   LOG(INFO) << "output_data: " << out_data;
+  predictor->TryShrinkMemory();
 }
 
 TEST(AnalysisPredictor, Clone) {
@@ -253,8 +254,7 @@ class MkldnnQuantizerTest : public testing::Test {
  public:
   MkldnnQuantizerTest() {
     AnalysisConfig config(FLAGS_dirname);
-    predictor.reset(
-        new AnalysisPredictor(config));
+    predictor = std::move(CreatePaddlePredictor(config));
     auto *predictor_p = static_cast<AnalysisPredictor *>(predictor.get());
 
     auto qconfig = new MkldnnQuantizerConfig();
@@ -507,3 +507,45 @@ TEST(AnalysisPredictor, bf16_pass_strategy) {
   }
 }
 }  // namespace paddle
+
+namespace paddle_infer {
+
+TEST(Predictor, Run) {
+  Config config;
+  config.SetModel(FLAGS_dirname);
+
+  auto predictor = CreatePredictor(config);
+
+  auto w0 = predictor->GetInputHandle("firstw");
+  auto w1 = predictor->GetInputHandle("secondw");
+  auto w2 = predictor->GetInputHandle("thirdw");
+  auto w3 = predictor->GetInputHandle("forthw");
+
+  w0->Reshape({4, 1});
+  w1->Reshape({4, 1});
+  w2->Reshape({4, 1});
+  w3->Reshape({4, 1});
+
+  auto *w0_data = w0->mutable_data<int64_t>(PlaceType::kCPU);
+  auto *w1_data = w1->mutable_data<int64_t>(PlaceType::kCPU);
+  auto *w2_data = w2->mutable_data<int64_t>(PlaceType::kCPU);
+  auto *w3_data = w3->mutable_data<int64_t>(PlaceType::kCPU);
+
+  for (int i = 0; i < 4; i++) {
+    w0_data[i] = i;
+    w1_data[i] = i;
+    w2_data[i] = i;
+    w3_data[i] = i;
+  }
+
+  predictor->Run();
+
+  auto out = predictor->GetOutputHandle("fc_1.tmp_2");
+  PlaceType place;
+  int size = 0;
+  out->data<float>(&place, &size);
+  LOG(INFO) << "output size: " << size / sizeof(float);
+  predictor->TryShrinkMemory();
+}
+
+}  // namespace paddle_infer
paddle/fluid/inference/api/api_tester.cc
@@ -60,6 +60,7 @@ TEST(paddle_inference_api, demo) {
   auto predictor = CreatePaddlePredictor(config);
   std::vector<PaddleTensor> outputs;
   predictor->Run({}, &outputs);
+  predictor->TryShrinkMemory();
 }
 
 TEST(paddle_inference_api, get_version) {
paddle/fluid/inference/api/paddle_api.h
@@ -319,6 +319,17 @@ class PD_INFER_DECL PaddlePredictor {
   ///
   virtual void ClearIntermediateTensor() {}
 
+  ///
+  /// \brief Release all tmp tensor to compress the size of the memory pool.
+  /// The memory pool is considered to be composed of a list of chunks, if
+  /// the chunk is not occupied, it can be released.
+  ///
+  /// \return Number of bytes released. It may be smaller than the actual
+  /// released memory, because part of the memory is not managed by the
+  /// MemoryPool.
+  ///
+  virtual uint64_t TryShrinkMemory() { return 0; }
+
   /// \brief Clone an existing predictor
   /// When using clone, the same network will be created,
   /// and the parameters between them are shared.
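The \return note above reads most clearly against a toy model of a chunked pool. The sketch below is an illustrative assumption, not Paddle's actual allocator: shrinking frees every chunk that is not currently occupied and reports the bytes it freed, which can undercount because memory the pool never managed is invisible to it.

#include <cstdint>
#include <cstdlib>
#include <vector>

// Toy chunked memory pool (illustrative only).
struct Chunk {
  void *ptr;
  uint64_t size;
  bool occupied;
};

class Pool {
 public:
  // Free every unoccupied chunk; report how many bytes were released.
  uint64_t Shrink() {
    uint64_t released = 0;
    std::vector<Chunk> kept;
    for (const Chunk &c : chunks_) {
      if (!c.occupied) {
        std::free(c.ptr);
        released += c.size;
      } else {
        kept.push_back(c);
      }
    }
    chunks_.swap(kept);
    return released;  // bytes outside the pool are not counted
  }

 private:
  std::vector<Chunk> chunks_;
};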
paddle/fluid/inference/api/paddle_inference_api.h
@@ -224,6 +224,17 @@ class PD_INFER_DECL Predictor {
   /// \brief Clear the intermediate tensors of the predictor
   void ClearIntermediateTensor();
 
+  ///
+  /// \brief Release all tmp tensor to compress the size of the memory pool.
+  /// The memory pool is considered to be composed of a list of chunks, if
+  /// the chunk is not occupied, it can be released.
+  ///
+  /// \return Number of bytes released. It may be smaller than the actual
+  /// released memory, because part of the memory is not managed by the
+  /// MemoryPool.
+  ///
+  uint64_t TryShrinkMemory();
+
  private:
   std::unique_ptr<paddle::PaddlePredictor> predictor_;
 };
paddle/fluid/pybind/inference_api.cc
@@ -566,6 +566,7 @@ void BindAnalysisPredictor(py::module *m) {
       .def("zero_copy_run", &AnalysisPredictor::ZeroCopyRun)
       .def("clear_intermediate_tensor",
            &AnalysisPredictor::ClearIntermediateTensor)
+      .def("try_shrink_memory", &AnalysisPredictor::TryShrinkMemory)
       .def("create_feed_fetch_var", &AnalysisPredictor::CreateFeedFetchVar)
       .def("prepare_feed_fetch", &AnalysisPredictor::PrepareFeedFetch)
       .def("prepare_argument", &AnalysisPredictor::PrepareArgument)
@@ -593,6 +594,7 @@ void BindPaddleInferPredictor(py::module *m) {
       .def("get_output_handle", &paddle_infer::Predictor::GetOutputHandle)
       .def("run", &paddle_infer::Predictor::Run)
       .def("clone", &paddle_infer::Predictor::Clone)
+      .def("try_shrink_memory", &paddle_infer::Predictor::TryShrinkMemory)
       .def("clear_intermediate_tensor",
            &paddle_infer::Predictor::ClearIntermediateTensor);
 }
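On the Python side, these bindings expose the new method as predictor.try_shrink_memory() on both predictor types, alongside the existing clear_intermediate_tensor().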