BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 743cb840, authored Dec 06, 2018 by Tao Luo
Parent: 42359e88

update with comments

test=develop
Showing 12 changed files with 56 additions and 52 deletions.
paddle/fluid/framework/executor_thread_worker.cc (+1, -1)
paddle/fluid/inference/analysis/argument.h (+1, -1)
paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc (+7, -3)
paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h (+1, -1)
paddle/fluid/inference/api/analysis_config.cc (+7, -6)
paddle/fluid/inference/api/analysis_predictor.cc (+4, -3)
paddle/fluid/inference/api/paddle_analysis_config.h (+4, -6)
paddle/fluid/inference/io.cc (+20, -19)
paddle/fluid/inference/io.h (+5, -6)
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc (+2, -2)
paddle/fluid/inference/tests/api/config_printer.h (+1, -1)
paddle/fluid/operators/load_combine_op.cc (+3, -3)
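Taken together, the commit renames the in-memory-model flag is_memory_load to model_from_memory throughout the inference code (analysis argument, IR graph build pass, config, predictor, io helpers, tests, and the load_combine op), renames contrib::AnalysisConfig::SetProgBufferAndParamBuffer to SetModelBuffer, and splits the in-memory branch of inference::Load into a dedicated LoadFromMemory function.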
paddle/fluid/framework/executor_thread_worker.cc

```diff
@@ -97,7 +97,7 @@ void ExecutorThreadWorker::SetDevice() {
   static unsigned concurrency_cap = std::thread::hardware_concurrency();
   int thread_id = this->thread_id_;
 
-  if ((unsigned)thread_id < concurrency_cap) {
+  if (static_cast<unsigned>(thread_id) < concurrency_cap) {
     unsigned proc = thread_id;
 
     cpu_set_t mask;
```
paddle/fluid/inference/analysis/argument.h

```diff
@@ -103,7 +103,7 @@ struct Argument {
   // Model specified with program and parameters files.
   DECL_ARGUMENT_FIELD(model_program_path, ModelProgramPath, std::string);
   DECL_ARGUMENT_FIELD(model_params_path, ModelParamsPath, std::string);
-  DECL_ARGUMENT_FIELD(is_memory_load, IsMemoryLoad, bool);
+  DECL_ARGUMENT_FIELD(model_from_memory, ModelFromMemory, bool);
 
   // The overall graph to work on.
   DECL_ARGUMENT_UNIQUE_FIELD(main_graph, MainGraph, framework::ir::Graph);
```
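The renamed field is declared through the DECL_ARGUMENT_FIELD macro, which generates the accessors used elsewhere in this commit (model_from_memory(), SetModelFromMemory(), and the *_valid() predicate seen in ir_graph_build_pass.cc). A rough sketch of that accessor surface, inferred from those call sites rather than copied from argument.h:

```cpp
// Inferred accessor surface of DECL_ARGUMENT_FIELD(model_from_memory,
// ModelFromMemory, bool); the real macro body in argument.h may differ.
struct ArgumentFieldSketch {
  bool model_from_memory() const { return model_from_memory_; }
  void SetModelFromMemory(bool v) {
    model_from_memory_ = v;
    model_from_memory_valid_ = true;  // mirrors the *_valid() pattern
  }
  bool model_from_memory_valid() const { return model_from_memory_valid_; }

 private:
  bool model_from_memory_{false};
  bool model_from_memory_valid_{false};
};
```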
paddle/fluid/inference/analysis/passes/ir_graph_build_pass.cc

```diff
@@ -46,7 +46,7 @@ void IrGraphBuildPass::RunImpl(Argument *argument) {
       argument->model_params_path_valid()) {
     auto program = LoadModel(
         argument->model_program_path(), argument->model_params_path(),
-        argument->scope_ptr(), place, argument->is_memory_load());
+        argument->scope_ptr(), place, argument->model_from_memory());
     argument->SetMainProgram(program.release());
   } else {
     PADDLE_THROW(
@@ -69,9 +69,13 @@ std::unique_ptr<framework::ProgramDesc> IrGraphBuildPass::LoadModel(
 std::unique_ptr<framework::ProgramDesc> IrGraphBuildPass::LoadModel(
     const std::string &program_path, const std::string &params_path,
     framework::Scope *scope, const platform::Place &place,
-    bool is_memory_load) {
+    bool model_from_memory) {
   framework::Executor exe(place);
-  return Load(&exe, scope, program_path, params_path, is_memory_load);
+  if (!model_from_memory) {
+    return Load(&exe, scope, program_path, params_path);
+  } else {
+    return LoadFromMemory(&exe, scope, program_path, params_path);
+  }
 }
 
 std::string IrGraphBuildPass::repr() const { return "ir-graph-build-pass"; }
```
paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h

```diff
@@ -39,7 +39,7 @@ class IrGraphBuildPass : public AnalysisPass {
   std::unique_ptr<framework::ProgramDesc> LoadModel(
       const std::string &program_path, const std::string &params_path,
       framework::Scope *scope, const platform::Place &place,
-      bool is_memory_load);
+      bool model_from_memory);
 
   std::string model_binary_str_;
 };
```
paddle/fluid/inference/api/analysis_config.cc

```diff
@@ -53,7 +53,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
   use_tensorrt_ = other.use_tensorrt_;
   tensorrt_max_batchsize_ = other.tensorrt_max_batchsize_;
   tensorrt_workspace_size_ = other.tensorrt_workspace_size_;
-  is_memory_load_ = other.is_memory_load_;
+  model_from_memory_ = other.model_from_memory_;
 
   if (use_gpu) {
     pass_builder_.reset(new GpuPassStrategy(
@@ -81,7 +81,7 @@ contrib::AnalysisConfig::AnalysisConfig(contrib::AnalysisConfig &&other) {
   use_tensorrt_ = other.use_tensorrt_;
   tensorrt_max_batchsize_ = other.tensorrt_max_batchsize_;
   tensorrt_workspace_size_ = other.tensorrt_workspace_size_;
-  is_memory_load_ = other.is_memory_load_;
+  model_from_memory_ = other.model_from_memory_;
 
   pass_builder_ = std::move(other.pass_builder_);
 }
@@ -105,12 +105,13 @@ void contrib::AnalysisConfig::EnableTensorRtEngine(int workspace_size,
   pass_builder()->InsertPass(1, "tensorrt_subgraph_pass");
 }
 
-void contrib::AnalysisConfig::SetProgBufferAndParamBuffer(
-    const char *prog_buffer, size_t prog_buffer_size, const char *param_buffer,
-    size_t param_buffer_size) {
+void contrib::AnalysisConfig::SetModelBuffer(const char *prog_buffer,
+                                             size_t prog_buffer_size,
+                                             const char *param_buffer,
+                                             size_t param_buffer_size) {
   prog_file = std::string(prog_buffer, prog_buffer + prog_buffer_size);
   param_file = std::string(param_buffer, param_buffer + param_buffer_size);
-  is_memory_load_ = true;
+  model_from_memory_ = true;
 }
 
 }  // namespace paddle
```
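A minimal caller-side sketch of the renamed method, patterned on the analyzer_ner_tester.cc change below; the model paths and the ReadFileToString helper are placeholders of this sketch, not Paddle APIs. Since SetModelBuffer copies both buffers into prog_file and param_file, the caller's strings need not outlive the call:

```cpp
#include <fstream>
#include <sstream>
#include <string>

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

// Helper for the sketch only; reads a whole binary file into a string.
static std::string ReadFileToString(const std::string &path) {
  std::ifstream fin(path, std::ios::in | std::ios::binary);
  std::ostringstream ss;
  ss << fin.rdbuf();
  return ss.str();
}

void ConfigFromMemory(paddle::contrib::AnalysisConfig *cfg) {
  std::string prog = ReadFileToString("model_dir/__model__");  // placeholder path
  std::string params = ReadFileToString("model_dir/param");    // placeholder path
  // After this call model_from_memory() returns true, so the predictor
  // deserializes the program from the buffer instead of reopening files.
  cfg->SetModelBuffer(prog.data(), prog.size(), params.data(), params.size());
}
```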
paddle/fluid/inference/api/analysis_predictor.cc

```diff
@@ -308,7 +308,7 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
   argument_.SetUseGPU(config_.use_gpu);
   argument_.SetGPUDeviceId(config_.device);
-  argument_.SetIsMemoryLoad(config_.is_memory_load_);
+  argument_.SetModelFromMemory(config_.model_from_memory_);
   // Analyze inference_program
   if (!config_.model_dir.empty()) {
     argument_.SetModelDir(config_.model_dir);
@@ -451,11 +451,12 @@ bool AnalysisPredictor::LoadProgramDesc() {
   // Create ProgramDesc
   framework::proto::ProgramDesc proto;
-  if (!config_.is_memory_load()) {
+  if (!config_.model_from_memory()) {
     std::string pb_content;
     // Read binary
     std::ifstream fin(filename, std::ios::in | std::ios::binary);
-    PADDLE_ENFORCE(static_cast<bool>(fin), "Cannot open file %s", filename);
+    PADDLE_ENFORCE(static_cast<bool>(fin.is_open()), "Cannot open file %s",
+                   filename);
     fin.seekg(0, std::ios::end);
     pb_content.resize(fin.tellg());
     fin.seekg(0, std::ios::beg);
```
paddle/fluid/inference/api/paddle_analysis_config.h

```diff
@@ -55,11 +55,9 @@ struct AnalysisConfig : public NativeConfig {
   bool use_mkldnn() const { return use_mkldnn_; }
 
   // Specify the memory buffer of program and parameter
-  void SetProgBufferAndParamBuffer(const char* prog_buffer,
-                                   size_t prog_buffer_size,
-                                   const char* program_buffer,
-                                   size_t program_buffer_size);
-  bool is_memory_load() const { return is_memory_load_; }
+  void SetModelBuffer(const char* prog_buffer, size_t prog_buffer_size,
+                      const char* program_buffer, size_t program_buffer_size);
+  bool model_from_memory() const { return model_from_memory_; }
 
   friend class ::paddle::AnalysisPredictor;
@@ -69,7 +67,7 @@ struct AnalysisConfig : public NativeConfig {
   int tensorrt_workspace_size_;
   int tensorrt_max_batchsize_;
   std::unique_ptr<PassStrategy> pass_builder_;
-  bool is_memory_load_{false};
+  bool model_from_memory_{false};
 };
 
 // Configurations for Anakin engine.
```
paddle/fluid/inference/io.cc

```diff
@@ -70,7 +70,7 @@ void LoadPersistables(framework::Executor* executor, framework::Scope* scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename,
-                      bool is_memory_load = false) {
+                      bool model_from_memory = false) {
   const framework::BlockDesc& global_block = main_program.Block(0);
 
   framework::ProgramDesc* load_program = new framework::ProgramDesc();
@@ -109,7 +109,7 @@ void LoadPersistables(framework::Executor* executor, framework::Scope* scope,
       op->SetType("load_combine");
       op->SetOutput("Out", paramlist);
       op->SetAttr("file_path", {param_filename});
-      op->SetAttr("is_memory_load", {is_memory_load});
+      op->SetAttr("model_from_memory", {model_from_memory});
       op->CheckAttrs();
     }
@@ -132,23 +132,17 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
                  "model version %ld is not supported.",
                  main_program->Version());
-  // is_memory_load is false in seperate parameters.
+  // model_from_memory is false in seperate parameters.
   LoadPersistables(executor, scope, *main_program, dirname, "",
-                   false /* is_memory_load */);
+                   false /* model_from_memory */);
   return main_program;
 }
 
 std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
                                              framework::Scope* scope,
                                              const std::string& prog_filename,
-                                             const std::string& param_filename,
-                                             bool is_memory_load = false) {
+                                             const std::string& param_filename) {
   std::string program_desc_str;
-  if (!is_memory_load) {
-    ReadBinaryFile(prog_filename, &program_desc_str);
-  } else {
-    program_desc_str = prog_filename;
-  }
+  ReadBinaryFile(prog_filename, &program_desc_str);
 
   std::unique_ptr<framework::ProgramDesc> main_program(
       new framework::ProgramDesc(program_desc_str));
@@ -157,15 +151,22 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
                  main_program->Version());
 
   LoadPersistables(executor, scope, *main_program, "", param_filename,
-                   is_memory_load);
+                   false /* model_from_memory */);
   return main_program;
 }
 
-std::unique_ptr<framework::ProgramDesc> Load(
-    framework::Executor* executor, framework::Scope* scope,
-    const std::string& prog_filename, const std::string& param_filename) {
-  return Load(executor, scope, prog_filename, param_filename,
-              false /* is_memory_load */);
+std::unique_ptr<framework::ProgramDesc> LoadFromMemory(
+    framework::Executor* executor, framework::Scope* scope,
+    const std::string& prog_buffer, const std::string& param_buffer) {
+  std::unique_ptr<framework::ProgramDesc> main_program(
+      new framework::ProgramDesc(prog_buffer));
+  PADDLE_ENFORCE(framework::IsProgramVersionSupported(main_program->Version()),
+                 "model version %ld is not supported.",
+                 main_program->Version());
+
+  LoadPersistables(executor, scope, *main_program, "", param_buffer,
+                   true /* model_filename */);
+  return main_program;
+}
 
 void SaveVars(const framework::Scope& scope,
```
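A hedged end-to-end sketch of the new memory path, mirroring the dispatch in IrGraphBuildPass::LoadModel above; the place, executor, and scope setup here are illustrative, not part of this commit:

```cpp
#include <memory>
#include <string>

#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/place.h"

std::unique_ptr<paddle::framework::ProgramDesc> LoadBuffers(
    const std::string& prog_buffer, const std::string& param_buffer) {
  paddle::platform::CPUPlace place;
  paddle::framework::Executor exe(place);
  paddle::framework::Scope scope;
  // Both arguments carry serialized bytes, not file names; underneath,
  // LoadPersistables is invoked with model_from_memory == true.
  return paddle::inference::LoadFromMemory(&exe, &scope, prog_buffer,
                                           param_buffer);
}
```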
paddle/fluid/inference/io.h

```diff
@@ -30,7 +30,8 @@ void Init(const std::vector<std::string> argv);
 void LoadPersistables(framework::Executor* executor, framework::Scope* scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
-                      const std::string& param_filename, bool is_memory_load);
+                      const std::string& param_filename,
+                      bool model_from_memory);
 
 std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
                                              framework::Scope* scope,
@@ -41,11 +42,9 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
                                              const std::string& prog_filename,
                                              const std::string& param_filename);
 
-std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
-                                             framework::Scope* scope,
-                                             const std::string& prog_filename,
-                                             const std::string& param_filename,
-                                             bool is_memory_load);
+std::unique_ptr<framework::ProgramDesc> LoadFromMemory(
+    framework::Executor* executor, framework::Scope* scope,
+    const std::string& prog_buffer, const std::string& param_buffer);
 
 // Save the variables from a scope to disk.
 void SaveVars(const framework::Scope& scope,
```
paddle/fluid/inference/tests/api/analyzer_ner_tester.cc

```diff
@@ -98,8 +98,8 @@ void SetConfig(contrib::AnalysisConfig *cfg, bool memory_load = false) {
     std::string buffer_prog, buffer_param;
     ReadBinaryFile(FLAGS_infer_model + "/__model__", &buffer_prog);
     ReadBinaryFile(FLAGS_infer_model + "/param", &buffer_param);
-    cfg->SetProgBufferAndParamBuffer(&buffer_prog[0], buffer_prog.size(),
-                                     &buffer_param[0], buffer_param.size());
+    cfg->SetModelBuffer(&buffer_prog[0], buffer_prog.size(), &buffer_param[0],
+                        buffer_param.size());
   } else {
     cfg->prog_file = FLAGS_infer_model + "/__model__";
     cfg->param_file = FLAGS_infer_model + "/param";
```
paddle/fluid/inference/tests/api/config_printer.h

```diff
@@ -63,7 +63,7 @@ std::ostream &operator<<(std::ostream &os,
   os << GenSpaces(num_spaces) << "contrib::AnalysisConfig {\n";
   num_spaces++;
   os << *reinterpret_cast<const NativeConfig *>(&config);
-  if (!config.is_memory_load()) {
+  if (!config.model_from_memory()) {
     os << GenSpaces(num_spaces) << "prog_file: " << config.prog_file << "\n";
     os << GenSpaces(num_spaces) << "param_file: " << config.param_file << "\n";
   } else {
```
paddle/fluid/operators/load_combine_op.cc

```diff
@@ -32,12 +32,12 @@ class LoadCombineOp : public framework::OperatorBase {
                const platform::Place& place) const override {
     auto filename = Attr<std::string>("file_path");
     auto load_as_fp16 = Attr<bool>("load_as_fp16");
-    auto is_memory_load = Attr<bool>("is_memory_load");
+    auto model_from_memory = Attr<bool>("model_from_memory");
     auto out_var_names = Outputs("Out");
     PADDLE_ENFORCE_GT(
         static_cast<int>(out_var_names.size()), 0,
         "The number of output variables should be greater than 0.");
-    if (!is_memory_load) {
+    if (!model_from_memory) {
       std::ifstream fin(filename);
       PADDLE_ENFORCE(static_cast<bool>(fin),
                      "Cannot open file %s for load_combine op", filename);
@@ -112,7 +112,7 @@ class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker {
         "LoDTensors will be loaded from \"file_path\".")
         .AddCustomChecker(
             [](const std::string& path) { return !path.empty(); });
-    AddAttr<bool>("is_memory_load",
+    AddAttr<bool>("model_from_memory",
                   "(boolean, default false)"
                   "If true, file_path is in memory, and LoDTensors will be "
                   "loaded directly from memory")
```
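To connect the two ends of this change: LoadPersistables in io.cc emits a load_combine op whose file_path attribute carries either a real path or, when model_from_memory is true, the serialized parameter bytes themselves. A hedged reconstruction of that wiring, using only the op-desc calls visible in this commit (AppendLoadCombine is a hypothetical helper):

```cpp
#include <string>
#include <vector>

#include "paddle/fluid/framework/program_desc.h"

// Appends a load_combine op to block 0 of `prog`. When model_from_memory is
// true, `file_path_or_buffer` holds the raw LoDTensor bytes, not a path.
void AppendLoadCombine(paddle::framework::ProgramDesc *prog,
                       const std::vector<std::string> &param_names,
                       const std::string &file_path_or_buffer,
                       bool model_from_memory) {
  auto *op = prog->MutableBlock(0)->AppendOp();
  op->SetType("load_combine");
  op->SetOutput("Out", param_names);
  op->SetAttr("file_path", {file_path_or_buffer});
  op->SetAttr("model_from_memory", {model_from_memory});
  op->CheckAttrs();
}
```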