BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle, in sync with the fork source)
Unverified commit 3a294042, authored Jun 07, 2018 by tensor-tang, committed via GitHub on Jun 07, 2018.
Parents: 4f95bc94, 4b7b17a8

Merge pull request #11233 from tensor-tang/multithreads

Fix abort issue in cpu multi-threads
Showing 3 changed files with 46 additions and 35 deletions (+46 -35):

  paddle/fluid/framework/scope.cc                            +11   -1
  paddle/fluid/framework/scope.h                              +9   -3
  paddle/fluid/inference/tests/book/test_inference_nlp.cc    +26  -31
paddle/fluid/framework/scope.cc  (+11 -1)

```diff
@@ -43,6 +43,8 @@ Scope& Scope::NewScope() const {
 }
 
 Variable* Scope::Var(const std::string& name) {
+  // acquire the lock when new var under this scope
+  std::unique_lock<std::mutex> lock(mutex_);
   auto* v = FindVarLocally(name);
   if (v != nullptr) return v;
@@ -62,11 +64,17 @@ Variable* Scope::Var(std::string* name) {
 }
 
 Variable* Scope::FindVar(const std::string& name) const {
+  // acquire the lock when find var
+  std::unique_lock<std::mutex> lock(mutex_);
+  return FindVarInternal(name);
+}
+
+Variable* Scope::FindVarInternal(const std::string& name) const {
   auto var = FindVarLocally(name);
   if (var != nullptr) {
     return var;
   }
-  return (parent_ == nullptr) ? nullptr : parent_->FindVar(name);
+  return (parent_ == nullptr) ? nullptr : parent_->FindVarInternal(name);
 }
 
 const Scope* Scope::FindScope(const Variable* var) const {
@@ -78,6 +86,7 @@ const Scope* Scope::FindScope(const Variable* var) const {
   return (parent_ == nullptr) ? nullptr : parent_->FindScope(var);
 }
 void Scope::DropKids() {
+  std::unique_lock<std::mutex> lock(mutex_);
   for (Scope* s : kids_) delete s;
   kids_.clear();
 }
@@ -105,6 +114,7 @@ void Scope::DeleteScope(Scope* scope) const {
 }
 
 void Scope::EraseVars(const std::vector<std::string>& var_names) {
+  std::unique_lock<std::mutex> lock(mutex_);
   std::set<std::string> var_set(var_names.begin(), var_names.end());
   for (auto it = vars_.begin(); it != vars_.end();) {
     if (var_set.find(it->first) != var_set.end()) {
```
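Taken together with the test change below, the fix has two parts: serialize all mutation of a shared Scope behind its own mutex, and stop sharing non-thread-safe objects across inference threads. The locking idiom in this file is worth spelling out: every public member that touches vars_ or kids_ acquires mutex_, and the recursive lookup is routed through a private, lock-free FindVarInternal, because std::mutex is non-recursive, so a member that already holds the lock (such as Scope::Var) would deadlock if it re-entered the locking FindVar. A minimal, self-contained sketch of that idiom; MiniScope and its members are illustrative stand-ins, not Paddle's actual classes:

```cpp
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

// Illustrative stand-in for paddle::framework::Variable.
struct Variable {};

class MiniScope {
 public:
  // Create-or-find. The lock serializes concurrent inserts into
  // vars_, which is what the commit adds to Scope::Var.
  Variable* Var(const std::string& name) {
    std::unique_lock<std::mutex> lock(mutex_);
    if (Variable* v = FindVarLocally(name)) return v;
    auto owned = std::make_unique<Variable>();
    Variable* raw = owned.get();
    vars_[name] = std::move(owned);
    return raw;
  }

  // Public lookup takes the lock once, then delegates to the unlocked
  // helper, mirroring FindVar -> FindVarInternal in the patch. Since
  // std::mutex is non-recursive, re-entering FindVar from a member
  // that already holds the lock would deadlock.
  Variable* FindVar(const std::string& name) const {
    std::unique_lock<std::mutex> lock(mutex_);
    return FindVarInternal(name);
  }

 private:
  Variable* FindVarInternal(const std::string& name) const {
    if (Variable* v = FindVarLocally(name)) return v;
    return (parent_ == nullptr) ? nullptr : parent_->FindVarInternal(name);
  }

  Variable* FindVarLocally(const std::string& name) const {
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : it->second.get();
  }

  mutable std::mutex mutex_;  // mutable: const lookups still lock
  std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
  const MiniScope* parent_{nullptr};
};
```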
paddle/fluid/framework/scope.h  (+9 -3)

```diff
@@ -81,14 +81,20 @@ class Scope {
   // Rename variable to a new name and return the new name
   std::string Rename(const std::string& origin_name) const;
 
-  /// Caller doesn't own the returned Variable.
-  Variable* FindVarLocally(const std::string& name) const;
-
  private:
   // Call Scope::NewScope for a sub-scope.
   explicit Scope(Scope const* parent) : parent_(parent) {}
 
+  // Called by FindVar recursively.
+  // Caller doesn't own the returned Variable.
+  Variable* FindVarInternal(const std::string& name) const;
+
+  // Called by FindVarInternal and Var.
+  // Caller doesn't own the returned Variable.
+  Variable* FindVarLocally(const std::string& name) const;
+
   mutable std::unordered_map<std::string, std::unique_ptr<Variable>> vars_;
 
   // Scope in `kids_` are owned by this class.
   mutable std::list<Scope*> kids_;
   Scope const* parent_{nullptr};
```
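Note that the header change demotes FindVarLocally from the public API to a private helper: callers can now only reach the map through the lock-taking FindVar/Var, so there is no unguarded public path left. With that in place, concurrent access to one scope is safe to exercise directly. A hypothetical usage sketch against the MiniScope above (thread count and variable names are arbitrary):

```cpp
#include <string>
#include <thread>
#include <vector>
// Assumes MiniScope and Variable from the previous sketch.

int main() {
  MiniScope scope;
  std::vector<std::thread> workers;
  for (int tid = 0; tid < 4; ++tid) {
    workers.emplace_back([&scope, tid] {
      // Before the fix, concurrent inserts into the scope's internal
      // unordered_map could corrupt it and abort the process.
      Variable* v = scope.Var("out_" + std::to_string(tid));
      (void)v;
      scope.FindVar("out_" + std::to_string(tid));
    });
  }
  for (auto& w : workers) w.join();
}
```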
paddle/fluid/inference/tests/book/test_inference_nlp.cc  (+26 -31)

```diff
@@ -101,23 +101,22 @@ void SplitData(
 }
 
 void ThreadRunInfer(
-    const int tid, paddle::framework::Executor* executor,
-    paddle::framework::Scope* scope,
-    const std::unique_ptr<paddle::framework::ProgramDesc>& inference_program,
+    const int tid, paddle::framework::Scope* scope,
     const std::vector<std::vector<const paddle::framework::LoDTensor*>>& jobs) {
-  auto copy_program = std::unique_ptr<paddle::framework::ProgramDesc>(
-      new paddle::framework::ProgramDesc(*inference_program));
+  // maybe framework:ProgramDesc is not thread-safe
   auto& sub_scope = scope->NewScope();
+  auto place = paddle::platform::CPUPlace();
+  auto executor = paddle::framework::Executor(place);
+  auto inference_program =
+      paddle::inference::Load(&executor, scope, FLAGS_model_path);
 
-  std::string feed_holder_name = "feed_" + paddle::string::to_string(tid);
-  std::string fetch_holder_name = "fetch_" + paddle::string::to_string(tid);
-  copy_program->SetFeedHolderName(feed_holder_name);
-  copy_program->SetFetchHolderName(fetch_holder_name);
+  auto ctx = executor.Prepare(*inference_program, /*block_id*/ 0);
+  executor.CreateVariables(*inference_program, &sub_scope, /*block_id*/ 0);
 
   const std::vector<std::string>& feed_target_names =
-      copy_program->GetFeedTargetNames();
+      inference_program->GetFeedTargetNames();
   const std::vector<std::string>& fetch_target_names =
-      copy_program->GetFetchTargetNames();
+      inference_program->GetFetchTargetNames();
 
   PADDLE_ENFORCE_EQ(fetch_target_names.size(), 1UL);
   std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
```
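This hunk is the other half of the fix: instead of handing every thread a reference to one shared inference_program and copying it (the new comment suspects ProgramDesc's copy is not thread-safe), each thread now constructs its own Executor and loads its own program. A generic sketch of that per-thread-ownership pattern; Model, Worker, and the path are placeholders, not Paddle APIs:

```cpp
#include <memory>
#include <string>
#include <thread>
#include <vector>

// Stand-in for a heavyweight object whose copy constructor may not be
// safe to run concurrently, e.g. a program/graph description.
struct Model {
  explicit Model(const std::string& path) : source(path) { /* parse file */ }
  std::string source;
};

void Worker(int tid, const std::string& model_path) {
  // Each thread owns its Model outright: no cross-thread copies of a
  // shared instance, so an unsynchronized copy constructor cannot race.
  auto model = std::make_unique<Model>(model_path);
  (void)tid;
  // ... run inference with *model for this tid ...
}

int main() {
  std::vector<std::thread> threads;
  for (int tid = 0; tid < 4; ++tid)
    threads.emplace_back(Worker, tid, std::string("path/to/model"));
  for (auto& t : threads) t.join();
}
```

The diff for this file continues below.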
```diff
@@ -131,9 +130,8 @@ void ThreadRunInfer(
   auto start_ms = GetCurrentMs();
   for (size_t i = 0; i < inputs.size(); ++i) {
     feed_targets[feed_target_names[0]] = inputs[i];
-    executor->Run(*copy_program, &sub_scope, &feed_targets, &fetch_targets,
-                  true /*create_local_scope*/, true /*create_vars*/,
-                  feed_holder_name, fetch_holder_name);
+    executor.RunPreparedContext(ctx.get(), &sub_scope, &feed_targets,
+                                &fetch_targets, false /*create_local_scope*/);
   }
   auto stop_ms = GetCurrentMs();
   scope->DeleteScope(&sub_scope);
```
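The inner loop also changed shape: the program is now analyzed once up front (executor.Prepare) and the prepared context is re-run per input via RunPreparedContext, with create_local_scope set to false because the thread already owns sub_scope. The same prepare-once, run-many pattern in plain C++, using std::regex as the expensive "prepare" step; this is an analogy, not Paddle code:

```cpp
#include <iterator>
#include <regex>
#include <string>
#include <vector>

// Compiling the regex is the costly "Prepare"; iterating matches is
// the cheap per-input "Run", hoisted out of the loop exactly like the
// executor context above.
int CountWords(const std::vector<std::string>& inputs) {
  static const std::regex word_re("[A-Za-z]+");  // prepared once
  int total = 0;
  for (const auto& line : inputs) {
    total += static_cast<int>(std::distance(
        std::sregex_iterator(line.begin(), line.end(), word_re),
        std::sregex_iterator()));
  }
  return total;
}
```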
```diff
@@ -158,22 +156,10 @@ TEST(inference, nlp) {
   LOG(INFO) << "Number of samples (seq_len<1024): " << datasets.size();
   LOG(INFO) << "Total number of words: " << num_total_words;
 
-  const bool model_combined = false;
   // 0. Call `paddle::framework::InitDevices()` initialize all the devices
-  // 1. Define place, executor, scope
-  auto place = paddle::platform::CPUPlace();
-  auto executor = paddle::framework::Executor(place);
   std::unique_ptr<paddle::framework::Scope> scope(
       new paddle::framework::Scope());
-
-  // 2. Initialize the inference_program and load parameters
-  std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
-  inference_program =
-      InitProgram(&executor, scope.get(), FLAGS_model_path, model_combined);
-  if (FLAGS_use_mkldnn) {
-    EnableMKLDNN(inference_program);
-  }
 
 #ifdef PADDLE_WITH_MKLML
   // only use 1 thread number per std::thread
   omp_set_dynamic(0);
```
```diff
@@ -189,21 +175,30 @@ TEST(inference, nlp) {
     start_ms = GetCurrentMs();
     for (int i = 0; i < FLAGS_num_threads; ++i) {
       threads.emplace_back(
-          new std::thread(ThreadRunInfer, i, &executor, scope.get(),
-                          std::ref(inference_program), std::ref(jobs)));
+          new std::thread(ThreadRunInfer, i, scope.get(), std::ref(jobs)));
     }
     for (int i = 0; i < FLAGS_num_threads; ++i) {
       threads[i]->join();
     }
     stop_ms = GetCurrentMs();
   } else {
-    if (FLAGS_prepare_vars) {
-      executor.CreateVariables(*inference_program, scope.get(), 0);
-    }
+    // 1. Define place, executor, scope
+    auto place = paddle::platform::CPUPlace();
+    auto executor = paddle::framework::Executor(place);
+
+    // 2. Initialize the inference_program and load parameters
+    std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
+    inference_program = InitProgram(&executor, scope.get(), FLAGS_model_path,
+                                    /*model combined*/ false);
+    if (FLAGS_use_mkldnn) {
+      EnableMKLDNN(inference_program);
+    }
     // always prepare context
     std::unique_ptr<paddle::framework::ExecutorPrepareContext> ctx;
     ctx = executor.Prepare(*inference_program, 0);
+    if (FLAGS_prepare_vars) {
+      executor.CreateVariables(*inference_program, scope.get(), 0);
+    }
     // preapre fetch
     const std::vector<std::string>& fetch_target_names =
         inference_program->GetFetchTargetNames();
```
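One detail of the simplified launch site: std::thread decay-copies its arguments, which is why the old call had to wrap the shared program in std::ref(inference_program) to pass it by reference. Once each thread loads its own program, that wrapper disappears, and only std::ref(jobs) remains because jobs is still shared (read-only) across threads. A small demonstration of that std::thread argument rule:

```cpp
#include <functional>
#include <iostream>
#include <thread>

void Bump(int& counter) { ++counter; }

int main() {
  int counter = 0;
  // std::thread copies its arguments by value; without std::ref the
  // call would not compile, since the copy cannot bind to int&.
  std::thread t(Bump, std::ref(counter));
  t.join();
  std::cout << counter << "\n";  // prints 1
}
```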