BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 7f8bc49d
Authored by guru4elephant on May 24, 2019; committed via GitHub on May 24, 2019

polish_executor_and_add_ctx_cache (#17536)

* polish_executor_and_add_ctx_cache

Parent: 7ae461eb
Showing 5 changed files with 78 additions and 15 deletions (+78, -15)
paddle/fluid/framework/executor.cc        +20  -1
paddle/fluid/framework/executor.h         +15  -9
paddle/fluid/framework/hogwild_worker.cc   +1  -1
paddle/fluid/pybind/pybind.cc             +19  -1
python/paddle/fluid/executor.py           +23  -3
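For orientation, here is a minimal usage sketch (not part of the diff) of the behaviour this commit targets: with use_program_cache=True, the Python Executor now also caches an ExecutorPrepareContext under the new strong program cache key, so repeated run() calls with the same program, feed names and fetch list reuse the prepared operators instead of rebuilding them. The network below is purely illustrative.

import numpy as np
import paddle.fluid as fluid

# Illustrative program: one feed ('x') and one fetch (y).
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.fc(input=x, size=1)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

feed = {'x': np.random.rand(8, 1).astype('float32')}

# The first call adds feed/fetch ops, prepares the execution context and
# caches both; later identical calls hit the cache and go through
# run_prepared_ctx instead of a full prepare-and-run.
for _ in range(3):
    out, = exe.run(fluid.default_main_program(),
                   feed=feed,
                   fetch_list=[y],
                   use_program_cache=True)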
paddle/fluid/framework/executor.cc
...
...
@@ -244,6 +244,25 @@ static bool has_fetch_operators(
  return fetch_count > 0;
}

std::unique_ptr<ExecutorPrepareContext> Executor::PrepareCtxCache(
    const ProgramDesc& program, int block_id,
    const std::vector<std::string>& skip_ref_cnt_vars, bool force_disable_gc) {
  std::unique_ptr<ExecutorPrepareContext> ctx;
  ctx.reset(new ExecutorPrepareContext(program, block_id));
  auto& block = program.Block(block_id);
  for (auto& op_desc : block.AllOps()) {
    ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc));
  }
#ifdef PADDLE_WITH_NGRAPH
  if (FLAGS_use_ngraph) {
    paddle::operators::NgraphEngine::FuseNgraphOps(
        ctx->prog_.Block(ctx->block_id_), &ctx->ops_);
  }
#endif
  ctx->PrepareUnusedVars(skip_ref_cnt_vars, force_disable_gc);
  return ctx;
}

void Executor::Run(const ProgramDesc& program, Scope* scope,
                   std::map<std::string, const LoDTensor*>* feed_targets,
                   std::map<std::string, LoDTensor*>* fetch_targets,
...
...
@@ -368,6 +387,7 @@ std::vector<std::shared_ptr<ExecutorPrepareContext>> Executor::Prepare(
void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
                                  bool create_local_scope, bool create_vars,
                                  bool keep_kids) {
  platform::RecordBlock b(kProgramId);
  PADDLE_ENFORCE_NOT_NULL(scope);
  Scope* local_scope = scope;
  if (create_vars) {
...
...
@@ -407,7 +427,6 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
  for (auto& op : ctx->ops_) {
    op->Run(*local_scope, place_);
    if (gc) {
      DeleteUnusedTensors(*local_scope, op.get(), ctx->unused_vars_, gc.get());
    }
...
...
paddle/fluid/framework/executor.h
...
...
@@ -83,6 +83,21 @@ class Executor {
                          const std::string& feed_holder_name = "feed",
                          const std::string& fetch_holder_name = "fetch");

  // This API is very slow.
  void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
                          std::map<std::string, const LoDTensor*>* feed_targets,
                          std::map<std::string, LoDTensor*>* fetch_targets,
                          bool create_local_scope = true, bool create_vars = true,
                          const std::string& feed_holder_name = "feed",
                          const std::string& fetch_holder_name = "fetch");

  std::unique_ptr<ExecutorPrepareContext> PrepareCtxCache(
      const ProgramDesc& program, int block_id,
      const std::vector<std::string>& skip_ref_cnt_vars =
          std::vector<std::string>(),
      bool force_disable_gc = false);

  static std::unique_ptr<ExecutorPrepareContext> Prepare(
      const ProgramDesc& program, int block_id,
      const std::vector<std::string>& skip_ref_cnt_vars =
...
...
@@ -101,15 +116,6 @@ class Executor {
                          bool create_local_scope = true,
                          bool create_vars = true, bool keep_kids = false);

  // This API is very slow.
  void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
                          std::map<std::string, const LoDTensor*>* feed_targets,
                          std::map<std::string, LoDTensor*>* fetch_targets,
                          bool create_local_scope = true, bool create_vars = true,
                          const std::string& feed_holder_name = "feed",
                          const std::string& fetch_holder_name = "fetch");

  void EnableMKLDNN(const ProgramDesc& program);

  void RunFromDataset(const ProgramDesc& main_program, Scope* scope,
...
...
paddle/fluid/framework/hogwild_worker.cc
...
...
@@ -24,7 +24,7 @@ void HogwildWorker::Initialize(const TrainerDesc& desc) {
  fetch_config_ = desc.fetch_config();
  param_ = desc.hogwild_param();
  skip_ops_.resize(param_.skip_ops_size());
- for (size_t i = 0; i < param_.skip_ops_size(); ++i) {
+ for (int i = 0; i < param_.skip_ops_size(); ++i) {
    skip_ops_[i] = param_.skip_ops(i);
  }
  use_cvm_ = desc.use_cvm();
...
...
paddle/fluid/pybind/pybind.cc
...
...
@@ -1032,10 +1032,28 @@ All parameter, weight, gradient are variables in Paddle.
          [](const OperatorBase &op) { return op.OutputVars(false); })
      .def("support_gpu", &OperatorBase::SupportGPU);

  py::class_<framework::ExecutorPrepareContext>(m, "ExecutorPrepareContext")
      .def(py::init<const ProgramDesc &, size_t>());

  py::class_<framework::Executor>(m, "Executor")
      .def(py::init<const platform::Place &>())
      .def("close", &Executor::Close)
-     .def("run_from_dataset", &Executor::RunFromDataset)
+     .def("run_from_dataset", &Executor::RunFromDataset,
+          py::call_guard<py::gil_scoped_release>())
      .def("run_prepared_ctx",
           [](Executor &self, ExecutorPrepareContext *ctx, Scope *scope,
              std::map<std::string, const LoDTensor *> *feed_targets,
              std::map<std::string, LoDTensor *> *fetch_targets,
              bool create_local_scope = true, bool create_vars = true,
              const std::string &feed_holder_name = "feed",
              const std::string &fetch_holder_name = "fetch") {
             pybind11::gil_scoped_release release;
             self.RunPreparedContext(ctx, scope, feed_targets, fetch_targets,
                                     create_local_scope, create_vars,
                                     feed_holder_name, fetch_holder_name);
           })
      .def("prepare_ctx_cache", &Executor::PrepareCtxCache,
           py::call_guard<py::gil_scoped_release>())
      .def("run",
           [](Executor &self, const ProgramDesc &prog, Scope *scope,
              int block_id, bool create_local_scope, bool create_vars,
              const std::vector<std::string> &fetch_vars) {
...
...
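To make the new surface concrete, below is a hedged sketch of driving the two new bindings by hand from Python. It mirrors the call pattern Executor._run uses later in this diff; _add_feed_fetch_ops, _feed_data and _fetch_data are the existing private helpers of paddle.fluid.Executor, the positional arguments are labelled according to the C++ signatures shown above, and the labels assume run_prepared_ctx dispatches to the bool-only RunPreparedContext overload. (Executor._run passes its fetch_list as skip_ref_cnt_vars; variable names are used here instead.)

import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.fc(input=x, size=1)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

feed = {'x': np.ones((4, 1), dtype='float32')}
fetch_list = [y]
scope = fluid.global_scope()

# Existing helper: clone the program and insert feed/fetch operators.
prepared = exe._add_feed_fetch_ops(program=fluid.default_main_program(),
                                   feed=feed, fetch_list=fetch_list,
                                   feed_var_name='feed',
                                   fetch_var_name='fetch')
exe._feed_data(prepared, feed, 'feed', scope)

# New in this commit: build the prepared context once ...
ctx = exe._default_executor.prepare_ctx_cache(
    prepared.desc,  # ProgramDesc that already contains feed/fetch ops
    0,              # block_id
    [y.name],       # skip_ref_cnt_vars: keep fetched variables alive
    False)          # force_disable_gc
# ... and reuse it for every subsequent run.
exe._default_executor.run_prepared_ctx(ctx, scope,
                                       True,   # create_local_scope
                                       True,   # create_vars
                                       False)  # keep_kids

outs = exe._fetch_data(fetch_list, 'fetch', scope)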
python/paddle/fluid/executor.py
...
...
@@ -247,6 +247,10 @@ def _to_name_str(var):
        raise TypeError(str(var) + " should be Variable or str")


def _get_strong_program_cache_key(program, feed, fetch_list):
    return str(id(program)) + _get_program_cache_key(feed, fetch_list)


def _get_program_cache_key(feed, fetch_list):
    feed_var_names = list(feed.keys())
    fetch_var_names = list(map(_to_name_str, fetch_list))
...
...
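As a small illustration (not from the diff; it assumes these module-private helpers are importable from paddle.fluid.executor), the new key is "strong" because the old key only looks at feed and fetch names, so two different Program objects with identical feeds and fetches would share one cache slot; prefixing str(id(program)) keeps their cached programs and contexts apart.

import paddle.fluid as fluid
from paddle.fluid.executor import (_get_program_cache_key,
                                   _get_strong_program_cache_key)

prog_a, prog_b = fluid.Program(), fluid.Program()
feed = {'x': None}     # only the feed *names* enter the key
fetch_list = ['out']   # plain names are accepted by _to_name_str

# Old key: identical for both programs.
assert _get_program_cache_key(feed, fetch_list) == \
       _get_program_cache_key(feed, fetch_list)

# New key: id(program) distinguishes the two Program objects.
assert _get_strong_program_cache_key(prog_a, feed, fetch_list) != \
       _get_strong_program_cache_key(prog_b, feed, fetch_list)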
@@ -356,17 +360,24 @@ class Executor(object):
    def __init__(self, place):
        self.place = place
        self.program_caches = dict()
        self.ctx_caches = dict()
        p = core.Place()
        p.set_place(self.place)
        self._default_executor = core.Executor(p)
        self._closed = False

    def _get_ctx_cache(self, program_cache_key):
        return self.ctx_caches.get(program_cache_key, None)

    def _get_program_cache(self, program_cache_key):
        return self.program_caches.get(program_cache_key, None)

    def _add_program_cache(self, program_cache_key, program):
        self.program_caches[program_cache_key] = program

    def _add_ctx_cache(self, ctx_cache_key, ctx):
        self.ctx_caches[ctx_cache_key] = ctx

    def _add_feed_fetch_ops(self, program, feed, fetch_list, feed_var_name,
                            fetch_var_name):
        tmp_program = program.clone()
...
...
@@ -645,6 +656,7 @@ class Executor(object):
            # performance.
            # TODO(panyx0718): executor should be able to run graph.
            assert program._program, "CompiledProgram is compiled from graph, can only run with_data_parallel."
            # use_program_cache is not valid with CompiledProgram
            return self._run(
                program._program,
                self._default_executor,
...
...
@@ -654,7 +666,7 @@ class Executor(object):
                fetch_var_name=fetch_var_name,
                scope=scope,
                return_numpy=return_numpy,
-               use_program_cache=use_program_cache)
+               use_program_cache=False)

    def _run(self, program, exe, feed, fetch_list, feed_var_name,
             fetch_var_name, scope, return_numpy, use_program_cache):
...
...
@@ -677,9 +689,10 @@ class Executor(object):
"Executor requires Program as its Parameter. But you passed in %s"
%
(
type
(
program
)))
cache_key
=
_get_
program_cache_key
(
feed
,
fetch_list
)
cache_key
=
_get_
strong_program_cache_key
(
program
,
feed
,
fetch_list
)
if
use_program_cache
:
cached_program
=
self
.
_get_program_cache
(
cache_key
)
cached_ctx
=
self
.
_get_ctx_cache
(
cache_key
)
if
cached_program
is
None
:
cached_program
=
self
.
_add_feed_fetch_ops
(
program
=
program
,
...
...
@@ -688,7 +701,11 @@ class Executor(object):
                    feed_var_name=feed_var_name,
                    fetch_var_name=fetch_var_name)
                self._add_program_cache(cache_key, cached_program)
                cached_ctx = self._default_executor.prepare_ctx_cache(
                    cached_program.desc, 0, fetch_list, False)
                self._add_ctx_cache(cache_key, cached_ctx)
            program = cached_program
            ctx = cached_ctx
        else:
            self.program_caches.pop(cache_key, None)
            program = self._add_feed_fetch_ops(
...
...
@@ -699,7 +716,10 @@ class Executor(object):
                fetch_var_name=fetch_var_name)

        self._feed_data(program, feed, feed_var_name, scope)
        if not use_program_cache:
            exe.run(program.desc, scope, 0, True, True, fetch_var_name)
        else:
            exe.run_prepared_ctx(ctx, scope, True, True, False)
        outs = self._fetch_data(fetch_list, fetch_var_name, scope)
        if return_numpy:
            outs = as_numpy(outs)
...
...
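One behavioural detail of the _run changes above is worth spelling out. The sketch continues the setup from the example near the top of this page, with prog = fluid.default_main_program():

# use_program_cache=False not only bypasses the cache, it also pops the
# cached program stored under the same key, so the next cached call has
# to prepare the program and its context again.
exe.run(prog, feed=feed, fetch_list=[y], use_program_cache=True)   # prepare + cache
exe.run(prog, feed=feed, fetch_list=[y], use_program_cache=False)  # uncached; pops cached program
exe.run(prog, feed=feed, fetch_list=[y], use_program_cache=True)   # cache miss: prepares again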