机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit a528a971
Authored Oct 10, 2017 by Yang Yang
remove prune as member function to function
Parent: 2e7cd201
Showing 2 changed files with 71 additions and 72 deletions (+71 -72):
  paddle/framework/executor.cc  +60 -60
  paddle/framework/executor.h   +11 -12
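In short, the commit moves Prune out of the Executor class and makes it a plain function in namespace paddle::framework; its parameters and return type stay the same. A minimal before/after sketch of the declaration, condensed from the diff below:

// Before this commit: Prune is a protected member of Executor and
// needs an Executor instance.
class Executor {
 protected:
  std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id);
};

// After this commit: Prune is a namespace-scope function declared in
// executor.h, so it can be called without an Executor instance.
std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id);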
paddle/framework/executor.cc
@@ -32,66 +32,7 @@ namespace framework {
 const std::string kFeedOpType = "feed";
 const std::string kFetchOpType = "fetch";
 
-Executor::Executor(const std::vector<platform::Place>& places) {
-  PADDLE_ENFORCE_GT(places.size(), 0);
-  device_contexts_.resize(places.size());
-  for (size_t i = 0; i < places.size(); i++) {
-    if (platform::is_cpu_place(places[i])) {
-      device_contexts_[i] = new platform::CPUDeviceContext(
-          boost::get<platform::CPUPlace>(places[i]));
-    } else if (platform::is_gpu_place(places[i])) {
-#ifdef PADDLE_WITH_CUDA
-      device_contexts_[i] = new platform::CUDADeviceContext(
-          boost::get<platform::GPUPlace>(places[i]));
-#else
-      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
-#endif
-    }
-  }
-}
-
-Executor::~Executor() {
-  for (auto& device_context : device_contexts_) {
-    delete device_context;
-  }
-}
-
-void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
-  // TODO(tonyyang-svail):
-  //   - only runs on the first device (i.e. no interdevice communication)
-  //   - will change to use multiple blocks for RNN op and Cond Op
-  PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id);
-  auto& block = pdesc.blocks(block_id);
-  auto& device = device_contexts_[0];
-
-  // Instantiate all the vars in the global scope
-  for (auto& var : block.vars()) {
-    scope->NewVar(var.name());
-  }
-
-  Scope& local_scope = scope->NewScope();
-
-  std::vector<bool> should_run = Prune(pdesc, block_id);
-  PADDLE_ENFORCE_EQ(should_run.size(), static_cast<size_t>(block.ops_size()));
-  for (size_t i = 0; i < should_run.size(); ++i) {
-    if (should_run[i]) {
-      for (auto& var : block.ops(i).outputs()) {
-        for (auto& argu : var.arguments()) {
-          if (local_scope.FindVar(argu) == nullptr) {
-            local_scope.NewVar(argu);
-          }
-        }
-      }
-      auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i));
-      op->Run(local_scope, *device);
-    }
-  }
-
-  // TODO(tonyyang-svail):
-  //  - Destroy local_scope
-}
-
-std::vector<bool> Executor::Prune(const ProgramDesc& pdesc, int block_id) {
+std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id) {
   // TODO(tonyyang-svail):
   //    - will change to use multiple blocks for RNN op and Cond Op
@@ -159,5 +100,64 @@ std::vector<bool> Executor::Prune(const ProgramDesc& pdesc, int block_id) {
   return should_run;
 }
 
+Executor::Executor(const std::vector<platform::Place>& places) {
+  PADDLE_ENFORCE_GT(places.size(), 0);
+  device_contexts_.resize(places.size());
+  for (size_t i = 0; i < places.size(); i++) {
+    if (platform::is_cpu_place(places[i])) {
+      device_contexts_[i] = new platform::CPUDeviceContext(
+          boost::get<platform::CPUPlace>(places[i]));
+    } else if (platform::is_gpu_place(places[i])) {
+#ifdef PADDLE_WITH_CUDA
+      device_contexts_[i] = new platform::CUDADeviceContext(
+          boost::get<platform::GPUPlace>(places[i]));
+#else
+      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
+#endif
+    }
+  }
+}
+
+Executor::~Executor() {
+  for (auto& device_context : device_contexts_) {
+    delete device_context;
+  }
+}
+
+void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
+  // TODO(tonyyang-svail):
+  //   - only runs on the first device (i.e. no interdevice communication)
+  //   - will change to use multiple blocks for RNN op and Cond Op
+  PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id);
+  auto& block = pdesc.blocks(block_id);
+  auto& device = device_contexts_[0];
+
+  // Instantiate all the vars in the global scope
+  for (auto& var : block.vars()) {
+    scope->NewVar(var.name());
+  }
+
+  Scope& local_scope = scope->NewScope();
+
+  std::vector<bool> should_run = Prune(pdesc, block_id);
+  PADDLE_ENFORCE_EQ(should_run.size(), static_cast<size_t>(block.ops_size()));
+  for (size_t i = 0; i < should_run.size(); ++i) {
+    if (should_run[i]) {
+      for (auto& var : block.ops(i).outputs()) {
+        for (auto& argu : var.arguments()) {
+          if (local_scope.FindVar(argu) == nullptr) {
+            local_scope.NewVar(argu);
+          }
+        }
+      }
+      auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i));
+      op->Run(local_scope, *device);
+    }
+  }
+
+  // TODO(tonyyang-svail):
+  //  - Destroy local_scope
+}
+
 }  // namespace framework
 }  // namespace paddle
paddle/framework/executor.h
@@ -36,21 +36,20 @@ class Executor {
    */
   void Run(const ProgramDesc&, Scope*, int);
 
- protected:
-  /* @Brief
-   * Pruning the graph
-   *
-   * @param
-   *  ProgramDesc
-   *
-   * @return
-   *  vector<bool> Same size as ops. Indicates whether an op should be run.
-   */
-  std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id);
-
  private:
   std::vector<platform::DeviceContext*> device_contexts_;
 };
 
+/* @Brief
+ * Pruning the graph
+ *
+ * @param
+ *  ProgramDesc
+ *
+ * @return
+ *  vector<bool> Same size as ops. Indicates whether an op should be run.
+ */
+std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id);
+
 }  // namespace framework
 }  // namespace paddle
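Because Prune is now declared at namespace scope in executor.h, it can be called without constructing an Executor. A minimal caller sketch under that assumption; CountRunnableOps is a hypothetical helper written for illustration and is not part of this commit:

#include <cstddef>
#include <vector>

#include "paddle/framework/executor.h"

// Hypothetical helper (not in this commit): count how many ops of the given
// block the executor would actually run, according to the now-free Prune.
size_t CountRunnableOps(const paddle::framework::ProgramDesc& pdesc,
                        int block_id) {
  std::vector<bool> should_run = paddle::framework::Prune(pdesc, block_id);
  size_t count = 0;
  for (bool run : should_run) {
    if (run) ++count;
  }
  return count;  // should_run has one entry per op in the block
}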