Repository: 机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Commit 8c11d3fe
Authored Nov 06, 2018 by Xin Pan

    clean up

Parent: 0a896505

Showing 8 changed files with 7 additions and 10 deletions (+7 -10)
paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc   +1 -1
paddle/fluid/framework/details/multi_devices_graph_check_pass.cc     +1 -1
paddle/fluid/framework/details/multi_devices_graph_pass.cc           +1 -0
paddle/fluid/framework/details/multi_devices_graph_print_pass.cc     +1 -1
paddle/fluid/framework/details/reference_count_pass.cc               +1 -1
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc        +1 -1
paddle/fluid/framework/ir/graph_helper.h                             +1 -1
python/paddle/fluid/tests/unittests/test_reader_reset.py             +0 -4
paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.cc

@@ -33,7 +33,7 @@ FastThreadedSSAGraphExecutor::FastThreadedSSAGraphExecutor(
       pool_(strategy.num_threads_ +
             1),  // add one more thread for generate op_deps
       fetch_ctxs_(places) {
-  for (auto &op : ir::GetFilteredNodes<OpHandleBase>(*graph_)) {
+  for (auto &op : ir::FilterByNodeWrapper<OpHandleBase>(*graph_)) {
     int dep = static_cast<int>(op->NotReadyInputSize());
     op_deps_.emplace(op, dep);
     if (dep == 0) {
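
The hunk above seeds a per-op dependency count from NotReadyInputSize() and treats ops whose count is zero as immediately runnable. Below is a minimal, self-contained sketch of that bookkeeping; Op, op_deps and ready_ops here are stand-ins for illustration, not Paddle's real OpHandleBase or the executor's members.

// Minimal sketch of the dependency-count seeding shown in the hunk above,
// using stand-in types (not Paddle's real classes): each op stores how many
// inputs are not ready yet, and ops whose count is zero are runnable at once.
#include <iostream>
#include <queue>
#include <string>
#include <unordered_map>
#include <vector>

struct Op {
  std::string name;
  int not_ready_inputs;  // stand-in for OpHandleBase::NotReadyInputSize()
};

int main() {
  std::vector<Op> ops = {{"read", 0}, {"fc", 1}, {"softmax", 1}};

  std::unordered_map<Op *, int> op_deps;  // plays the role of op_deps_
  std::queue<Op *> ready_ops;             // ops with no pending inputs

  for (auto &op : ops) {
    int dep = op.not_ready_inputs;
    op_deps.emplace(&op, dep);
    if (dep == 0) ready_ops.push(&op);  // same "dep == 0" seeding as above
  }

  while (!ready_ops.empty()) {
    std::cout << ready_ops.front()->name << " is ready" << std::endl;
    ready_ops.pop();
  }
  return 0;
}
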
paddle/fluid/framework/details/multi_devices_graph_check_pass.cc

@@ -46,7 +46,7 @@ bool SSAGraghBuilderWithChecker::IsValidGraph(const ir::Graph *graph) const {
     insert_pending_var(var);
   }

-  for (OpHandleBase *op : ir::GetFilteredNodes<OpHandleBase>(*graph)) {
+  for (OpHandleBase *op : ir::FilterByNodeWrapper<OpHandleBase>(*graph)) {
     if (op->Inputs().empty()) {
       ready_ops.insert(op);
     } else {
paddle/fluid/framework/details/multi_devices_graph_pass.cc

@@ -36,6 +36,7 @@ namespace framework {
 namespace details {
 namespace {
+// TODO(panyx0718): Clean this up as well.
 // all operators. NOTE that even we use a vector here, the operators is
 // unordered.
 typedef std::vector<OpHandleBase *> GraphOps;
paddle/fluid/framework/details/multi_devices_graph_print_pass.cc

@@ -63,7 +63,7 @@ void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph,
   });

   size_t op_id = 0;
-  for (auto &op : ir::GetFilteredNodes<OpHandleBase>(graph)) {
+  for (auto &op : ir::FilterByNodeWrapper<OpHandleBase>(graph)) {
     std::string op_name = "op_" + std::to_string(op_id++);
     sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]"
          << std::endl;
paddle/fluid/framework/details/reference_count_pass.cc

@@ -157,7 +157,7 @@ std::unique_ptr<ir::Graph> ReferenceCountPass::ApplyImpl(
     }
   };

-  auto all_ops = ir::GetFilteredNodes<OpHandleBase>(*graph);
+  auto all_ops = ir::FilterByNodeWrapper<OpHandleBase>(*graph);
   for (auto &op : all_ops) {
     auto in_var_names = get_ref_cnts_from_compute_op(op, op->Inputs());
     auto out_var_names = get_ref_cnts_from_compute_op(op, op->Outputs());
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc

@@ -60,7 +60,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
     InsertPendingVar(&pending_vars, ready_vars.get(), var);
   }

-  for (auto &op : ir::GetFilteredNodes<OpHandleBase>(*graph_)) {
+  for (auto &op : ir::FilterByNodeWrapper<OpHandleBase>(*graph_)) {
     if (op->Inputs().empty()) {  // Special case, Op has no input.
       ready_ops.insert(op);
     } else {
paddle/fluid/framework/ir/graph_helper.h

@@ -38,7 +38,7 @@ std::map<ir::Node *, std::unordered_set<ir::Node *>> BuildOperationAdjList(
     const Graph &graph);

 template <typename T>
-std::vector<T *> GetFilteredNodes(const Graph &graph) {
+std::vector<T *> FilterByNodeWrapper(const Graph &graph) {
   std::vector<T *> ret;
   for (ir::Node *n : graph.Nodes()) {
     if (n->IsWrappedBy<T>()) ret.push_back(&n->Wrapper<T>());
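
For reference, the helper renamed here collects the typed wrapper objects attached to graph nodes. The sketch below illustrates that pattern in a self-contained form; Node, Graph and OpHandleBase are simplified stand-ins, not Paddle's real ir::Node, ir::Graph or OpHandleBase.

// A minimal, self-contained sketch of the pattern behind
// ir::FilterByNodeWrapper (formerly GetFilteredNodes): graph nodes may carry
// a typed "wrapper" object, and the helper collects the wrappers of a
// requested type.  All types here are simplified stand-ins.
#include <iostream>
#include <memory>
#include <string>
#include <typeindex>
#include <vector>

struct OpHandleBase {  // stand-in for the real OpHandleBase
  std::string name;
};

class Node {  // simplified stand-in for ir::Node
 public:
  template <typename T>
  void WrapBy(T *wrapper) {  // attach a typed wrapper to this node
    wrapper_ = wrapper;
    wrapper_type_ = std::type_index(typeid(T));
  }

  template <typename T>
  bool IsWrappedBy() const {
    return wrapper_ != nullptr && wrapper_type_ == std::type_index(typeid(T));
  }

  template <typename T>
  T &Wrapper() const {
    return *static_cast<T *>(wrapper_);
  }

 private:
  void *wrapper_ = nullptr;
  std::type_index wrapper_type_ = std::type_index(typeid(void));
};

struct Graph {  // simplified stand-in for ir::Graph
  std::vector<std::unique_ptr<Node>> nodes;
};

// Same shape as the renamed helper: walk all nodes, keep those wrapped by T.
template <typename T>
std::vector<T *> FilterByNodeWrapper(const Graph &graph) {
  std::vector<T *> ret;
  for (const auto &n : graph.nodes) {
    if (n->IsWrappedBy<T>()) ret.push_back(&n->Wrapper<T>());
  }
  return ret;
}

int main() {
  Graph g;
  OpHandleBase scale_op{"scale"}, sum_op{"sum"};

  g.nodes.push_back(std::make_unique<Node>());  // a plain (variable) node
  g.nodes.push_back(std::make_unique<Node>());
  g.nodes.push_back(std::make_unique<Node>());
  g.nodes[1]->WrapBy(&scale_op);  // only op nodes carry an OpHandleBase wrapper
  g.nodes[2]->WrapBy(&sum_op);

  for (OpHandleBase *op : FilterByNodeWrapper<OpHandleBase>(g)) {
    std::cout << op->name << std::endl;  // prints "scale" then "sum"
  }
  return 0;
}
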
python/paddle/fluid/tests/unittests/test_reader_reset.py

@@ -14,7 +14,6 @@
 from __future__ import print_function

 import os
-import sys
 import paddle.fluid as fluid
 import paddle
 import numpy as np

@@ -91,13 +90,11 @@ class TestReaderReset(unittest.TestCase):
             try:
                 data_val, label_val = parallel_exe.run(fetch_list,
                                                        return_numpy=True)
-                sys.stderr.write('fetched %s\n' % label_val)
                 ins_num = data_val.shape[0]
                 broadcasted_label = np.ones((ins_num, ) + tuple(
                     self.ins_shape)) * label_val.reshape((ins_num, 1))
                 self.assertEqual(data_val.all(), broadcasted_label.all())
                 for l in label_val:
-                    sys.stderr.write('label_val: %s\n' % l[0])
                     self.assertFalse(data_appeared[l[0]])
                     data_appeared[l[0]] = True

@@ -107,7 +104,6 @@ class TestReaderReset(unittest.TestCase):
                 data_appeared = data_appeared[:-parallel_exe.device_count *
                                               self.batch_size]
                 for i in data_appeared:
-                    sys.stderr.write('appeared %s\n' % i)
                     self.assertTrue(i)
                 if pass_count < self.test_pass_num:
                     data_appeared = [False] * self.total_ins_num