Commit b6e84806 (unverified)
Authored by Ruibiao Chen on Jul 26, 2022; committed via GitHub on Jul 26, 2022
Parent: 9841b308

Merge kProgramDescs in GraphToProgram (#44526)
Showing 9 changed files with 98 additions and 76 deletions (+98 -76):

  paddle/fluid/framework/CMakeLists.txt           +5   -5
  paddle/fluid/framework/ir/CMakeLists.txt        +1   -1
  paddle/fluid/framework/ir/graph_helper.cc       +25  -9
  paddle/fluid/framework/ir/pass.cc               +1   -56
  paddle/fluid/framework/program_utils.cc         +57  -1
  paddle/fluid/framework/program_utils.h          +4   -1
  paddle/fluid/framework/program_utils_test.cc    +1   -1
  python/paddle/fluid/executor.py                 +2   -0
  tools/parallel_UT_rule.py                       +2   -2
paddle/fluid/framework/CMakeLists.txt

@@ -517,13 +517,13 @@ cc_test(
   DEPS op_call_stack)
 
 cc_library(
-  program_processing
-  SRCS program_processing.cc
+  program_utils
+  SRCS program_utils.cc
   DEPS proto_desc)
 cc_test(
-  program_processing_test
-  SRCS program_processing_test.cc
-  DEPS proto_desc program_processing)
+  program_utils_test
+  SRCS program_utils_test.cc
+  DEPS proto_desc program_utils)
 
 if(WITH_GPU)
   nv_test(
paddle/fluid/framework/ir/CMakeLists.txt

@@ -67,7 +67,7 @@ cc_library(
 cc_library(
   graph_helper
   SRCS graph_helper.cc
-  DEPS graph scale_loss_grad_op_handle)
+  DEPS graph program_utils scale_loss_grad_op_handle)
 cc_library(
   pass
   SRCS pass.cc
paddle/fluid/framework/ir/graph_helper.cc

@@ -19,7 +19,9 @@ limitations under the License. */
+#include "paddle/fluid/framework/details/multi_devices_helper.h"
 #include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/framework/op_proto_maker.h"
+#include "paddle/fluid/framework/program_utils.h"
 
 DECLARE_bool(convert_all_blocks);
 PADDLE_DEFINE_EXPORTED_string(print_sub_graph_dir,

@@ -559,20 +561,27 @@ static void GraphToBlock(const Graph &graph,
             << vars2remove.size() << " nodes";
   }
 
+  std::vector<proto::VarDesc> vars_in_graph;
+  for (Node *node : graph.Nodes()) {
+    if (node->IsVar() && node->Var() &&
+        node->GetVarNodeBlockId() == graph.GetBlockId()) {
+      vars_in_graph.emplace_back(*node->Var()->Proto());
+    }
+  }
+
+  // add vars_in_graph to blcok
   block->clear_vars();
   std::unordered_set<std::string> visited_vars;
-  for (Node *n : graph.Nodes()) {
-    if (n->IsVar()) {
-      if (n->Var() && visited_vars.count(n->Var()->Name()) == 0 &&
-          !vars2remove.count(n->Var()->Name()) &&
-          n->GetVarNodeBlockId() == graph.GetBlockId()) {
-        visited_vars.insert(n->Var()->Name());
-        block->add_vars()->MergeFrom(*n->Var()->Proto());
-      }
-    }
-  }
+  for (proto::VarDesc &var : vars_in_graph) {
+    const std::string &var_name = var.name();
+    if (visited_vars.find(var_name) == visited_vars.end() &&
+        vars2remove.find(var_name) == vars2remove.end()) {
+      block->add_vars()->MergeFrom(var);
+      visited_vars.insert(var_name);
+    }
+  }
   block->clear_ops();
 
   std::vector<Node *> nodes;
   if (sort_kind != nullptr) {
     // Inference Memory Optimize relays on this branch.

@@ -630,6 +639,13 @@ void GraphToProgram(const Graph &graph,
   }
 
   program->CopyFrom(program_pb);
+
+  if (graph.Has(details::kProgramDescs)) {
+    details::ProgramDescs program_descs =
+        graph.Get<details::ProgramDescs>(details::kProgramDescs);
+    VLOG(8) << "Merge main programs";
+    MergePrograms(program, program_descs, /*append=*/false);
+  }
 }
 
 static std::vector<std::vector<ir::Node::Dep>> GetOpDependencies(
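The variable-collection change in GraphToBlock above is, at its core, a dedup-by-name pass: gather every VarDesc that belongs to the graph's block, then emit each name at most once while skipping names scheduled for removal. A minimal standalone C++ sketch of that idea follows; "Var" and "Block" are hypothetical stand-ins for Paddle's proto::VarDesc and proto::BlockDesc, and only the standard library is used.

// Standalone sketch of the dedup-by-name collection performed by the new
// GraphToBlock body (illustrative types, not Paddle's).
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct Var { std::string name; };
struct Block { std::vector<Var> vars; };

void AddVarsToBlock(const std::vector<Var> &vars_in_graph,
                    const std::unordered_set<std::string> &vars2remove,
                    Block *block) {
  block->vars.clear();
  std::unordered_set<std::string> visited_vars;
  for (const Var &var : vars_in_graph) {
    // Mirror the visited_vars / vars2remove checks in the diff: each name is
    // emitted at most once and removed names are skipped.
    if (visited_vars.count(var.name) == 0 && vars2remove.count(var.name) == 0) {
      block->vars.push_back(var);
      visited_vars.insert(var.name);
    }
  }
}

int main() {
  std::vector<Var> graph_vars = {{"x"}, {"y"}, {"x"}, {"tmp"}};
  Block block;
  AddVarsToBlock(graph_vars, /*vars2remove=*/{"tmp"}, &block);
  for (const Var &v : block.vars) std::cout << v.name << "\n";  // prints x, y
}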
paddle/fluid/framework/ir/pass.cc

@@ -18,6 +18,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/ir/graph_helper.h"
 #include "paddle/fluid/framework/op_proto_maker.h"
+#include "paddle/fluid/framework/program_utils.h"
 
 namespace paddle {
 namespace framework {

@@ -78,62 +79,6 @@ Graph *Pass::Apply(Graph *graph) const {
   return graph;
 }
 
-template <typename Container, typename Visitor>
-static void VisitAllElements(Container &&container,
-                             Visitor &&visitor,
-                             bool reverse) {
-  if (reverse) {
-    std::for_each(container.rbegin(), container.rend(), visitor);
-  } else {
-    std::for_each(container.begin(), container.end(), visitor);
-  }
-}
-
-static void MergePrograms(ProgramDesc *dst,
-                          const details::ProgramDescs &srcs,
-                          bool append) {
-  PADDLE_ENFORCE_NOT_NULL(
-      dst, platform::errors::InvalidArgument("Dst program must be provided."));
-  bool reverse = !append;
-
-  auto create_var_visitor = [dst](const ProgramDesc &src) {
-    PADDLE_ENFORCE_EQ(
-        src.Size(),
-        1,
-        platform::errors::Unimplemented("MergePrograms can only support to "
-                                        "merge program with only one block."));
-    const auto &src_block = src.Block(0);
-    auto *dst_block = dst->MutableBlock(0);
-    for (const auto *src_new_var : src_block.AllVars()) {
-      if (dst_block->FindVar(src_new_var->Name())) continue;
-      auto *dst_new_var = dst_block->Var(src_new_var->Name());
-      *dst_new_var = *src_new_var;
-      VLOG(10) << "Create new variable " << dst_new_var->Name();
-    }
-  };
-  VisitAllElements(srcs, create_var_visitor, reverse);
-
-  auto create_op_visitor = [dst, reverse](const ProgramDesc &src) {
-    auto ops = src.Block(0).AllOps();
-    auto copy_op_visitor = [dst, reverse](const OpDesc *src_op) {
-      auto *dst_block = dst->MutableBlock(0);
-      auto *op = reverse ? dst_block->PrependOp() : dst_block->AppendOp();
-      op->CopyFrom(*src_op);
-      VLOG(10) << (reverse ? "Prepend" : "Append") << " op " << op->Type();
-      // FIXME(zjl): some passes does not add VarDesc to program,
-      // we should fix this bug later...
-      for (const auto &in_var_name : op->InputArgumentNames()) {
-        dst_block->Var(in_var_name);
-      }
-      for (const auto &out_var_name : op->OutputArgumentNames()) {
-        dst_block->Var(out_var_name);
-      }
-    };
-    VisitAllElements(ops, copy_op_visitor, reverse);
-  };
-  VisitAllElements(srcs, create_op_visitor, reverse);
-}
-
 static void FillNotSpecifiedOpRole(const ProgramDesc &main_program) {
   for (size_t block_idx = 0; block_idx < main_program.Size(); ++block_idx) {
     auto ops = main_program.Block(block_idx).AllOps();
paddle/fluid/framework/program_processing.cc → paddle/fluid/framework/program_utils.cc

@@ -12,13 +12,69 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/framework/program_processing.h"
+#include "paddle/fluid/framework/program_utils.h"
 
 #include "paddle/fluid/framework/block_desc.h"
 
 namespace paddle {
 namespace framework {
 
+template <typename Container, typename Visitor>
+inline void VisitAllElements(Container &&container,
+                             Visitor &&visitor,
+                             bool reverse) {
+  if (reverse) {
+    std::for_each(container.rbegin(), container.rend(), visitor);
+  } else {
+    std::for_each(container.begin(), container.end(), visitor);
+  }
+}
+
+void MergePrograms(ProgramDesc *dst,
+                   const std::vector<ProgramDesc> &srcs,
+                   bool append) {
+  PADDLE_ENFORCE_NOT_NULL(
+      dst, platform::errors::InvalidArgument("Dst program must be provided."));
+  bool reverse = !append;
+
+  auto create_var_visitor = [dst](const ProgramDesc &src) {
+    PADDLE_ENFORCE_EQ(
+        src.Size(),
+        1,
+        platform::errors::Unimplemented("MergePrograms can only support to "
+                                        "merge program with only one block."));
+    const auto &src_block = src.Block(0);
+    auto *dst_block = dst->MutableBlock(0);
+    for (const auto *src_new_var : src_block.AllVars()) {
+      if (dst_block->FindVar(src_new_var->Name())) continue;
+      auto *dst_new_var = dst_block->Var(src_new_var->Name());
+      *dst_new_var = *src_new_var;
+      VLOG(10) << "Create new variable " << dst_new_var->Name();
+    }
+  };
+  VisitAllElements(srcs, create_var_visitor, reverse);
+
+  auto create_op_visitor = [dst, reverse](const ProgramDesc &src) {
+    auto ops = src.Block(0).AllOps();
+    auto copy_op_visitor = [dst, reverse](const OpDesc *src_op) {
+      auto *dst_block = dst->MutableBlock(0);
+      auto *op = reverse ? dst_block->PrependOp() : dst_block->AppendOp();
+      op->CopyFrom(*src_op);
+      VLOG(10) << (reverse ? "Prepend" : "Append") << " op " << op->Type();
+      // FIXME(zjl): some passes does not add VarDesc to program,
+      // we should fix this bug later...
+      for (const auto &in_var_name : op->InputArgumentNames()) {
+        dst_block->Var(in_var_name);
+      }
+      for (const auto &out_var_name : op->OutputArgumentNames()) {
+        dst_block->Var(out_var_name);
+      }
    };
+    VisitAllElements(ops, copy_op_visitor, reverse);
+  };
+  VisitAllElements(srcs, create_op_visitor, reverse);
+}
+
 void ProgramProcessor::GetInputsOutputsInBlock(
     const BlockDesc &current_block,
     std::set<std::string> *inner_inputs,
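The append/prepend bookkeeping in MergePrograms is easiest to see on plain containers. Below is a minimal standalone sketch of the same pattern, assuming each "program" is just a list of op names: with append == true the source programs are visited front to back and their ops appended; with append == false (as GraphToProgram calls it) they are visited back to front and their ops prepended, so the relative order of the merged ops is preserved either way. Only standard-library types are used; the names here are illustrative, not Paddle's.

// Standalone sketch of the VisitAllElements + MergePrograms ordering logic.
// A "program" is modelled as a list of op names; Paddle's ProgramDesc/OpDesc
// are not used here.
#include <algorithm>
#include <deque>
#include <iostream>
#include <string>
#include <vector>

using Program = std::vector<std::string>;

template <typename Container, typename Visitor>
void VisitAllElements(Container &&container, Visitor &&visitor, bool reverse) {
  if (reverse) {
    std::for_each(container.rbegin(), container.rend(), visitor);
  } else {
    std::for_each(container.begin(), container.end(), visitor);
  }
}

void MergePrograms(std::deque<std::string> *dst,
                   const std::vector<Program> &srcs, bool append) {
  bool reverse = !append;  // prepend => walk sources (and their ops) backwards
  auto copy_ops = [dst, reverse](const Program &src) {
    auto copy_op = [dst, reverse](const std::string &op) {
      if (reverse) dst->push_front(op); else dst->push_back(op);
    };
    VisitAllElements(src, copy_op, reverse);
  };
  VisitAllElements(srcs, copy_ops, reverse);
}

int main() {
  std::deque<std::string> main_ops = {"main_op"};
  std::vector<Program> extra = {{"a1", "a2"}, {"b1"}};
  MergePrograms(&main_ops, extra, /*append=*/false);  // as in GraphToProgram
  for (const auto &op : main_ops) std::cout << op << " ";
  // prints: a1 a2 b1 main_op
}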
paddle/fluid/framework/program_processing.h → paddle/fluid/framework/program_utils.h

@@ -18,7 +18,9 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
 class ProgramDesc;
 
+void MergePrograms(ProgramDesc *dst,
+                   const std::vector<ProgramDesc> &srcs, bool append);
 
 class ProgramProcessor {
  public:

@@ -30,5 +32,6 @@ class ProgramProcessor {
   void AddDepToBlockOp(const BlockDesc &block);
 };
 
 }  // namespace framework
 }  // namespace paddle
paddle/fluid/framework/program_processing_test.cc → paddle/fluid/framework/program_utils_test.cc

@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include "paddle/fluid/framework/program_processing.h"
+#include "paddle/fluid/framework/program_utils.h"
 
 #include "gtest/gtest-message.h"
 #include "gtest/gtest-test-part.h"
python/paddle/fluid/executor.py

@@ -1445,9 +1445,11 @@ class Executor(object):
         if key not in self._executor_cache._cached_executors:
             # To apply IR pass, compile the Program to IrGraph and convert it back to Program
             if isinstance(program, compiler.CompiledProgram):
+                # print(f"Program before convert:\n {inner_program}", flush=True)
                 program._compile(scope, self.place)
                 ir_graph = framework.IrGraph(program._graph)
                 inner_program = ir_graph.to_program()
+                # print(f"Program after convert:\n {inner_program}", flush=True)
             else:
                 from paddle.incubate.autograd import prim_enabled, prim2orig
                 if prim_enabled() and program == default_main_program():
tools/parallel_UT_rule.py

@@ -470,7 +470,7 @@ HIGH_PARALLEL_JOB_NEW = [
     'cipher_utils_test',
     'test_program_code',
     'test_save_model_without_var',
-    'program_processing_test',
+    'program_utils_test',
     'test_fleet_distributed_strategy',
     'test_hybrid_parallel_topology',
     'test_ascend_trigger',

@@ -1719,7 +1719,7 @@ CPU_PARALLEL_JOB = [
     'test_sum_api',
     'test_op_compat_sensible_pass',
     'test_generate_pass_cc',
-    'program_processing_test',
+    'program_utils_test',
     'build_strategy_test',
     'test_fc_rnn_mkldnn_fuse_pass',
     'scope_guard_test',