BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 6f0e630c
Authored Feb 05, 2018 by Kexin Zhao
fix prune and program desc constructor
Parent: c3d27b15

Showing 6 changed files with 85 additions and 25 deletions.

paddle/framework/block_desc.cc (+2, -0)
paddle/framework/op_desc.cc (+15, -2)
paddle/framework/program_desc.cc (+18, -0)
paddle/framework/prune.cc (+24, -12)
python/paddle/v2/fluid/io.py (+6, -0)
python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py (+20, -11)

paddle/framework/block_desc.cc

@@ -155,6 +155,8 @@ BlockDesc::BlockDesc(ProgramDesc *prog, proto::BlockDesc *desc)
   for (const proto::OpDesc &op_desc : desc_->ops()) {
     ops_.emplace_back(new OpDesc(op_desc, prog, this));
   }
+  std::cout << "Constructed block idx " << desc->idx() << " from protobuf str"
+            << std::endl;
 }
 
 BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc,

paddle/framework/op_desc.cc

@@ -124,11 +124,24 @@ OpDesc::OpDesc(const proto::OpDesc &desc, ProgramDesc *prog, BlockDesc *block)
   // restore attrs_
   for (const proto::OpDesc::Attr &attr : desc_.attrs()) {
     std::string attr_name = attr.name();
+    // we use a trick to handle attr.type() is BLOCK here, because at this
+    // moment the sub_block hasn't been added to ProgramDesc's vector<Block>
+    // so we cast the block_idx to a dummy BlockDesc pointer
     if (attr.type() != proto::AttrType::BLOCK) {
       attrs_[attr_name] = GetAttrValue(attr);
     } else {
-      auto bid = attr.block_idx();
-      attrs_[attr_name] = prog->MutableBlock(bid);
+      size_t blk_idx = attr.block_idx();
+      if (blk_idx < prog->Size()) {
+        attrs_[attr_name] = prog->MutableBlock(blk_idx);
+      } else {
+        std::cout << "Setting blockdesc attribute for id " << blk_idx
+                  << std::endl;
+        attrs_[attr_name] = reinterpret_cast<BlockDesc *>(blk_idx);
+        std::cout << "Testing reinterpret_cast result is "
+                  << reinterpret_cast<size_t>(
+                         boost::get<BlockDesc *>(attrs_[attr_name]))
+                  << std::endl;
+      }
     }
   }
   this->block_ = block;

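The second debug print above checks that the disguised index survives the round trip through the attribute variant: whatever is written with reinterpret_cast<BlockDesc *>(blk_idx) must come back unchanged from boost::get<BlockDesc *>. A minimal sketch of that check, using std::variant and a stand-in BlockStub in place of Paddle's boost-based Attribute and BlockDesc types:

#include <cassert>
#include <cstddef>
#include <iostream>
#include <variant>

// Stand-ins for paddle::framework::BlockDesc and the Attribute variant.
struct BlockStub {};
using Attribute = std::variant<int, float, BlockStub *>;

int main() {
  std::size_t blk_idx = 7;  // index of a sub-block that does not exist yet

  // Store the index disguised as a pointer, as the else-branch above does.
  Attribute attr = reinterpret_cast<BlockStub *>(blk_idx);

  // Reading it back through the variant returns the same bit pattern, so the
  // real block can still be located once it has been constructed.
  std::size_t recovered = reinterpret_cast<std::size_t>(std::get<BlockStub *>(attr));
  assert(recovered == blk_idx);
  std::cout << "recovered block index " << recovered << std::endl;
  return 0;
}
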
paddle/framework/program_desc.cc

@@ -52,9 +52,27 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) {
 ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) {
   desc_ = desc;
+  std::cout << std::endl << "starting in ProgDesc constructor" << std::endl;
   for (auto &block_desc : *desc_.mutable_blocks()) {
     blocks_.emplace_back(new BlockDesc(this, &block_desc));
+    std::cout << "Done constructing block idx " << block_desc.idx()
+              << " parent idx " << block_desc.parent_idx() << std::endl;
   }
+  for (auto &block : blocks_) {
+    for (auto *op : block->AllOps()) {
+      for (auto &name : op->AttrNames()) {
+        if (op->GetAttrType(name) == proto::AttrType::BLOCK) {
+          auto attr = op->GetAttr(name);
+          size_t blk_idx =
+              reinterpret_cast<size_t>(boost::get<BlockDesc *>(attr));
+          op->SetBlockAttr(name, *this->MutableBlock(blk_idx));
+          std::cout << "Update attr name " << name << " for block idx "
+                    << blk_idx << std::endl;
+        }
+      }
+    }
+  }
+  std::cout << "Done ProgDesc construction" << std::endl << std::endl;
 }
 
 ProgramDesc::ProgramDesc(const std::string &binary_str) {

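The loop added to this constructor is the second half of the trick from op_desc.cc: once every BlockDesc has been constructed, any BLOCK attribute that still carries a disguised index is rewritten to point at the real block. A self-contained sketch of that fix-up pass, with plain structs standing in for Paddle's OpDesc and BlockDesc (the sketch adds a size check so an already-resolved pointer is left alone, mirroring the check made in op_desc.cc):

#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Minimal stand-ins: only the pieces the fix-up pass touches.
struct Block {
  std::size_t idx;
};

struct Op {
  std::map<std::string, Block *> block_attrs;  // BLOCK-typed attributes only
};

int main() {
  std::vector<Block> blocks = {{0}, {1}, {2}};

  // Phase 1 (while deserializing ops): block 2 did not exist yet, so its
  // index was stored disguised as a pointer.
  Op op;
  op.block_attrs["sub_block"] =
      reinterpret_cast<Block *>(static_cast<std::size_t>(2));

  // Phase 2 (the new loop in the ProgramDesc constructor): recover the index
  // and point the attribute at the block that now exists.
  for (auto &kv : op.block_attrs) {
    std::size_t blk_idx = reinterpret_cast<std::size_t>(kv.second);
    if (blk_idx < blocks.size()) {
      kv.second = &blocks[blk_idx];
      std::cout << "Update attr name " << kv.first << " for block idx "
                << blk_idx << std::endl;
    }
  }
  return 0;
}
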
paddle/framework/prune.cc

@@ -109,15 +109,14 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output,
   // we reverse the should_run vector
   std::reverse(should_run.begin(), should_run.end());
 
-  //*output = input;
+  // copy the current block from input to output
   auto* block_field = output->mutable_blocks();
   *block_field->Add() = input.blocks(block_id);
   int output_block_id = output->blocks_size() - 1;
   auto* output_block = output->mutable_blocks(output_block_id);
-  output_block->set_idx = output_block_id;
-  output_block->set_parent_idx = parent_block_id;
+  output_block->set_idx(output_block_id);
+  output_block->set_parent_idx(parent_block_id);
 
   auto* op_field = output_block->mutable_ops();
   op_field->Clear();
@@ -128,17 +127,18 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output,
     if (HasSubBlock(*op)) {
       // create sub_block_dependent_vars here to help prune the sub block
       std::set<std::string> sub_block_dependent_vars;
-      for (auto& var : op.inputs()) {
+      for (auto& var : op->inputs()) {
         for (auto& argu : var.arguments()) {
           sub_block_dependent_vars.insert(argu);
         }
       }
-      for (auto& var : op.outputs()) {
+      for (auto& var : op->outputs()) {
        for (auto& argu : var.arguments()) {
          sub_block_dependent_vars.insert(argu);
        }
       }
+      std::cout << "pruning the next block, the current output_block_id is "
+                << output_block_id << std::endl;
       // GetSubBlockIndex(*op) is the idx of the sub_block in the input desc
       // output_block_id is the idx of the current block in the output desc
       prune_impl(input, output, GetSubBlockIndex(*op), output_block_id,
@@ -147,6 +147,8 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output,
     }
   }
 
+  std::cout << "Starting to remove unreferenced variables"
+            << " for block idx " << output_block_id << std::endl;
   // remove the VarDescs in BlockDesc that are not referenced in
   // the pruned OpDescs
   std::unordered_map<std::string, proto::VarDesc> var_map;
@@ -155,28 +157,38 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output,
     var_map[var.name()] = var;
   }
 
-  var_field->Clear();
+  std::set<std::string> var_names;
   for (const auto& op : *op_field) {
     // add VarDescs of all input arguments for each OpDesc
     auto& input_field = op.inputs();
     for (auto& input_var : input_field) {
       for (auto& arg : input_var.arguments()) {
-        *var_field->Add() = var_map.at(arg);
+        if (var_map.count(arg) != 0) {
+          var_names.insert(arg);
+        }
       }
     }
     // add VarDescs of all output arguments for each OpDesc
     auto& output_field = op.outputs();
     for (auto& output_var : output_field) {
       for (auto& arg : output_var.arguments()) {
-        *var_field->Add() = var_map.at(arg);
+        if (var_map.count(arg) != 0) {
+          var_names.insert(arg);
+        }
       }
     }
   }
+  var_field->Clear();
+  for (const auto& name : var_names) {
+    *var_field->Add() = var_map[name];
+  }
 }
 
 // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies
 void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) {
-  prune_impl(input, output, 0, -1, {});
+  std::set<std::string> dependent_vars;
+  std::cout << std::endl << "Start C++ framework::prune" << std::endl;
+  prune_impl(input, output, 0, -1, dependent_vars);
+  std::cout << "Finished C++ framework::prune" << std::endl << std::endl;
 }
 
 void inference_optimize_impl(const proto::ProgramDesc& input,

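The reworked variable pass above replaces the previous approach of appending var_map.at(arg) for every argument as it is encountered: argument names are now collected into a set first and filtered with var_map.count, so duplicate VarDescs are no longer emitted and an argument defined in an enclosing block no longer makes var_map.at throw. A small sketch of the same collect-then-rebuild pattern with standard containers (the names var_map, referenced, and vars are illustrative, not Paddle's):

#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

int main() {
  // Variables declared in the current block (the analogue of var_map).
  std::map<std::string, std::string> var_map = {
      {"x", "X"}, {"w", "W"}, {"y", "Y"}};

  // Arguments referenced by the pruned ops; "h" lives in a parent block and
  // "x" is referenced twice.
  std::vector<std::string> referenced = {"x", "w", "x", "h", "y"};

  // Collect the names first: the set removes duplicates, and the count()
  // check skips names this block does not own (var_map.at("h") would throw).
  std::set<std::string> var_names;
  for (const auto& arg : referenced) {
    if (var_map.count(arg) != 0) {
      var_names.insert(arg);
    }
  }

  // Rebuild the block's variable list from the filtered set.
  std::vector<std::string> vars;
  for (const auto& name : var_names) {
    vars.push_back(var_map[name]);
  }

  for (const auto& v : vars) std::cout << v << " ";  // prints: W X Y
  std::cout << std::endl;
  return 0;
}
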
python/paddle/v2/fluid/io.py

@@ -342,6 +342,12 @@ def save_inference_model(dirname,
     prepend_feed_ops(inference_program, feeded_var_names)
     append_fetch_ops(inference_program, fetch_var_names)
 
+    # save for checking
+    curstr = inference_program.to_string(True)
+    f = open("save_inf_prog_after_feed_fetch.txt", 'w')
+    f.write(curstr)
+    f.close()
+
     model_file_name = dirname + "/__model__"
     with open(model_file_name, "wb") as f:
         f.write(inference_program.desc.serialize_to_string())

python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py

@@ -197,14 +197,15 @@ def train(save_dirname=None):
                       " avg_cost=" + str(avg_cost_val))
             if batch_id > 3:
                 if save_dirname is not None:
-                    fluid.io.save_inference_model(save_dirname, [
-                        'source_sequence', 'target_sequence',
-                        'label_sequence'
-                    ], [prediction], exe)
+                    fluid.io.save_inference_model(
+                        save_dirname,
+                        ['source_sequence', 'target_sequence'],
+                        [prediction], exe)
                 return
 
             exit(0)
             batch_id += 1
 
 
-def inference(save_dirname=None):
+def infer(save_dirname=None):
     if save_dirname is None:
         return
@@ -221,24 +222,32 @@ def inference(save_dirname=None):
     data = [[0, 1, 0, 1], [0, 1, 1, 0, 0, 1]]
     word_data = to_lodtensor(data, place)
     trg_word = to_lodtensor(data, place)
     trg_word_next = to_lodtensor(data, place)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
+    print("Print feed fetch target names as follows")
+    print(feed_target_names)
     assert feed_target_names[0] == 'source_sequence'
     assert feed_target_names[1] == 'target_sequence'
-    assert feed_target_names[2] == 'label_sequence'
+    print([var.name for var in fetch_targets])
+
+    # save for checking
+    curstr = inference_program.to_string(True)
+    f = open("loaded_infer_prog.txt", 'w')
+    f.write(curstr)
+    f.close()
+
     results = exe.run(inference_program,
                       feed={
                           feed_target_names[0]: word_data,
                           feed_target_names[1]: trg_word,
                           feed_target_names[2]: trg_word_next
                       },
-                      fetch_list=fetch_targets)
-    print("Inference Shape: ", results[0].shape)
-    print("infer results: ", results[0])
+                      fetch_list=fetch_targets,
+                      return_numpy=False)
+    print(results[0].lod())
+    np_data = np.array(results[0])
+    print("Inference shape: ", np_data.shape)
+    print("Inference results: ", np_data)
 
 
 if __name__ == '__main__':