Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 278ac7be, authored on Dec 20, 2017 by fengjiayi

Complete basic framework

Parent: 61a7df2e
Showing 3 changed files with 49 additions and 31 deletions (+49 -31):

paddle/pybind/protobuf.cc           +1   -7
paddle/pybind/pybind.cc             +2   -2
python/paddle/v2/fluid/backward.py  +46  -22
paddle/pybind/protobuf.cc
```diff
@@ -240,13 +240,7 @@ void BindOpDesc(py::module &m) {
       .value("BLOCK", AttrType::BLOCK);
 
   py::class_<OpDescBind> op_desc(m, "OpDesc", "");
-  op_desc
-      .def("__init__",
-           [](OpDescBind &self, const std::string &type,
-              const VariableNameMap &inputs, const VariableNameMap &outputs,
-              const AttributeMap &attrs) {
-             new (&self) OpDescBind(type, inputs, outputs, attrs);
-           })
+  op_desc.def("__init__", [](OpDescBind &self) { new (&self) OpDescBind(); })
       .def("type", &OpDescBind::Type)
       .def("set_type", &OpDescBind::SetType)
       .def("input", &OpDescBind::Input)
```
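With the multi-argument constructor binding removed, an `OpDesc` is now default-constructed from Python and filled in through the remaining setter bindings. A minimal sketch of the resulting Python-side pattern, assuming the `paddle.v2.fluid.core` import path used elsewhere in this tree (the op and variable names are made up):

```python
# Sketch only: build an OpDesc via the remaining default constructor plus
# setters, instead of the removed four-argument __init__. The setter names
# (set_type/set_input/set_output) are the ones backward.py uses below;
# the concrete op and variable names here are hypothetical.
import paddle.v2.fluid.core as core

op_desc = core.OpDesc()
op_desc.set_type("sum")                # hypothetical op type
op_desc.set_input("X", ["x0", "x1"])   # hypothetical input arguments
op_desc.set_output("Out", ["out"])     # hypothetical output arguments
```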
paddle/pybind/pybind.cc
```diff
@@ -285,8 +285,8 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("get_grad_op_desc",
         [](const OpDescBind &op_desc,
            const std::unordered_set<std::string> &no_grad_set,
-           std::unordered_map<std::string, std::string> &grad_to_var,
            const std::vector<BlockDescBind *> &grad_sub_block) {
+          std::unordered_map<std::string, std::string> grad_to_var;
           std::vector<std::unique_ptr<OpDescBind>> grad_op_descs =
               framework::OpInfoMap::Instance().Get(op_desc.Type())
@@ -297,7 +297,7 @@ All parameter, weight, gradient are variables in Paddle.
                        grad_op_descs.begin(), grad_op_descs.end(),
                        grad_op_desc_ptrs.begin(),
                        [](std::unique_ptr<OpDescBind> &p) { return p.release(); });
-          return grad_op_desc_ptrs;
+          return std::make_pair(grad_op_desc_ptrs, grad_to_var);
         });
   m.def("prune", [](const ProgramDescBind &origin,
                     const std::vector<std::array<size_t, 2>> &targets) {
```
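Since `grad_to_var` is now a local returned through `std::make_pair`, Python callers receive a `(grad_op_descs, grad_to_var)` tuple instead of passing a dict by reference. This is the calling convention the `backward.py` hunk later in this commit adopts:

```python
# Calling convention after this change, as used by backward.py below:
# core.get_grad_op_desc now returns a (grad op descs, grad_to_var) pair
# instead of filling a dict passed by reference.
grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
    each_op.desc,             # OpDescBind of the forward op
    no_grad_set[block.idx],   # set of variable names to skip
    grad_sub_block_list)      # list of BlockDescBind for ops with sub-blocks
```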
python/paddle/v2/fluid/backward.py
```diff
@@ -6,7 +6,8 @@ import pdb
 __all__ = ['append_backward_ops']
 
 
-def rename_arg(op_desc_list, old_name, new_name, begin_idx=None, end_idx=None):
+def _rename_arg_(op_desc_list, old_name, new_name, begin_idx=None,
+                 end_idx=None):
     if begin_idx is None:
         begin_idx = 0
     if end_idx is None:
```
```diff
@@ -16,6 +17,21 @@ def rename_arg(op_desc_list, old_name, new_name, begin_idx=None, end_idx=None):
             op_desc_list[i].rename_output(old_name, new_name)
 
 
+def _create_op_desc_(op_type, inputs, outputs, attrs):
+    op_desc = core.OpDesc()
+    op_desc.set_type(op_type)
+    for para, args in inputs.iteritems():
+        op_desc.set_input(para, args)
+    for para, args in outputs.iteritems():
+        op_desc.set_output(para, args)
+    for name, val in attrs.iteritems():
+        if isinstance(val, framework.Block):
+            op_desc.set_block_attr(name, val.desc)
+        else:
+            op_desc.set_attr(name, val)
+    return op_desc
+
+
 def backward_impl(target,
                   block,
                   target_block,
```
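A hedged usage sketch of the new helper (the argument values are illustrative, not taken from the diff); it packages the default-construct-then-set pattern required by the `OpDesc` binding change in `protobuf.cc`:

```python
# Illustrative call of the _create_op_desc_ helper defined above; the
# variable names are hypothetical. framework.Block attribute values are
# attached via set_block_attr, everything else via set_attr.
op = _create_op_desc_(
    op_type="sum_op",
    inputs={"X": ["x@GRAD@RENAME@0", "x@GRAD@RENAME@1"]},
    outputs={"Out": ["x@GRAD"]},
    attrs={})
```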
```diff
@@ -23,9 +39,9 @@ def backward_impl(target,
                   grad_info_map,
                   callback=None):
     grad_op_descs = []
-    grad_to_var = {}
+    grad_to_var = dict()
     program = block.program
-    for each_op in block.ops:
+    for each_op in reversed(block.ops):
         grad_sub_block_list = []
         if each_op.has_attr("sub_block"):
             sub_block_idx = each_op.block_attr("sub_block")
```
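Switching to `reversed(block.ops)` makes gradient generation follow reverse topological order: each grad op consumes gradients produced by the grad ops generated before it. A toy, self-contained illustration (op names hypothetical):

```python
# Toy illustration of why grad ops are emitted in reverse: for a forward
# chain [mul, add, mean], gradients must be generated as mean_grad,
# add_grad, mul_grad, since each grad op consumes the gradient produced
# by the one generated before it.
forward_ops = ["mul", "add", "mean"]
grad_ops = [op + "_grad" for op in reversed(forward_ops)]
print(grad_ops)  # ['mean_grad', 'add_grad', 'mul_grad']
```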
```diff
@@ -34,10 +50,10 @@ def backward_impl(target,
             backward_impl(target, sub_block, grad_sub_block, no_grad_set,
                           grad_info_map, callback)
             grad_sub_block_list.append(grad_sub_block)
-        grad_op_desc = core.get_grad_op_desc(each_op.desc,
-                                             no_grad_set[block.idx],
-                                             grad_to_var, grad_sub_block_list)
+        grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
+            each_op.desc, no_grad_set[block.idx], grad_sub_block_list)
         grad_op_descs.append(grad_op_desc)
+        grad_to_var = dict(grad_to_var, **op_grad_to_var)
     # grad_op_descs = [[op1_g1, op1_g2], [op2_g], ...]
     # flatten grad_op_descs
     grad_op_descs = [op for sublist in grad_op_descs for op in sublist]
     # ?????
```
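`dict(grad_to_var, **op_grad_to_var)` merges the per-op mapping into the accumulated one without mutating either dict; entries from the per-op dict win on key collisions. A quick self-contained demonstration:

```python
# Self-contained demo of the dict(a, **b) merge used above: a new dict is
# created, and entries from the second argument override duplicate keys.
accumulated = {"w@GRAD": "w"}
per_op = {"b@GRAD": "b"}
merged = dict(accumulated, **per_op)
print(sorted(merged.items()))  # [('b@GRAD', 'b'), ('w@GRAD', 'w')]
```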
```diff
@@ -48,11 +64,10 @@ def backward_impl(target,
     for pos, op_desc in enumerate(grad_op_descs):
         for var_name in op_desc.input_arg_names():
             if len(var_inputs[var_name]) > 1:
                 pdb.set_trace()
-                pending_sum_ops.append((core.OpDesc(
-                    type="sum_op",
-                    inputs=var_inputs[var_name],
-                    output=[var_name],
-                    attrs={}), pos))
+                pending_sum_ops.append((_create_op_desc_(
+                    op_type="sum_op",
+                    inputs=var_inputs[var_name],
+                    outputs=[var_name], attrs={}), pos))
                 var_inputs[var_name] = [var_name]
         for var_name in op_desc.output_arg_names():
```
```diff
@@ -66,8 +81,8 @@ def backward_impl(target,
                     var_rename_count[var_name] = var_rename_count[var_name] + 1
                     # rename original var_name
                     var_inputs[var_name][0] = new_name
-                    rename_arg(grad_op_descs, var_name, new_name, 0, pos)
-                    rename_arg(pending_sum_ops, var_name, new_name)
+                    _rename_arg_(grad_op_descs, var_name, new_name, 0, pos)
+                    _rename_arg_(pending_sum_ops, var_name, new_name)
                 new_name = var_name + "@RENAME@" + \
                     str(var_rename_count[var_name])
```
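The `@RENAME@` scheme keeps multiple writers of one gradient variable distinct until a `sum_op` reduces them back into the original name. A self-contained sketch of the counter bookkeeping, with hypothetical variable names:

```python
# Minimal sketch of the renaming bookkeeping above: each additional writer
# of "x@GRAD" gets a unique suffixed name; a sum_op is later scheduled to
# reduce all renamed copies back into "x@GRAD".
var_rename_count = {"x@GRAD": 0}

def _new_name_(var_name):
    # mirrors: new_name = var_name + "@RENAME@" + str(var_rename_count[var_name])
    name = var_name + "@RENAME@" + str(var_rename_count[var_name])
    var_rename_count[var_name] += 1
    return name

print(_new_name_("x@GRAD"))  # x@GRAD@RENAME@0
print(_new_name_("x@GRAD"))  # x@GRAD@RENAME@1
# A sum_op with X = [x@GRAD@RENAME@0, x@GRAD@RENAME@1] then restores x@GRAD.
```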
```diff
@@ -76,10 +91,11 @@ def backward_impl(target,
             var_inputs[var_name].append(new_name)
     for var_name, inputs in var_inputs.iteritems():
         if len(inputs) > 1:
             pdb.set_trace()
-            pending_sum_ops.append((core.OpDesc(
-                "sum_op", {"X": inputs}, {"Out": var_name}, {}),
-                len(grad_op_descs)))
+            pending_sum_ops.append((_create_op_desc_(
+                op_type="sum_op",
+                inputs={"X": inputs},
+                outputs={"Out": var_name},
+                attrs={}), len(grad_op_descs)))
     # TODO: remove op in no grad set
     # By construction (append order), pending_sum_ops is already sorted by each sum_op's insertion position
```
```diff
@@ -103,15 +119,22 @@ def backward_impl(target,
         target_block.desc.var(grad_target_name)
-    grad_op_descs.insert(0, core.OpDesc(
-        u"fill_constant", {},
-        {u"Out": [unicode(grad_target_name, "ascii")]},
-        {u"shape": (1),
-         u"value": 1.0,
-         u"dtype": core.DataType.FP32}))
+    grad_op_descs.insert(0, _create_op_desc_(
+        op_type="fill_constant",
+        inputs={},
+        outputs={"Out": [grad_target_name]},
+        attrs={"shape": [1],
+               "value": 1.0,
+               "dtype": core.DataType.FP32}))
     # insert backward operators to target_block
     for op_desc in grad_op_descs:
         op_desc.infer_var_type(target_block.desc)
         op_desc.infer_shape(target_block.desc)
         target_block.desc.append_allocated_op(op_desc)
+    pdb.set_trace()
     target_block.sync_with_cpp()
```
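The `fill_constant` op inserted at position 0 seeds backpropagation: since d(target)/d(target) = 1, the target's gradient variable is initialized to a one-element FP32 tensor holding 1.0 before any other grad op runs. A toy numeric illustration of that convention:

```python
# Toy numeric check of the seeding convention: with y = 3*x, dy/dx is
# obtained by starting from dy/dy = 1.0 and propagating through the
# multiply's gradient.
seed = 1.0           # d(y)/d(y), the value fill_constant writes
dy_dx = seed * 3.0   # gradient of the multiply w.r.t. x
print(dy_dx)         # 3.0
```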
```diff
@@ -147,6 +170,7 @@ def append_backward_ops(loss, parameter_list=None, no_grad_set=None):
     grad_info_map = dict()
     root_block = loss.block.program.block(0)
+    pdb.set_trace()
     backward_impl(loss, root_block, root_block, no_grad_set, grad_info_map)
     pdb.set_trace()
     if parameter_list is not None:
```
```diff
@@ -159,7 +183,7 @@ def append_backward_ops(loss, parameter_list=None, no_grad_set=None):
         if param not in grad_info_map:
             raise ValueError("param %s is not in map" % param)
         grad_info = grad_info_map[param]
-        grad_block = loss.block.program.block(grad_info[1])
+        grad_block = grad_info[1]
         if not grad_block.has_var(grad_info[0]):
             raise ValueError("grad block[{0}] did not have grad var {1}".format(
                 grad_info[1], grad_info[0]))
```
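This change implies (inferred from the hunk, not stated in the commit message) that `grad_info_map` now stores the gradient's `Block` object directly rather than a block index, making the `program.block(...)` lookup unnecessary:

```python
# Inferred structure of grad_info_map entries after this change; the
# parameter name is hypothetical.
#   grad_info[0] -> name of the gradient variable
#   grad_info[1] -> the Block holding it (previously a block index)
grad_info = grad_info_map["fc_0.w"]          # hypothetical parameter name
grad_name, grad_block = grad_info[0], grad_info[1]
assert grad_block.has_var(grad_name)
```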