BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 2db6e3ed (unverified)
Authored Jan 15, 2019 by Xin Pan; committed via GitHub on Jan 15, 2019

Merge pull request #15292 from panyx0718/imperative

polish imperative codes

Parents: b14d4cdd, b29eca3b
Showing 5 changed files with 43 additions and 30 deletions (+43, −30):
paddle/fluid/imperative/layer.cc (+6, −2)
paddle/fluid/imperative/layer.h (+3, −0)
paddle/fluid/imperative/tracer.cc (+11, −9)
python/paddle/fluid/imperative/layers.py (+21, −3)
python/paddle/fluid/tests/unittests/test_imperative.py (+2, −16)
paddle/fluid/imperative/layer.cc

```diff
@@ -27,6 +27,9 @@
 namespace paddle {
 namespace imperative {
 
+const char* PyLayer::kFwdInp = "X";
+const char* PyLayer::kFwdOut = "Out";
+
 std::map<int, py::object> py_funcs_;
 
 using framework::Variable;
@@ -131,8 +134,9 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
   if (backward_id_ > 0) {
     VLOG(3) << "py_layer_grad";
-    grad_outputs["Out@GRAD"] =
-        PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
+    grad_outputs[framework::GradVarName(PyLayer::kFwdOut)] = PyLayer::ApplyGrad(
+        backward_id_,
+        grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)]);
   } else {
     VLOG(3) << "op grad " << grad_op_desc_->Type();
     for (auto it : grad_output_vars_) {
```
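The hunk above replaces the hard-coded `"X@GRAD"`/`"Out@GRAD"` keys with names derived from the new `PyLayer::kFwdInp`/`kFwdOut` constants via `framework::GradVarName`, which appends the gradient suffix to a forward slot name. A minimal Python sketch of that naming convention (illustrative only; the lowercase names are not Paddle API):

```python
# Sketch of the convention the diff relies on: gradient slot names are
# derived from forward slot names by appending a fixed suffix, mirroring
# framework::GradVarName in C++.
GRAD_VAR_SUFFIX = "@GRAD"

def grad_var_name(var_name: str) -> str:
    """Forward slot name -> gradient slot name (like framework::GradVarName)."""
    return var_name + GRAD_VAR_SUFFIX

K_FWD_INP, K_FWD_OUT = "X", "Out"  # values of PyLayer::kFwdInp / kFwdOut

# The derived keys equal the literals the old code hard-coded:
assert grad_var_name(K_FWD_INP) == "X@GRAD"
assert grad_var_name(K_FWD_OUT) == "Out@GRAD"
```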
paddle/fluid/imperative/layer.h

```diff
@@ -200,6 +200,9 @@ class PyLayer {
  public:
   virtual ~PyLayer() {}
 
+  static const char* kFwdInp;
+  static const char* kFwdOut;
+
   static void RegisterFunc(int func_id, const py::object& py_func);
 
   static int NumFuncs();
```
paddle/fluid/imperative/tracer.cc

```diff
@@ -164,28 +164,30 @@ std::vector<VarBase*> Tracer::PyTrace(OpBase* op,
                                       const std::vector<VarBase*>& inputs,
                                       bool stop_gradient) {
   VLOG(3) << "py_trace";
-  op->input_vars_["X"] = inputs;
-  op->output_vars_["Out"] = PyLayer::Apply(op->forward_id_, inputs);
+  op->input_vars_[PyLayer::kFwdInp] = inputs;
+  op->output_vars_[PyLayer::kFwdOut] = PyLayer::Apply(op->forward_id_, inputs);
   for (VarBase* inp : inputs) {
     if (inp->pre_op_) {
-      op->pre_ops_["X"].push_back(inp->pre_op_);
-      op->pre_ops_out_idx_["X"].push_back(inp->pre_op_out_idx_);
+      op->pre_ops_[PyLayer::kFwdInp].push_back(inp->pre_op_);
+      op->pre_ops_out_idx_[PyLayer::kFwdInp].push_back(inp->pre_op_out_idx_);
     } else {
-      op->pre_ops_["X"].push_back(nullptr);
+      op->pre_ops_[PyLayer::kFwdInp].push_back(nullptr);
     }
   }
 
-  auto& outputs = op->output_vars_["Out"];
+  auto& outputs = op->output_vars_[PyLayer::kFwdOut];
   for (size_t i = 0; i < outputs.size(); ++i) {
     VarBase* out = outputs[i];
     out->stop_gradient_ = stop_gradient;
     out->pre_op_ = op;
-    out->pre_op_out_name_ = "Out";
+    out->pre_op_out_name_ = PyLayer::kFwdOut;
     out->pre_op_out_idx_ = i;
   }
 
   if (!stop_gradient) {
-    auto& grad_input_vars = op->grad_input_vars_["X@GRAD"];
-    auto& grad_output_vars = op->grad_output_vars_["Out@GRAD"];
+    auto& grad_input_vars =
+        op->grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)];
+    auto& grad_output_vars =
+        op->grad_output_vars_[framework::GradVarName(PyLayer::kFwdOut)];
     for (const VarBase* inp : inputs) {
       grad_input_vars.push_back(inp->var_);
```
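`Tracer::PyTrace` is the bookkeeping step that stitches a Python-defined layer into the autograd graph: it files inputs and outputs under the shared slot names and records which op produced each input. A hypothetical, heavily simplified Python rendering of that bookkeeping (attribute names mirror the C++ members; none of this is real Paddle API):

```python
# Simplified sketch of Tracer::PyTrace's record-keeping, under the
# assumption that variables are plain objects with settable attributes.
K_FWD_INP, K_FWD_OUT = "X", "Out"


class MiniOp:
    def __init__(self, forward_fn):
        self.forward_fn = forward_fn
        self.input_vars = {}   # slot name -> list of input variables
        self.output_vars = {}  # slot name -> list of output variables
        self.pre_ops = {}      # slot name -> producing op (or None) per input


def py_trace(op, inputs, stop_gradient=False):
    op.input_vars[K_FWD_INP] = list(inputs)
    op.output_vars[K_FWD_OUT] = list(op.forward_fn(inputs))
    # Remember which op produced each input so backward can walk the graph.
    op.pre_ops[K_FWD_INP] = [getattr(v, "pre_op", None) for v in inputs]
    for i, out in enumerate(op.output_vars[K_FWD_OUT]):
        out.pre_op = op               # back-pointer used during backprop
        out.pre_op_out_name = K_FWD_OUT
        out.pre_op_out_idx = i
        out.stop_gradient = stop_gradient
    return op.output_vars[K_FWD_OUT]
```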
python/paddle/fluid/imperative/layers.py

```diff
@@ -54,6 +54,25 @@ class PyLayer(core.PyLayer):
     def __init__(self):
         super(PyLayer, self).__init__()
 
+    @classmethod
+    def _do_forward(cls, inputs):
+        return cls._to_tuple(cls.forward(inputs))
+
+    @classmethod
+    def _do_backward(cls, inputs):
+        return cls._to_tuple(cls.backward(inputs))
+
+    @staticmethod
+    def _to_tuple(inputs):
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+        ret = []
+        for inp in inputs:
+            tensor = core.LoDTensor()
+            tensor.set(inp, core.CPUPlace())
+            ret.append(tensor)
+        return tuple(ret)
+
     @staticmethod
     def forward(*inputs):
         raise NotImplementedError
@@ -70,16 +89,15 @@ class PyLayer(core.PyLayer):
         if not hasattr(cls, 'forward_id'):
             cls.forward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.forward_id, cls.forward)
+            PyLayer.register_func(cls.forward_id, cls._do_forward)
             cls.backward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.backward_id, cls.backward)
+            PyLayer.register_func(cls.backward_id, cls._do_backward)
 
         iop = core.OpBase()
         iop.forward_id = cls.forward_id
         iop.backward_id = cls.backward_id
         block.ops.append(iop)
         ivars = tracer.py_trace(iop, ivar_inputs, False)
-        # ivars = core.PyLayer.apply(cls.forward, inputs)
         ret = []
         for ivar in ivars:
             tensor = ivar.value().get_tensor()
```
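With `_do_forward`/`_do_backward` registered as the C++ entry points, a user-defined layer can now return plain numpy arrays and let `_to_tuple` handle the LoDTensor conversion. A minimal usage sketch based on the updated unit test below (the class name `MyTanh` is made up for illustration):

```python
import numpy as np
import paddle.fluid as fluid


class MyTanh(fluid.imperative.PyLayer):
    """Returns bare numpy arrays; PyLayer._to_tuple converts them to
    LoDTensor tuples before they reach the C++ tracer."""

    @staticmethod
    def forward(inputs):
        return np.tanh(inputs[0])

    @staticmethod
    def backward(inputs):
        # inputs are (forward input, forward output, upstream gradient)
        inp, out, dout = inputs
        return np.array(dout) * (1 - np.square(np.array(out)))
```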
python/paddle/fluid/tests/unittests/test_imperative.py

```diff
@@ -41,26 +41,12 @@ class MyPyLayer(fluid.imperative.PyLayer):
     @staticmethod
     def forward(inputs):
-        sys.stderr.write('before forward\n')
-        ret = np.tanh(inputs[0])
-        sys.stderr.write('after forward: %s\n' % ret)
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.tanh(inputs[0])
 
     @staticmethod
     def backward(inputs):
-        sys.stderr.write('calling into backward: %s\n' % str(inputs))
         inp, out, dout = inputs
-        inp = np.array(inp)
-        out = np.array(out)
-        dout = np.array(dout)
-        sys.stderr.write('calling into backward: %s, %s, %s\n' %
-                         (inp, out, dout))
-        ret = np.array(dout) * (1 - np.square(np.array(out)))
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.array(dout) * (1 - np.square(np.array(out)))
 
 
 class MLP(fluid.imperative.Layer):
```
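The simplified test still encodes the same math: `backward` returns the tanh derivative scaled by the upstream gradient. A quick standalone numpy check of that formula (not part of the test suite):

```python
import numpy as np

# d/dx tanh(x) = 1 - tanh(x)^2; backward() multiplies this by the
# upstream gradient dout (chain rule).
x = np.array([0.0, 0.5, -1.0])
out = np.tanh(x)
dout = np.ones_like(x)                  # pretend upstream gradient
grad = dout * (1 - np.square(out))      # what MyPyLayer.backward returns

# Compare against a central-difference numerical derivative.
eps = 1e-6
numeric = (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps)
assert np.allclose(grad, numeric, atol=1e-4)
```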