BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 0d5819eb
Authored Jan 11, 2019 by Xin Pan

polish imperative codes

test=develop

Parent: e33427da
Showing 5 changed files with 30 additions and 21 deletions
paddle/fluid/imperative/layer.cc                         +3  -2
paddle/fluid/imperative/layer.h                          +3  -0
paddle/fluid/imperative/tracer.h                         +1  -0
python/paddle/fluid/imperative/layers.py                 +21 -3
python/paddle/fluid/tests/unittests/test_imperative.py   +2  -16
paddle/fluid/imperative/layer.cc

@@ -131,8 +131,9 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
   if (backward_id_ > 0) {
     VLOG(3) << "py_layer_grad";
-    grad_outputs["Out@GRAD"] =
-        PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
+    grad_outputs[framework::GradVarName(PyLayer::kFwdOut)] = PyLayer::ApplyGrad(
+        backward_id_,
+        grad_input_vars_[framework::GradVarName(PyLayer::kFwdInp)]);
   } else {
     VLOG(3) << "op grad " << grad_op_desc_->Type();
     for (auto it : grad_output_vars_) {
paddle/fluid/imperative/layer.h

@@ -200,6 +200,9 @@ class PyLayer {
  public:
   virtual ~PyLayer() {}
 
+  static constexpr char* kFwdInp = "X";
+  static constexpr char* kFwdOut = "Out";
+
   static void RegisterFunc(int func_id, const py::object& py_func);
 
   static int NumFuncs();
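A note for reading the layer.cc hunk above: framework::GradVarName appends Paddle's gradient-variable suffix, so the keys built from the new kFwdInp / kFwdOut constants resolve to the same "X@GRAD" / "Out@GRAD" strings the old code hard-coded. A small standalone illustration in plain Python; grad_var_name here is a stand-in for the C++ helper, not Paddle's API:

GRAD_SUFFIX = "@GRAD"          # the suffix the old string literals spelled out


def grad_var_name(name):
    # Stand-in for framework::GradVarName: gradient var name = name + suffix.
    return name + GRAD_SUFFIX


K_FWD_INP, K_FWD_OUT = "X", "Out"   # mirrors PyLayer::kFwdInp / PyLayer::kFwdOut

assert grad_var_name(K_FWD_OUT) == "Out@GRAD"   # old key into grad_outputs
assert grad_var_name(K_FWD_INP) == "X@GRAD"     # old key into grad_input_vars_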
paddle/fluid/imperative/tracer.h

@@ -48,6 +48,7 @@ class Tracer {
 
   std::vector<VarBase*> PyTrace(OpBase* op, const std::vector<VarBase*>& inputs,
                                 bool stop_gradient = false);
+
  private:
   framework::BlockDesc* root_block_;
 };
python/paddle/fluid/imperative/layers.py

@@ -54,6 +54,25 @@ class PyLayer(core.PyLayer):
     def __init__(self):
         super(PyLayer, self).__init__()
 
+    @classmethod
+    def _do_forward(cls, inputs):
+        return cls._to_tuple(cls.forward(inputs))
+
+    @classmethod
+    def _do_backward(cls, inputs):
+        return cls._to_tuple(cls.backward(inputs))
+
+    @staticmethod
+    def _to_tuple(inputs):
+        if not isinstance(inputs, list) and not isinstance(inputs, tuple):
+            inputs = [inputs]
+        ret = []
+        for inp in inputs:
+            tensor = core.LoDTensor()
+            tensor.set(inp, core.CPUPlace())
+            ret.append(tensor)
+        return tuple(ret)
+
     @staticmethod
     def forward(*inputs):
         raise NotImplementedError

@@ -70,16 +89,15 @@ class PyLayer(core.PyLayer):
         if not hasattr(cls, 'forward_id'):
             cls.forward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.forward_id, cls.forward)
+            PyLayer.register_func(cls.forward_id, cls._do_forward)
             cls.backward_id = core.PyLayer.num_funcs() + 1
-            PyLayer.register_func(cls.backward_id, cls.backward)
+            PyLayer.register_func(cls.backward_id, cls._do_backward)
 
         iop = core.OpBase()
         iop.forward_id = cls.forward_id
         iop.backward_id = cls.backward_id
         block.ops.append(iop)
         ivars = tracer.py_trace(iop, ivar_inputs, False)
-        # ivars = core.PyLayer.apply(cls.forward, inputs)
         ret = []
         for ivar in ivars:
             tensor = ivar.value().get_tensor()
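With register_func now pointing at _do_forward / _do_backward, a user-defined PyLayer can return a bare numpy array (or a list/tuple of arrays) and _to_tuple boxes the result into a tuple of tensors before it crosses into C++. A minimal standalone sketch of that normalization step, keeping plain numpy arrays where the real code builds core.LoDTensor objects on core.CPUPlace():

import numpy as np


def to_tuple(outputs):
    # Mirrors PyLayer._to_tuple: accept a single value, a list, or a tuple,
    # and always return a tuple (here of numpy arrays instead of LoDTensors).
    if not isinstance(outputs, (list, tuple)):
        outputs = [outputs]
    return tuple(np.asarray(o) for o in outputs)


# A forward that returns one bare array still comes back as a 1-element tuple.
result = to_tuple(np.tanh(np.array([0.0, 0.5, 1.0])))
assert isinstance(result, tuple) and len(result) == 1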
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -41,26 +41,12 @@ class MyPyLayer(fluid.imperative.PyLayer):
 
     @staticmethod
     def forward(inputs):
-        sys.stderr.write('before forward\n')
-        ret = np.tanh(inputs[0])
-        sys.stderr.write('after forward: %s\n' % ret)
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.tanh(inputs[0])
 
     @staticmethod
     def backward(inputs):
-        sys.stderr.write('calling into backward: %s\n' % str(inputs))
         inp, out, dout = inputs
-        inp = np.array(inp)
-        out = np.array(out)
-        dout = np.array(dout)
-        sys.stderr.write('calling into backward: %s, %s, %s\n' % (inp, out, dout))
-        ret = np.array(dout) * (1 - np.square(np.array(out)))
-        tensor = core.LoDTensor()
-        tensor.set(ret, core.CPUPlace())
-        return tuple([tensor])
+        return np.array(dout) * (1 - np.square(np.array(out)))
 
 
 class MLP(fluid.imperative.Layer):
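The simplified MyPyLayer.backward returns the analytic tanh gradient, dout * (1 - out**2), now that the tensor boxing lives in PyLayer._to_tuple. A quick standalone numpy check of that identity against a central-difference estimate, illustrative only and not part of the test:

import numpy as np

x = np.array([0.3, -1.2, 2.0])
out = np.tanh(x)
dout = np.ones_like(x)                      # upstream gradient of ones

analytic = dout * (1 - np.square(out))      # what MyPyLayer.backward returns

eps = 1e-6
numeric = (np.tanh(x + eps) - np.tanh(x - eps)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-6)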