Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Commit 9597fd05
Authored Jan 09, 2019 by Xin Pan

polish
test=develop

Parent: 7aad6afd
Showing 6 changed files with 136 additions and 111 deletions (+136 -111)
paddle/fluid/imperative/layer.cc    +60 -30
paddle/fluid/imperative/layer.h     +20 -67
paddle/fluid/imperative/tracer.h    +3 -2
paddle/fluid/pybind/pybind.cc       +6 -4
python/paddle/fluid/imperative/layers.py    +7 -4
python/paddle/fluid/tests/unittests/test_imperative.py    +40 -4
paddle/fluid/imperative/layer.cc
@@ -128,26 +128,23 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     return {};
   }
 
-  std::vector<std::unique_ptr<framework::Variable>> tmp_vars;
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;
-  for (auto it : grad_output_vars_) {
-    auto& outputs = grad_outputs[it.first];
-    for (size_t i = 0; i < it.second.size(); ++i) {
-      // Allocate a new variable
-      Variable* tmp_var = new framework::Variable();
-      tmp_var->GetMutable<framework::LoDTensor>();
-      tmp_vars.emplace_back(tmp_var);
-      outputs.push_back(tmp_var);
-    }
-  }
   if (backward_id_ > 0) {
     VLOG(3) << "py_layer_grad";
-    PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"],
-                       &(grad_outputs["Out@GRAD"]));
+    grad_outputs["Out@GRAD"] =
+        PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"]);
   } else {
     VLOG(3) << "op grad " << grad_op_desc_->Type();
+    for (auto it : grad_output_vars_) {
+      auto& outputs = grad_outputs[it.first];
+      for (size_t i = 0; i < it.second.size(); ++i) {
+        // Allocate a new variable
+        Variable* tmp_var = new framework::Variable();
+        tmp_var->GetMutable<framework::LoDTensor>();
+        outputs.push_back(tmp_var);
+      }
+    }
+
     framework::RuntimeContext ctx(grad_input_vars_, grad_outputs);
 
     // No need to do compile time infer shape here.
@@ -170,10 +167,13 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
   for (auto it : grad_output_vars_) {
     auto& outputs = grad_outputs[it.first];
     auto& origin_outputs = it.second;
+    PADDLE_ENFORCE_EQ(outputs.size(), origin_outputs.size());
 
     for (size_t i = 0; i < outputs.size(); ++i) {
+      framework::Variable* grad = outputs[i];
       framework::Variable* orig_grad = origin_outputs[i];
-      AddTo(outputs[i], orig_grad);
+      AddTo(grad, orig_grad);
+      delete grad;
     }
   }
   return input_vars_;
@@ -197,30 +197,60 @@ void PyLayer::RegisterFunc(int func_id, const py::object& py_func) {
   py_funcs_[func_id] = py_func;
 }
 
+int PyLayer::NumFuncs() { return py_funcs_.size(); }
+
 std::vector<VarBase*> PyLayer::Apply(int func_id,
                                      const std::vector<VarBase*>& inputs) {
-  std::vector<framework::LoDTensor> tensor_inputs;
-  std::vector<VarBase*> ret;
-
+  std::vector<framework::Variable*> invars;
   for (const VarBase* in : inputs) {
-    tensor_inputs.push_back(in->var_->Get<framework::LoDTensor>());
+    invars.push_back(in->var_);
   }
   PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
-  CallPythonFunc(py_funcs_[func_id], tensor_inputs, &ret);
+  std::vector<Variable*> outvars = CallPythonFunc(py_funcs_[func_id], invars);
+  std::vector<VarBase*> ret;
+  for (Variable* v : outvars) {
+    ret.push_back(new VarBase(v, new Variable()));
+  }
   return ret;
 }
 
-void PyLayer::ApplyGrad(int func_id,
-                        const std::vector<framework::Variable*>& inputs,
-                        std::vector<framework::Variable*>* outputs) {
-  std::vector<framework::LoDTensor> tensor_inputs;
-  std::vector<VarBase*> ret;
-
-  for (const Variable* in : inputs) {
-    tensor_inputs.push_back(in->Get<framework::LoDTensor>());
-  }
-  PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
-
-  CallPythonFunc(py_funcs_[func_id], tensor_inputs, outputs);
+std::vector<Variable*> PyLayer::ApplyGrad(
+    int func_id, const std::vector<framework::Variable*>& inputs) {
+  PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
+  return CallPythonFunc(py_funcs_[func_id], inputs);
+}
+
+std::vector<framework::Variable*> PyLayer::CallPythonFunc(
+    const py::object& callable, const std::vector<framework::Variable*>& ins) {
+  py::gil_scoped_acquire guard;
+  py::tuple in_args(ins.size());
+  for (size_t i = 0; i < ins.size(); ++i) {
+    const framework::LoDTensor& t = ins[i]->Get<framework::LoDTensor>();
+    in_args[i] = t.IsInitialized() ? py::cast(t) : py::cast(nullptr);
+  }
+  VLOG(3) << "pyfunc in " << py::len(in_args);
+
+  // TODO(panyx0718): Who owns the returned LoDTensor.
+  auto ret = callable(in_args);
+  auto ret_tuple = py::cast<py::tuple>(ret);
+  size_t ret_num = py::len(ret_tuple);
+  std::vector<framework::Variable*> outs;
+  VLOG(3) << "pyfunc out " << ret_num;
+  for (size_t i = 0; i < ret_num; ++i) {
+    try {
+      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
+      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
                              "Output tensor %d should not be nullptr", i);
+      auto* var = new framework::Variable();
+      auto* tensor = var->GetMutable<framework::LoDTensor>();
+      tensor->ShareDataWith(*py_out_tensor);
+      tensor->set_lod(py_out_tensor->lod());
+      outs.push_back(var);
+    } catch (py::cast_error&) {
+      PADDLE_THROW("The %d-th output must be LoDTensor", i);
+    }
+  }
+  return outs;
 }
 
 }  // namespace imperative
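The refactor above funnels all pybind plumbing through a single private PyLayer::CallPythonFunc: each input Variable is handed to the registered Python callable as a LoDTensor (or None when uninitialized), and every returned LoDTensor is wrapped in a fresh Variable. A minimal sketch of the callable contract this implies, assuming a paddle.fluid build of this era (the function name scale_by_two is hypothetical and not part of the commit):

```python
import numpy as np
import paddle.fluid.core as core

def scale_by_two(inputs):
    # `inputs` arrives as one tuple of core.LoDTensor objects (or None for
    # uninitialized variables); the callable must return a tuple whose items
    # are LoDTensors, otherwise the C++ side raises
    # "The %d-th output must be LoDTensor".
    outs = []
    for t in inputs:
        out = core.LoDTensor()
        out.set(np.array(t) * 2.0, core.CPUPlace())
        outs.append(out)
    return tuple(outs)
```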
paddle/fluid/imperative/layer.h
@@ -87,12 +87,15 @@ class OpBase;
 class VarBase {
  public:
-  VarBase()
+  VarBase() : VarBase(new framework::Variable(), new framework::Variable()) {}
+
+  // Owns `var` and `grad`
+  VarBase(framework::Variable* var, framework::Variable* grad)
       : pre_op_(nullptr),
         pre_op_out_idx_(-1),
         var_desc_(nullptr),
-        var_(new framework::Variable()),
-        grads_(new framework::Variable()),
+        var_(var),
+        grads_(grad),
         stop_gradient_(false) {}
 
   explicit VarBase(bool stop_gradient)
@@ -131,8 +134,8 @@ class OpBase {
  public:
   OpBase()
       : op_desc_(nullptr),
-        grad_op_desc_(nullptr),
         forward_id_(-1),
+        grad_op_desc_(nullptr),
         backward_id_(-1) {}
 
   virtual ~OpBase() {
@@ -141,10 +144,13 @@ class OpBase {
   std::map<std::string, std::vector<VarBase*>> ApplyGrad();
 
+  // One of `op_desc_` or `forward_id_` is set, not both.
+  // For pure python PyLayer, use `forward_id_`, otherwise, use op_desc_.
   framework::OpDesc* op_desc_;
-  framework::OpDesc* grad_op_desc_;
   int forward_id_;
+
+  // When has backward, one of `grad_op_desc_` or `backward_id_` is set,
+  // not both.
+  framework::OpDesc* grad_op_desc_;
   int backward_id_;
 
   std::map<std::string, std::vector<VarBase*>> input_vars_;
@@ -167,76 +173,23 @@ class Layer {
   }
 };
 
-static void CallPythonFunc(const py::object& callable,
-                           const std::vector<framework::LoDTensor>& ins,
-                           std::vector<VarBase*>* outs) {
-  py::gil_scoped_acquire guard;
-  py::tuple in_args(ins.size());
-  for (size_t i = 0; i < ins.size(); ++i) {
-    in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
-  }
-  // TODO(panyx0718): Who owns the returned LoDTensor.
-  auto ret = callable(in_args);
-  auto ret_tuple = py::cast<py::tuple>(ret);
-  size_t ret_num = py::len(ret_tuple);
-  for (size_t i = 0; i < ret_num; ++i) {
-    try {
-      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
-      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
-                              "Output tensor %d should not be nullptr", i);
-      VarBase* var = new VarBase();
-      auto* tensor = var->var_->GetMutable<framework::LoDTensor>();
-      tensor->ShareDataWith(*py_out_tensor);
-      tensor->set_lod(py_out_tensor->lod());
-      outs->push_back(var);
-    } catch (py::cast_error&) {
-      PADDLE_THROW("The %d-th output must be LoDTensor", i);
-    }
-  }
-}
-
-static void CallPythonFunc(const py::object& callable,
-                           const std::vector<framework::LoDTensor>& ins,
-                           std::vector<framework::Variable*>* outs) {
-  py::gil_scoped_acquire guard;
-  py::tuple in_args(ins.size());
-  for (size_t i = 0; i < ins.size(); ++i) {
-    in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
-  }
-  VLOG(3) << "pyfunc in " << py::len(in_args);
-
-  // TODO(panyx0718): Who owns the returned LoDTensor.
-  auto ret = callable(in_args);
-  auto ret_tuple = py::cast<py::tuple>(ret);
-  size_t ret_num = py::len(ret_tuple);
-  VLOG(3) << "pyfunc out " << ret_num;
-  for (size_t i = 0; i < ret_num; ++i) {
-    try {
-      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
-      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
-                              "Output tensor %d should not be nullptr", i);
-      auto* tensor = (*outs)[i]->GetMutable<framework::LoDTensor>();
-      tensor->ShareDataWith(*py_out_tensor);
-      tensor->set_lod(py_out_tensor->lod());
-    } catch (py::cast_error&) {
-      PADDLE_THROW("The %d-th output must be LoDTensor", i);
-    }
-  }
-}
-
 class PyLayer {
  public:
   virtual ~PyLayer() {}
 
   static void RegisterFunc(int func_id, const py::object& py_func);
 
+  static int NumFuncs();
+
   static std::vector<VarBase*> Apply(int func_id,
                                      const std::vector<VarBase*>& inputs);
 
-  static void ApplyGrad(int func_id,
-                        const std::vector<framework::Variable*>& inputs,
-                        std::vector<framework::Variable*>* outputs);
+  static std::vector<framework::Variable*> ApplyGrad(
+      int func_id, const std::vector<framework::Variable*>& inputs);
+
+ private:
+  static std::vector<framework::Variable*> CallPythonFunc(
+      const py::object& callable, const std::vector<framework::Variable*>& ins);
 };
 
 }  // namespace imperative
paddle/fluid/imperative/tracer.h
@@ -132,8 +132,9 @@ class Tracer {
     if (!stop_gradient) {
       framework::OpDesc* grad_op_desc;
       // TODO(panyx): Is this leaked?
-      auto grad_to_var = new std::unordered_map<std::string, std::string>();
-      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
+      std::unique_ptr<std::unordered_map<std::string, std::string>> grad_to_var(
+          new std::unordered_map<std::string, std::string>());
+      CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var.get());
       op->grad_op_desc_ = grad_op_desc;
 
       for (auto it : grad_op_desc->Inputs()) {
paddle/fluid/pybind/pybind.cc
@@ -191,7 +191,7 @@ PYBIND11_MODULE(core, m) {
             return self.Forward(inputs);
           });
 
-  py::class_<paddle::imperative::PyLayer>(m, "PyLayer")
+  py::class_<imperative::PyLayer>(m, "PyLayer")
      .def(py::init<>())
      .def_static(
          "apply",
@@ -200,9 +200,11 @@ PYBIND11_MODULE(core, m) {
             return imperative::PyLayer::Apply(func_id, inputs);
           },
           py::return_value_policy::take_ownership)
-      .def_static("register_func", [](int func_id, const py::object& callable) {
-        imperative::PyLayer::RegisterFunc(func_id, callable);
-      });
+      .def_static("register_func",
+                  [](int func_id, const py::object& callable) {
+                    imperative::PyLayer::RegisterFunc(func_id, callable);
+                  })
+      .def_static("num_funcs", &imperative::PyLayer::NumFuncs);
 
   BindTracer(&m);
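For reference, a hedged sketch of how the statics bound above could be driven directly from Python; in normal use fluid.imperative.PyLayer.__call__ does this registration itself (see layers.py below), and `identity` here is a stand-in callable, not part of the commit:

```python
import paddle.fluid.core as core

def identity(inputs):
    # must return a tuple of LoDTensors; echoing the inputs satisfies that
    return inputs

# same id scheme that layers.py adopts below: next id = current count + 1
func_id = core.PyLayer.num_funcs() + 1
core.PyLayer.register_func(func_id, identity)
assert core.PyLayer.num_funcs() >= func_id
```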
python/paddle/fluid/imperative/layers.py
@@ -68,12 +68,15 @@ class PyLayer(core.PyLayer):
         block = framework.default_main_program().current_block()
         inputs = [x._ivar for x in inputs]
 
-        PyLayer.register_func(1, cls.forward)
-        PyLayer.register_func(2, cls.backward)
+        if not hasattr(cls, 'forward_id'):
+            cls.forward_id = core.PyLayer.num_funcs() + 1
+            PyLayer.register_func(cls.forward_id, cls.forward)
+            cls.backward_id = core.PyLayer.num_funcs() + 1
+            PyLayer.register_func(cls.backward_id, cls.backward)
 
         iop = core.OpBase()
-        iop.forward_id = 1
-        iop.backward_id = 2
+        iop.forward_id = cls.forward_id
+        iop.backward_id = cls.backward_id
         block.ops.append(iop)
         ivars = tracer.py_trace(iop, inputs, False)
         # ivars = core.PyLayer.apply(cls.forward, inputs)
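With this change a PyLayer subclass registers its forward/backward callables once, on first call, and caches their ids as class attributes. A small usage sketch under the same assumptions as the new unit test below (the `Identity` class name is made up for illustration):

```python
import numpy as np
import paddle.fluid as fluid

class Identity(fluid.imperative.PyLayer):
    @staticmethod
    def forward(inputs):
        return inputs

    @staticmethod
    def backward(inputs):
        return inputs

with fluid.imperative.guard():
    layer = Identity()
    layer([fluid.imperative.base.to_variable(np.ones([2, 2], np.float32))])
    first_id = Identity.forward_id
    # a second call reuses the cached ids instead of re-registering
    layer([fluid.imperative.base.to_variable(np.ones([2, 2], np.float32))])
    assert Identity.forward_id == first_id
    assert Identity.backward_id == first_id + 1
```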
python/paddle/fluid/tests/unittests/test_imperative.py
@@ -81,14 +81,52 @@ class MLP(fluid.imperative.Layer):
 class TestImperative(unittest.TestCase):
-    """
     def test_layer(self):
         with fluid.imperative.guard():
             cl = core.Layer()
             cl.forward([])
             l = fluid.imperative.Layer()
             self.assertRaises(NotImplementedError, l.forward, [])
-    """
+
+    def test_pylayer_func_id(self):
+
+        with fluid.imperative.guard():
+
+            class PyLayer1(fluid.imperative.PyLayer):
+                def __init__(self):
+                    super(PyLayer1, self).__init__()
+
+                @staticmethod
+                def forward(inputs):
+                    return inputs
+
+                @staticmethod
+                def backward(inputs):
+                    return inputs
+
+            class PyLayer2(fluid.imperative.PyLayer):
+                def __init__(self):
+                    super(PyLayer2, self).__init__()
+
+                @staticmethod
+                def forward(inputs):
+                    return inputs
+
+                @staticmethod
+                def backward(inputs):
+                    return inputs
+
+            py_layer_1 = PyLayer1()
+            py_layer_2 = PyLayer2()
+            py_layer_1([fluid.imperative.base.to_variable(np.ones([2, 2]))])
+            py_layer_2([fluid.imperative.base.to_variable(np.ones([2, 2]))])
+            id = py_layer_1.forward_id
+            self.assertGreater(id, 0)
+            self.assertEqual(py_layer_1.backward_id, id + 1)
+            self.assertEqual(py_layer_2.forward_id, id + 2)
+            self.assertEqual(py_layer_2.backward_id, id + 3)
+            py_layer_1([fluid.imperative.base.to_variable(np.ones([2, 2]))])
+            self.assertEqual(py_layer_1.forward_id, id)
 
     def test_pylayer(self):
         np_inp = np.ones([2, 2], np.float32)
@@ -118,7 +156,6 @@ class TestImperative(unittest.TestCase):
             self.assertTrue(np.allclose(dy_out, static_out))
             self.assertTrue(np.allclose(dy_grad, static_grad))
 
-    """
     def test_layer_in_out(self):
         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():
@@ -172,7 +209,6 @@ class TestImperative(unittest.TestCase):
             self.assertTrue(np.allclose(dy_out, static_out))
             self.assertTrue(np.allclose(dy_grad, static_grad))
 
-    """
 
 if __name__ == '__main__':