机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 7aad6afd
Authored Jan 09, 2019 by Xin Pan

forward and backward

test=develop

Parent: 2349acea
Showing 6 changed files with 175 additions and 42 deletions (+175, -42)
paddle/fluid/imperative/layer.cc                          +43  -22
paddle/fluid/imperative/layer.h                           +34  -1
paddle/fluid/imperative/tracer.h                          +46  -10
paddle/fluid/pybind/pybind.cc                             +8   -1
python/paddle/fluid/imperative/layers.py                  +4   -3
python/paddle/fluid/tests/unittests/test_imperative.py    +40  -5
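Taken together, the six files wire a Python-defined backward into the imperative autograd pass. The sketch below distills the user-facing pattern from test_imperative.py in this diff; MyTanhLayer is an illustrative stand-in (the test's MyPyLayer forward body is outside this diff, and tanh is assumed here only because the backward in the diff implements the tanh derivative and the static-graph check uses fluid.layers.tanh).

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid import core

    class MyTanhLayer(fluid.imperative.PyLayer):
        """Illustrative stand-in for the test's MyPyLayer."""

        @staticmethod
        def forward(inputs):
            ret = np.tanh(np.array(inputs[0]))
            tensor = core.LoDTensor()
            tensor.set(ret, core.CPUPlace())
            return tuple([tensor])

        @staticmethod
        def backward(inputs):
            # PyTrace (tracer.h below) hands backward the forward input, the forward
            # output, and the gradient of the output, in that order.
            inp, out, dout = (np.array(x) for x in inputs)
            ret = dout * (1 - np.square(out))   # tanh'(x) = 1 - tanh(x)^2
            tensor = core.LoDTensor()
            tensor.set(ret, core.CPUPlace())
            return tuple([tensor])

    with fluid.imperative.guard():
        var_inp = fluid.imperative.base.to_variable(np.ones([2, 2], np.float32))
        outs = MyTanhLayer()([var_inp])   # __call__ registers forward/backward and traces the op
        outs[0]._backward()               # drives VarBase::RunBackward -> OpBase::ApplyGrad
        dy_grad = var_inp._gradient()     # gradient written back through Out@GRAD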
paddle/fluid/imperative/layer.cc

@@ -57,6 +57,7 @@ class Autograd {
     if (var->stop_gradient_) {
       return;
     }
+    VLOG(3) << "start autograd";
     std::deque<OpBase*> ready;
     ready.push_back(var->pre_op_);

@@ -122,11 +123,10 @@ framework::LoDTensor& VarBase::Grad() {
 }

 std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
-  if (!grad_op_desc_) {
+  if (!grad_op_desc_ && backward_id_ <= 0) {
     LOG(WARNING) << "op with no grad: " << op_desc_->Type();
     return {};
   }
-  VLOG(3) << "op grad " << grad_op_desc_->Type();

   std::vector<std::unique_ptr<framework::Variable>> tmp_vars;
   std::map<std::string, std::vector<framework::Variable*>> grad_outputs;

@@ -142,23 +142,30 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
     }
   }

-  framework::RuntimeContext ctx(grad_input_vars_, grad_outputs);
-
-  // No need to do compile time infer shape here.
-  // grad_op_desc_->InferShape(*block_);
-  grad_op_desc_->InferVarType(block_);
-
-  std::unique_ptr<framework::OperatorBase> opbase =
-      framework::OpRegistry::CreateOp(*grad_op_desc_);
-  framework::OperatorWithKernel* op_kernel =
-      dynamic_cast<framework::OperatorWithKernel*>(opbase.get());
-  PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");
-
-  framework::Scope scope;
-  platform::CPUPlace place;
-  PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
-  p.op.RuntimeInferShape(scope, place, ctx);
-  p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));
+  if (backward_id_ > 0) {
+    VLOG(3) << "py_layer_grad";
+    PyLayer::ApplyGrad(backward_id_, grad_input_vars_["X@GRAD"],
+                       &(grad_outputs["Out@GRAD"]));
+  } else {
+    VLOG(3) << "op grad " << grad_op_desc_->Type();
+    framework::RuntimeContext ctx(grad_input_vars_, grad_outputs);
+
+    // No need to do compile time infer shape here.
+    // grad_op_desc_->InferShape(*block_);
+    grad_op_desc_->InferVarType(block_);
+
+    std::unique_ptr<framework::OperatorBase> opbase =
+        framework::OpRegistry::CreateOp(*grad_op_desc_);
+    framework::OperatorWithKernel* op_kernel =
+        dynamic_cast<framework::OperatorWithKernel*>(opbase.get());
+    PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel");
+
+    framework::Scope scope;
+    platform::CPUPlace place;
+    PreparedOp p = PreparedOp::Prepare(ctx, *op_kernel, place);
+    p.op.RuntimeInferShape(scope, place, ctx);
+    p.func(framework::ExecutionContext(p.op, scope, *p.dev_ctx, p.ctx));
+  }

   for (auto it : grad_output_vars_) {
     auto& outputs = grad_outputs[it.first];

@@ -175,6 +182,7 @@ std::map<std::string, std::vector<VarBase*>> OpBase::ApplyGrad() {
 void VarBase::RunBackward() {
   if (!pre_op_) return;

+  VLOG(3) << "start backward";
   auto grads_t = grads_->GetMutable<framework::LoDTensor>();
   float* data = grads_t->mutable_data<float>(platform::CPUPlace());
   std::fill(data, data + grads_t->numel(), 1.0);

@@ -190,17 +198,30 @@ void PyLayer::RegisterFunc(int func_id, const py::object& py_func) {
 }

 std::vector<VarBase*> PyLayer::Apply(int func_id,
-                                     const std::vector<VarBase>& inputs) {
+                                     const std::vector<VarBase*>& inputs) {
   std::vector<framework::LoDTensor> tensor_inputs;
   std::vector<VarBase*> ret;

-  for (const VarBase& in : inputs) {
-    tensor_inputs.push_back(in.var_->Get<framework::LoDTensor>());
+  for (const VarBase* in : inputs) {
+    tensor_inputs.push_back(in->var_->Get<framework::LoDTensor>());
   }
   PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
   CallPythonFunc(py_funcs_[func_id], tensor_inputs, &ret);
   return ret;
 }

+void PyLayer::ApplyGrad(int func_id,
+                        const std::vector<framework::Variable*>& inputs,
+                        std::vector<framework::Variable*>* outputs) {
+  std::vector<framework::LoDTensor> tensor_inputs;
+  std::vector<VarBase*> ret;
+
+  for (const Variable* in : inputs) {
+    tensor_inputs.push_back(in->Get<framework::LoDTensor>());
+  }
+  PADDLE_ENFORCE(py_funcs_.find(func_id) != py_funcs_.end());
+  CallPythonFunc(py_funcs_[func_id], tensor_inputs, outputs);
+}
+
 }  // namespace imperative
 }  // namespace paddle
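The heart of the change is the new branch in OpBase::ApplyGrad(): when backward_id_ is set, the gradient is produced by calling back into a registered Python function through PyLayer::ApplyGrad instead of building and running a C++ grad op. The pairing between the two sides is only an integer id. A minimal sketch of that contract, with ids 1 and 2 taken from layers.py below; fwd and bwd are illustrative stubs, and register_func is assumed to be reachable as core.PyLayer.register_func, the way layers.py uses it:

    import numpy as np
    from paddle.fluid import core

    def _to_tensor(arr):
        t = core.LoDTensor()
        t.set(arr.astype(np.float32), core.CPUPlace())
        return t

    def fwd(tensors):                                   # run by Tracer::PyTrace via forward_id_
        return tuple([_to_tensor(np.array(tensors[0]))])

    def bwd(tensors):                                   # run by OpBase::ApplyGrad via backward_id_
        inp, out, dout = (np.array(x) for x in tensors)
        return tuple([_to_tensor(dout)])                # identity layer: pass the gradient through

    core.PyLayer.register_func(1, fwd)   # id 1 is what layers.py assigns to forward_id
    core.PyLayer.register_func(2, bwd)   # id 2 is what it now assigns to backward_id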
paddle/fluid/imperative/layer.h

@@ -196,6 +196,35 @@ static void CallPythonFunc(const py::object& callable,
   }
 }

+static void CallPythonFunc(const py::object& callable,
+                           const std::vector<framework::LoDTensor>& ins,
+                           std::vector<framework::Variable*>* outs) {
+  py::gil_scoped_acquire guard;
+  py::tuple in_args(ins.size());
+  for (size_t i = 0; i < ins.size(); ++i) {
+    in_args[i] = ins[i].IsInitialized() ? py::cast(ins[i]) : py::cast(nullptr);
+  }
+  VLOG(3) << "pyfunc in " << py::len(in_args);
+
+  // TODO(panyx0718): Who owns the returned LoDTensor.
+  auto ret = callable(in_args);
+  auto ret_tuple = py::cast<py::tuple>(ret);
+  size_t ret_num = py::len(ret_tuple);
+  VLOG(3) << "pyfunc out " << ret_num;
+  for (size_t i = 0; i < ret_num; ++i) {
+    try {
+      auto* py_out_tensor = py::cast<framework::LoDTensor*>(ret_tuple[i]);
+      PADDLE_ENFORCE_NOT_NULL(py_out_tensor,
+                              "Output tensor %d should not be nullptr", i);
+      auto* tensor = (*outs)[i]->GetMutable<framework::LoDTensor>();
+      tensor->ShareDataWith(*py_out_tensor);
+      tensor->set_lod(py_out_tensor->lod());
+    } catch (py::cast_error&) {
+      PADDLE_THROW("The %d-th output must be LoDTensor", i);
+    }
+  }
+}
+
 class PyLayer {
  public:
   virtual ~PyLayer() {}

@@ -203,7 +232,11 @@ class PyLayer {
   static void RegisterFunc(int func_id, const py::object& py_func);

   static std::vector<VarBase*> Apply(int func_id,
-                                     const std::vector<VarBase>& inputs);
+                                     const std::vector<VarBase*>& inputs);
+
+  static void ApplyGrad(int func_id,
+                        const std::vector<framework::Variable*>& inputs,
+                        std::vector<framework::Variable*>* outputs);
 };

 }  // namespace imperative
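The new CallPythonFunc overload casts each element of the tuple returned by the Python callable to a LoDTensor and ShareDataWith's it into a pre-allocated output Variable, so a registered function has to hand back a tuple of core.LoDTensor, one per output slot; anything else trips the "The %d-th output must be LoDTensor" check. A hedged sketch of a conforming callable (my_py_func and as_lod_tensor are illustrative names, not part of the diff):

    import numpy as np
    from paddle.fluid import core

    def as_lod_tensor(arr):
        t = core.LoDTensor()
        t.set(arr.astype(np.float32), core.CPUPlace())
        return t

    def my_py_func(tensors):                      # receives the py::cast LoDTensors as in_args
        result = np.array(tensors[0]) + 1.0       # illustrative computation only
        return tuple([as_lod_tensor(result)])     # one LoDTensor per pre-allocated output slot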
paddle/fluid/imperative/tracer.h

@@ -131,6 +131,7 @@ class Tracer {
     if (!stop_gradient) {
       framework::OpDesc* grad_op_desc;
+      // TODO(panyx): Is this leaked?
       auto grad_to_var = new std::unordered_map<std::string, std::string>();
       CreateGradOp(*op_desc, {}, {block}, &grad_op_desc, grad_to_var);
       op->grad_op_desc_ = grad_op_desc;

@@ -143,12 +144,14 @@ class Tracer {
         if (var_it == grad_to_var->end()) {
           auto fwd_var_it = vars.find(grad_invar);
           PADDLE_ENFORCE(fwd_var_it != vars.end());
+          // Forward inputs or outputs.
           grad_in_vars.push_back(fwd_var_it->second->var_);
         } else {
           VarBase* var = vars[var_it->second];
           if (!var->grads_->IsInitialized()) {
             InitVar(var->var_, var->grads_);
           }
+          // Douts.
           grad_in_vars.push_back(var->grads_);
         }
       }

@@ -172,18 +175,51 @@ class Tracer {
     op->block_ = block;
   }

   std::vector<VarBase*> PyTrace(OpBase* op,
-                                const std::vector<VarBase>& inputs) {
-    std::vector<VarBase*> outputs = PyLayer::Apply(op->forward_id_, inputs);
-    /*
-    for (const VarBase& inp : inputs) {
-      if (inp.pre_op_) {
-        op->pre_ops_[it.first].push_back(inp->pre_op_);
-        op->pre_ops_out_idx_[it.first].push_back(inp->pre_op_out_idx_);
-      } else {
-        op->pre_ops_[it.first].push_back(nullptr);
-      }
-    }*/
+                                const std::vector<VarBase*>& inputs,
+                                bool stop_gradient = false) {
+    VLOG(3) << "py_trace";
+    op->input_vars_["X"] = inputs;
+    op->output_vars_["Out"] = PyLayer::Apply(op->forward_id_, inputs);
+    for (VarBase* inp : inputs) {
+      if (inp->pre_op_) {
+        op->pre_ops_["X"].push_back(inp->pre_op_);
+        op->pre_ops_out_idx_["X"].push_back(inp->pre_op_out_idx_);
+      } else {
+        op->pre_ops_["X"].push_back(nullptr);
+      }
+    }
+
+    auto& outputs = op->output_vars_["Out"];
+    for (size_t i = 0; i < outputs.size(); ++i) {
+      VarBase* out = outputs[i];
+      out->stop_gradient_ = stop_gradient;
+      out->pre_op_ = op;
+      out->pre_op_out_name_ = "Out";
+      out->pre_op_out_idx_ = i;
+    }
+    if (!stop_gradient) {
+      auto& grad_input_vars = op->grad_input_vars_["X@GRAD"];
+      auto& grad_output_vars = op->grad_output_vars_["Out@GRAD"];
+
+      for (const VarBase* inp : inputs) {
+        grad_input_vars.push_back(inp->var_);
+      }
+      for (VarBase* out : outputs) {
+        grad_input_vars.push_back(out->var_);
+      }
+      for (VarBase* out : outputs) {
+        grad_input_vars.push_back(out->grads_);
+        if (!grad_input_vars.back()->IsInitialized()) {
+          InitVar(out->var_, grad_input_vars.back());
+        }
+      }
+      for (const VarBase* inp : inputs) {
+        grad_output_vars.push_back(inp->grads_);
+        if (!grad_output_vars.back()->IsInitialized()) {
+          InitVar(inp->var_, grad_output_vars.back());
+        }
+      }
+    }
     return outputs;
   }
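For the common one-input, one-output case, the new PyTrace body packs grad_input_vars_["X@GRAD"] as [forward input, forward output, gradient of the output] and uses the input's grads_ as grad_output_vars_["Out@GRAD"], which is why MyPyLayer.backward in the updated test unpacks exactly three tensors. A hedged sketch of a backward callable written against that layout (the tanh derivative mirrors the test):

    import numpy as np
    from paddle.fluid import core

    def backward(tensors):
        # X@GRAD arrives as [forward input, forward output, grad of output] for a
        # one-input, one-output layer, so exactly three tensors are unpacked here.
        inp, out, dout = (np.array(t) for t in tensors)
        grad_of_input = dout * (1.0 - np.square(out))   # tanh'(x) = 1 - tanh(x)^2, as in the test
        ret = core.LoDTensor()
        ret.set(grad_of_input.astype(np.float32), core.CPUPlace())
        return tuple([ret])   # written back into Out@GRAD, i.e. the input's grads_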
paddle/fluid/pybind/pybind.cc

@@ -175,6 +175,13 @@ PYBIND11_MODULE(core, m) {
           [](imperative::OpBase &self, int forward_id) {
             self.forward_id_ = forward_id;
           },
+          py::return_value_policy::reference)
+      .def_property(
+          "backward_id",
+          [](const imperative::OpBase &self) { return self.backward_id_; },
+          [](imperative::OpBase &self, int backward_id) {
+            self.backward_id_ = backward_id;
+          },
           py::return_value_policy::reference);

   py::class_<imperative::Layer, Layer /* <--- trampoline*/> layer(m, "Layer");

@@ -188,7 +195,7 @@ PYBIND11_MODULE(core, m) {
       .def(py::init<>())
       .def_static(
           "apply",
-          [](int func_id, const std::vector<imperative::VarBase> &inputs)
+          [](int func_id, const std::vector<imperative::VarBase *> &inputs)
              -> std::vector<imperative::VarBase *> {
               return imperative::PyLayer::Apply(func_id, inputs);
          },
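From Python the binding change is small: OpBase gains a writable backward_id next to forward_id, and PyLayer.apply now takes a list of VarBase handles (an imperative Variable's ._ivar), matching the new std::vector<imperative::VarBase*> signature. A tiny sketch of the new property; constructing a bare OpBase like this is for illustration only:

    from paddle.fluid import core

    iop = core.OpBase()
    iop.forward_id = 1           # existing property
    iop.backward_id = 2          # added by this hunk, read and written through the new lambdas
    assert iop.backward_id == 2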
python/paddle/fluid/imperative/layers.py

@@ -59,22 +59,23 @@ class PyLayer(core.PyLayer):
         raise NotImplementedError

     @staticmethod
-    def backward(inputs):
+    def backward(douts):
         raise NotImplementedError

     @classmethod
     def __call__(cls, inputs):
         tracer = framework._imperative_tracer()
         block = framework.default_main_program().current_block()
-        inputs = map(base.to_variable, inputs)
         inputs = [x._ivar for x in inputs]

         PyLayer.register_func(1, cls.forward)
+        PyLayer.register_func(2, cls.backward)

         iop = core.OpBase()
         iop.forward_id = 1
+        iop.backward_id = 2
         block.ops.append(iop)
-        ivars = tracer.py_trace(iop, inputs)
+        ivars = tracer.py_trace(iop, inputs, False)
         # ivars = core.PyLayer.apply(cls.forward, inputs)
         ret = []
         for ivar in ivars:
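Note the removed "inputs = map(base.to_variable, inputs)" line: PyLayer.__call__ no longer wraps raw numpy arrays itself, so callers convert to imperative Variables first, exactly as the updated test_pylayer does. A hedged sketch of the calling convention (DoubleLayer is an illustrative stand-in, not code from this diff):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid import core

    class DoubleLayer(fluid.imperative.PyLayer):
        @staticmethod
        def forward(tensors):
            t = core.LoDTensor()
            t.set(np.array(tensors[0]) * 2.0, core.CPUPlace())
            return tuple([t])

        @staticmethod
        def backward(tensors):
            inp, out, dout = (np.array(x) for x in tensors)
            t = core.LoDTensor()
            t.set(dout * 2.0, core.CPUPlace())
            return tuple([t])

    with fluid.imperative.guard():
        var_inp = fluid.imperative.base.to_variable(np.ones([2, 2], np.float32))  # caller converts
        outs = DoubleLayer()([var_inp])                                            # then passes Variables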
python/paddle/fluid/tests/unittests/test_imperative.py

@@ -49,8 +49,18 @@ class MyPyLayer(fluid.imperative.PyLayer):
         return tuple([tensor])

     @staticmethod
-    def backward(douts, outs):
-        return np.array(douts[0]) * (1 - np.square(np.array(outs[0])))
+    def backward(inputs):
+        sys.stderr.write('calling into backward: %s\n' % str(inputs))
+        inp, out, dout = inputs
+        inp = np.array(inp)
+        out = np.array(out)
+        dout = np.array(dout)
+        sys.stderr.write('calling into backward: %s, %s, %s\n' % (inp, out, dout))
+        ret = np.array(dout) * (1 - np.square(np.array(out)))
+        tensor = core.LoDTensor()
+        tensor.set(ret, core.CPUPlace())
+        return tuple([tensor])


 class MLP(fluid.imperative.Layer):

@@ -71,20 +81,44 @@ class MLP(fluid.imperative.Layer):
 class TestImperative(unittest.TestCase):
+    """
     def test_layer(self):
         with fluid.imperative.guard():
             cl = core.Layer()
             cl.forward([])
             l = fluid.imperative.Layer()
             self.assertRaises(NotImplementedError, l.forward, [])
+    """

     def test_pylayer(self):
+        np_inp = np.ones([2, 2], np.float32)
         with fluid.imperative.guard():
             my_py_layer = MyPyLayer()
-            outs = my_py_layer([np.ones([2, 2], np.float32)])
-            sys.stderr.write('%s\n' % outs[0]._numpy())
-            # out.backward()
+            var_inp = fluid.imperative.base.to_variable(np_inp)
+            outs = my_py_layer([var_inp])
+            dy_out = np.sum(outs[0]._numpy())
+            outs[0]._backward()
+            dy_grad = var_inp._gradient()
+
+        with new_program_scope():
+            inp = fluid.layers.data(
+                name="inp", shape=[2, 2], append_batch_size=False)
+            # TODO(panyx0718): Paddle doesn't diff against data `inp`.
+            x1 = inp * 1
+            # TODO(panyx0718): If reduce_sum is skipped, the result is wrong.
+            x = fluid.layers.reduce_sum(fluid.layers.tanh(x1))
+            param_grads = fluid.backward.append_backward(
+                x, parameter_list=[x1.name])[0]
+            exe = fluid.Executor(fluid.CPUPlace())
+
+            static_out, static_grad = exe.run(
+                feed={inp.name: np_inp},
+                fetch_list=[x.name, param_grads[1].name])
+
+        self.assertTrue(np.allclose(dy_out, static_out))
+        self.assertTrue(np.allclose(dy_grad, static_grad))
+
+    """
     def test_layer_in_out(self):
         np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
         with fluid.imperative.guard():

@@ -138,6 +172,7 @@ class TestImperative(unittest.TestCase):
         self.assertTrue(np.allclose(dy_out, static_out))
         self.assertTrue(np.allclose(dy_grad, static_grad))
+    """


 if __name__ == '__main__':
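The rewritten test_pylayer compares the imperative PyLayer against a static graph that computes reduce_sum(tanh(x1)); the two agree because MyPyLayer.backward implements the tanh derivative, d/dx tanh(x) = 1 - tanh(x)^2. A quick standalone numeric check of that identity, independent of Paddle:

    import numpy as np

    x = np.ones([2, 2])                                 # float64 for a clean finite difference
    out = np.tanh(x)
    dout = np.ones_like(out)                            # gradient of reduce_sum w.r.t. its input
    analytic = dout * (1 - np.square(out))              # what MyPyLayer.backward returns
    numeric = (np.tanh(x + 1e-6) - np.tanh(x - 1e-6)) / 2e-6
    assert np.allclose(analytic, numeric)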