Commit 9a4314f0
Authored Jan 16, 2019 by Xin Pan

imperative gan

test=develop

Parent: a61e7d0f
Showing 4 changed files with 40 additions and 15 deletions (+40 −15)
paddle/fluid/imperative/layer.h                               +7   −2
paddle/fluid/pybind/pybind.cc                                 +1   −0
python/paddle/fluid/framework.py                              +3   −0
python/paddle/fluid/tests/unittests/test_imperative_gan.py   +29  −13
paddle/fluid/imperative/layer.h

@@ -101,7 +101,6 @@ class VarBase {
   // Owns `var` and `grad`
   VarBase(framework::Variable* var, VarBase* grad)
       : pre_op_(nullptr),
-        pre_op_out_name_(),
         pre_op_out_idx_(-1),
         var_desc_(nullptr),
         var_(var),

@@ -110,7 +109,6 @@ class VarBase {
   explicit VarBase(bool stop_gradient)
       : pre_op_(nullptr),
-        pre_op_out_name_(),
         pre_op_out_idx_(-1),
         var_desc_(nullptr),
         var_(new framework::Variable()),

@@ -127,6 +125,13 @@ class VarBase {
     }
   }

+  void Clear() {
+    delete grads_;
+    grads_ = new VarBase(true);
+
+    pre_op_ = nullptr;
+    pre_op_out_name_ = "";
+  }
+
   void RunBackward();

   framework::LoDTensor& GradValue();
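The new `Clear()` method resets a variable's autograd state: it frees the gradient accumulated in `grads_`, replaces it with a fresh stop-gradient `VarBase`, and forgets the producing op by resetting `pre_op_` and `pre_op_out_name_`. A minimal Python mirror of these semantics, using toy stand-in names rather than the Paddle types:

# Toy stand-in for VarBase::Clear() semantics -- illustration only,
# not the Paddle implementation.
class ToyVarBase(object):
    def __init__(self, stop_gradient=False):
        self.stop_gradient = stop_gradient
        self.grad = None            # mirrors grads_
        self.pre_op = None          # mirrors pre_op_ (producing op in the trace)
        self.pre_op_out_name = ''   # mirrors pre_op_out_name_

    def clear(self):
        # Drop the accumulated gradient, start over with a fresh
        # stop-gradient holder, and forget which op produced this variable.
        self.grad = ToyVarBase(stop_gradient=True)
        self.pre_op = None
        self.pre_op_out_name = ''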
paddle/fluid/pybind/pybind.cc

@@ -133,6 +133,7 @@ PYBIND11_MODULE(core, m) {
            [](imperative::VarBase &self) { self.RunBackward(); })
       .def("_grad_name", &imperative::VarBase::GradName)
       .def("_grad_value", &imperative::VarBase::GradValue)
+      .def("_clear", &imperative::VarBase::Clear)
       .def("_grad_ivar",
            [](const imperative::VarBase &self) { return self.grads_; },
            py::return_value_policy::reference)
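The single added line exposes `Clear()` to Python as `_clear`, alongside the existing gradient accessors. A hedged sketch of using the exposed handle from Python; `ivar` stands for the bound `imperative::VarBase`, reached as `Variable._ivar` in the `framework.py` change below:

import numpy as np

def snapshot_and_reset(ivar):
    # Copy the gradient out first: _grad_value() (bound above) returns the
    # gradient tensor, while _clear() deletes the underlying grads_ holder.
    grad = np.array(ivar._grad_value())
    ivar._clear()  # the binding added in this commit
    return grad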
python/paddle/fluid/framework.py

@@ -388,6 +388,9 @@ class Variable(object):
     def _gradient(self):
         return np.array(self._ivar._grad_value())

+    def _clear(self):
+        self._ivar._clear()
+
     def __str__(self):
         return self.to_string(True)
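A hedged usage sketch of the new `Variable._clear()` in imperative mode, assuming PaddlePaddle as of this commit; the `to_variable` import path and the choice of `reduce_mean` as a stand-in op are assumptions, not part of this diff:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.imperative.base import to_variable  # import path assumed

with fluid.imperative.guard():
    x = to_variable(np.ones([2, 2], np.float32))
    loss = fluid.layers.reduce_mean(x)
    loss._backward()         # populate gradients, as the test below does
    print(loss._gradient())  # ndarray copy via self._ivar._grad_value()
    loss._clear()            # new in this commit: reset gradient state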
python/paddle/fluid/tests/unittests/test_imperative_gan.py

@@ -69,8 +69,6 @@ class TestImperativeMnist(unittest.TestCase):
         generate_p.random_seed = seed

         scope = fluid.core.Scope()
-        exe = fluid.Executor(fluid.CPUPlace())
-        sys.stderr.write('1111\n')
         with new_program_scope(
                 main=discriminate_p, startup=startup, scope=scope):
             discriminator = Discriminator()

@@ -117,6 +115,8 @@ class TestImperativeMnist(unittest.TestCase):
             sgd = SGDOptimizer(learning_rate=1e-3)
             sgd.minimize(g_loss)

+        exe = fluid.Executor(fluid.CPUPlace())
+        static_params = dict()
         with fluid.scope_guard(scope):
             img = np.ones([2, 1], np.float32)
             noise = np.ones([2, 2], np.float32)

@@ -128,14 +128,14 @@ class TestImperativeMnist(unittest.TestCase):
             g_loss_val = exe.run(generate_p,
                                  feed={'noise': noise},
                                  fetch_list=[g_loss])[0]
-            sys.stderr.write('d_loss %s, g_loss: %s\n' %
-                             (d_loss_val, g_loss_val))
-
-            static_params = dict()
-            for param in discriminate_p.global_block().all_parameters():
-                sys.stderr.write('%s\n' % param.name)
+            for param in generate_p.global_block().all_parameters():
                 static_params[param.name] = np.array(
                     scope.find_var(param.name).get_tensor())
-                sys.stderr.write('static_param_loss: %s: %s\n' %
-                                 (param.name, np.sum(static_params[param.name])))
+            sys.stderr.write('d_loss %s, g_loss: %s\n' %
+                             (d_loss_val, g_loss_val))

         dy_params = dict()
         with fluid.imperative.guard():

@@ -158,15 +158,31 @@ class TestImperativeMnist(unittest.TestCase):
                         x=d_fake, label=to_variable(np.zeros([2, 1], np.float32))))
                 d_loss = d_loss_real + d_loss_fake
-                sys.stderr.write('dy_d_loss: %s\n' % d_loss._numpy())
                 d_loss._backward()
                 sgd.minimize(d_loss)
                 for p in discriminator.parameters():
-                    dy_params[p.name] = p._numpy()
+                    p._clear()
+                for p in generator.parameters():
+                    p._clear()

-                for k, v in six.iteritems(dy_params):
-                    sys.stderr.write('dy_param_loss: %s: %s\n' % (k, np.sum(v)))
-                    sys.stderr.write('static_param_loss: %s: %s\n' % (k, np.sum(v)))
+                d_fake = discriminator(
+                    generator(to_variable(np.ones([2, 2], np.float32))))
+                g_loss = fluid.layers.reduce_mean(
+                    fluid.layers.sigmoid_cross_entropy_with_logits(
+                        x=d_fake, label=to_variable(np.ones([2, 1], np.float32))))
+                g_loss._backward()
+                sgd = SGDOptimizer(learning_rate=1e-3)
+                sgd.minimize(g_loss)
+                for p in discriminator.parameters():
+                    dy_params[p.name] = p._numpy()
+                    sys.stderr.write('dy_param_loss: %s: %s\n' %
+                                     (p.name, np.sum(dy_params[p.name])))
+                for p in generator.parameters():
+                    dy_params[p.name] = p._numpy()
+                    sys.stderr.write('dy_param_loss: %s: %s\n' %
+                                     (p.name, np.sum(dy_params[p.name])))
+
+                sys.stderr.write('dy_d_loss: %s, dy_g_loss: %s\n' %
+                                 (d_loss._numpy(), g_loss._numpy()))


 if __name__ == '__main__':
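The reworked test now runs a full alternating GAN step in imperative mode: update the discriminator, call `_clear()` on every parameter so gradients accumulated during that update cannot leak into the next pass, then build and minimize the generator loss. A condensed skeleton of the pattern, with toy callables standing in for the test's `Discriminator` and `Generator` (illustration only, not the test itself):

def gan_step(discriminator, generator, sgd):
    # Discriminator update (stands in for d_loss_real + d_loss_fake above).
    d_loss = discriminator.loss()
    d_loss._backward()
    sgd.minimize(d_loss)

    # Reset accumulated gradients before the generator pass -- the point of
    # the new _clear() plumbing this test exercises.
    for p in discriminator.parameters():
        p._clear()
    for p in generator.parameters():
        p._clear()

    # Generator update.
    g_loss = generator.loss(discriminator)
    g_loss._backward()
    sgd.minimize(g_loss)
    return d_loss, g_loss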