BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit de27569e (unverified)
Authored Jun 30, 2020 by Aurelius84 · Committed by GitHub on Jun 30, 2020

[Dy2Stat] fix diff of cycle GAN model on GPU (#25233)
* fix GPU diff test=develop
* refine code test=develop
Parent: 23a4f54b
Showing 1 changed file with 12 additions and 13 deletions.
python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py  (+12 −13)
@@ -40,10 +40,13 @@ from paddle.fluid.dygraph.nn import Conv2D, Conv2DTranspose, BatchNorm
 # Note: Set True to eliminate randomness.
 # 1. For one operation, cuDNN has several algorithms,
 # some algorithm results are non-deterministic, like convolution algorithms.
+# 2. If include BatchNorm, please set `use_global_stats=True` to avoid using
+# cudnnBatchNormalizationBackward which is non-deterministic.
 if fluid.is_compiled_with_cuda():
     fluid.set_flags({'FLAGS_cudnn_deterministic': True})
 
-use_cudnn = True
+# set False to speed up training.
+use_cudnn = False
 step_per_epoch = 10
 lambda_A = 10.0
 lambda_B = 10.0
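
The hunk above is the core of the fix: `FLAGS_cudnn_deterministic` makes cuDNN pick deterministic kernels at some speed cost, while `use_cudnn = False` keeps the training loop fast. Below is a minimal sketch of why the flag matters, assuming the 1.8-era fluid API this file already uses; the `run_once` helper, the Conv2D shapes, and the seed handling are illustrative, not part of the commit:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Conv2D

if fluid.is_compiled_with_cuda():
    # Without this flag, cuDNN may select convolution algorithms whose
    # backward pass uses atomics, giving run-to-run differences.
    fluid.set_flags({'FLAGS_cudnn_deterministic': True})


def run_once(seed=2020):
    # Hypothetical helper: one seeded forward/backward pass of a conv.
    place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
        else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        conv = Conv2D(num_channels=3, num_filters=4, filter_size=3)
        x = fluid.dygraph.to_variable(np.ones([1, 3, 8, 8], 'float32'))
        loss = fluid.layers.reduce_mean(conv(x))
        loss.backward()
        return conv.weight.gradient().copy()


# With the flag set, two independent runs should yield identical gradients.
np.testing.assert_array_equal(run_once(), run_once())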
@@ -110,7 +113,7 @@ class Cycle_Gan(fluid.dygraph.Layer):
         return fake_A, fake_B, cyc_A, cyc_B, g_A_loss, g_B_loss, idt_loss_A, idt_loss_B, cyc_A_loss, cyc_B_loss, g_loss
 
     @declarative
-    def disriminatorA(self, input_A, input_B):
+    def discriminatorA(self, input_A, input_B):
         """
         Discriminator A of GAN model.
         """
@@ -326,6 +329,7 @@ class conv2d(fluid.dygraph.Layer):
             bias_attr=con_bias_attr)
         if norm:
             self.bn = BatchNorm(
+                use_global_stats=True,  # set True to use deterministic algorithm
                 num_channels=num_filters,
                 param_attr=fluid.ParamAttr(
                     initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
@@ -381,6 +385,7 @@ class DeConv2D(fluid.dygraph.Layer):
             bias_attr=de_bias_attr)
         if norm:
             self.bn = BatchNorm(
+                use_global_stats=True,  # set True to use deterministic algorithm
                 num_channels=num_filters,
                 param_attr=fluid.ParamAttr(
                     initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
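
Both conv2d and DeConv2D receive the same one-line change. With `use_global_stats=True`, BatchNorm normalizes with its running mean and variance rather than per-batch statistics, so the backward pass avoids the non-deterministic `cudnnBatchNormalizationBackward` kernel named in the first hunk's comment. A self-contained construction sketch (the channel count is illustrative):

import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import BatchNorm

# use_global_stats=True: even during training, normalize with the
# running statistics, making the GPU backward pass deterministic.
bn = BatchNorm(
    use_global_stats=True,
    num_channels=64,
    param_attr=fluid.ParamAttr(
        initializer=fluid.initializer.NormalInitializer(1.0, 0.02)))

This trades realistic batch statistics for reproducibility, which is acceptable in an equivalence test, though usually not in real training.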
@@ -429,7 +434,6 @@ class ImagePool(object):
 
 
 def reader_creater():
-    # local_random = np.random.RandomState(SEED)
     def reader():
         while True:
             fake_image = np.uint8(
@@ -480,13 +484,8 @@ def optimizer_setting(parameters):
 
 
 def train(args, to_static):
-    # FIXME(Aurelius84): Found diff just on GPU and it disappears when we remove the BatchNorm layers.
-    # In dygraph mode, it still exists with different output while executing the every time.
-
-    # place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
-    #     else fluid.CPUPlace()
-    place = fluid.CPUPlace()
-
+    place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
+        else fluid.CPUPlace()
 
     program_translator.enable(to_static)
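
With BatchNorm made deterministic, the FIXME no longer applies and the test selects a CUDA place again when available. The `program_translator.enable(to_static)` call it keeps is the switch these dygraph_to_static tests use to run one code path in both modes; a rough sketch of that pattern follows, where the `double` function is a made-up stand-in and the import paths assume the 1.8-era layout:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import ProgramTranslator
from paddle.fluid.dygraph.jit import declarative

program_translator = ProgramTranslator()


@declarative
def double(x):
    return x * 2


def run(to_static):
    # When disabled, @declarative functions fall back to plain
    # dygraph execution, so both modes share one implementation.
    program_translator.enable(to_static)
    with fluid.dygraph.guard(fluid.CPUPlace()):
        x = fluid.dygraph.to_variable(np.array([1.0], dtype='float32'))
        return double(x).numpy()


# The test asserts that both execution modes agree.
np.testing.assert_allclose(run(to_static=False), run(to_static=True))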
@@ -553,8 +552,8 @@ def train(args, to_static):
                     fake_pool_A = to_variable(fake_pool_A)
 
                     # optimize the d_A network
-                    rec_B, fake_pool_rec_B = cycle_gan.disriminatorA(data_B,
-                                                                     fake_pool_B)
+                    rec_B, fake_pool_rec_B = cycle_gan.discriminatorA(data_B,
+                                                                      fake_pool_B)
                     d_loss_A = (fluid.layers.square(fake_pool_rec_B) +
                                 fluid.layers.square(rec_B - 1)) / 2.0
                     d_loss_A = fluid.layers.reduce_mean(d_loss_A)
@@ -581,7 +580,6 @@ def train(args, to_static):
                         idt_loss_A, g_B_loss, cyc_B_loss, idt_loss_B
                     ]
                     cur_batch_loss = [x.numpy()[0] for x in cur_batch_loss]
-                    loss_data.append(cur_batch_loss)
                     batch_time = time.time() - s_time
                     t_time += batch_time
 
@@ -593,6 +591,7 @@ def train(args, to_static):
 
                     if batch_id > args.train_step:
                         break
+                    loss_data.append(cur_batch_loss)
 
         return np.array(loss_data)
 
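
Taken together, the changes remove each identified source of GPU randomness, so the test can require the dygraph and dy2stat loss curves to agree. The closing comparison of such a test has roughly this shape; the `train` stub below stands in for the real `train(args, to_static)` and its numbers are made up:

import numpy as np


def train(to_static):
    # Stub standing in for the real train(args, to_static), which
    # returns the recorded per-batch losses as an array.
    return np.array([[0.5 / (i + 1)] for i in range(10)])


dygraph_loss = train(to_static=False)
static_loss = train(to_static=True)
# With cuDNN determinism and use_global_stats=True in place, the two
# loss curves are expected to match on GPU as well as CPU.
np.testing.assert_allclose(dygraph_loss, static_loss, rtol=1e-05)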