PaddlePaddle / PaddleGAN

Commit c56dbd8f (unverified)
Authored Aug 06, 2020 by LielinJiang; committed via GitHub on Aug 06, 2020

Merge pull request #5 from LielinJiang/benchmark

for Benchmark test

Parents: 3211114d, 3586d7d1

8 changed files with 64 additions and 20 deletions (+64 −20)
configs/cyclegan_cityscapes.yaml     +1  −0
configs/pix2pix_cityscapes.yaml      +1  −0
ppgan/datasets/builder.py            +1  −1
ppgan/engine/trainer.py              +28 −4
ppgan/models/cycle_gan_model.py      +16 −2
ppgan/models/generators/resnet.py    +1  −5
ppgan/models/pix2pix_model.py        +14 −4
ppgan/utils/filesystem.py            +2  −4
configs/cyclegan_cityscapes.yaml

@@ -28,6 +28,7 @@ dataset:
   train:
     name: UnpairedDataset
     dataroot: data/cityscapes
+    num_workers: 4
     phase: train
     max_dataset_size: inf
     direction: AtoB
configs/pix2pix_cityscapes.yaml

@@ -25,6 +25,7 @@ dataset:
   train:
     name: PairedDataset
     dataroot: data/cityscapes
+    num_workers: 4
     phase: train
     max_dataset_size: inf
     direction: BtoA
ppgan/datasets/builder.py

@@ -111,6 +111,6 @@ def build_dataloader(cfg, is_train=True):
     batch_size = cfg.get('batch_size', 1)
     num_workers = cfg.get('num_workers', 0)

-    dataloader = DictDataLoader(dataset, batch_size, is_train)
+    dataloader = DictDataLoader(dataset, batch_size, is_train, num_workers)

     return dataloader
\ No newline at end of file
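
The cfg.get(key, default) lookups keep older YAML configs working when a new key such as num_workers is introduced. The following is a minimal plain-Python sketch of that fallback behaviour, using hypothetical dicts in place of the parsed config:

# Plain-Python sketch of the backward-compatible config lookup used above.
old_cfg = {'batch_size': 1}                      # config written before num_workers existed
new_cfg = {'batch_size': 1, 'num_workers': 4}    # config as changed in this commit

for cfg in (old_cfg, new_cfg):
    batch_size = cfg.get('batch_size', 1)
    num_workers = cfg.get('num_workers', 0)      # falls back to 0 workers if absent
    print('batch_size=%d num_workers=%d' % (batch_size, num_workers))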
ppgan/engine/trainer.py

@@ -2,8 +2,9 @@ import os
 import time
 import logging

+import paddle
-from paddle.imperative import ParallelEnv
+from paddle.imperative import ParallelEnv, DataParallel

 from ..datasets.builder import build_dataloader
 from ..models.builder import build_model

@@ -22,10 +23,13 @@ class Trainer:
         # build model
         self.model = build_model(cfg)

+        # multiple gpus prepare
+        if ParallelEnv().nranks > 1:
+            self.distributed_data_parallel()

         self.logger = logging.getLogger(__name__)

         # base config
         # self.timestamp = time.strftime('-%Y-%m-%d-%H-%M', time.localtime())
         self.output_dir = cfg.output_dir
         self.epochs = cfg.epochs
         self.start_epoch = 0

@@ -38,24 +42,38 @@ class Trainer:
         self.local_rank = ParallelEnv().local_rank

         # time count
         self.time_count = {}

+    def distributed_data_parallel(self):
+        strategy = paddle.imperative.prepare_context()
+        for name in self.model.model_names:
+            if isinstance(name, str):
+                net = getattr(self.model, 'net' + name)
+                setattr(self.model, 'net' + name, DataParallel(net, strategy))
+
     def train(self):
         for epoch in range(self.start_epoch, self.epochs):
-            start_time = time.time()
             self.current_epoch = epoch
+            start_time = step_start_time = time.time()
             for i, data in enumerate(self.train_dataloader):
+                data_time = time.time()
                 self.batch_id = i
                 # unpack data from dataset and apply preprocessing
                 # data input should be dict
                 self.model.set_input(data)
                 self.model.optimize_parameters()

+                self.data_time = data_time - step_start_time
+                self.step_time = time.time() - step_start_time
                 if i % self.log_interval == 0:
                     self.print_log()

                 if i % self.visual_interval == 0:
                     self.visual('visual_train')
+                step_start_time = time.time()

             self.logger.info('train one epoch time: {}'.format(time.time() - start_time))
             if epoch % self.weight_interval == 0:
                 self.save(epoch, 'weight', keep=-1)

@@ -98,6 +116,12 @@ class Trainer:
         for k, v in losses.items():
             message += '%s: %.3f ' % (k, v)

+        if hasattr(self, 'data_time'):
+            message += 'reader cost: %.5fs ' % self.data_time
+
+        if hasattr(self, 'step_time'):
+            message += 'batch cost: %.5fs' % self.step_time
+
         # print the message
         self.logger.info(message)
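
The reader-cost / batch-cost split added above can be reproduced with a minimal, framework-free sketch; load_batch() and train_step() below are hypothetical stand-ins. The first timestamp is taken as soon as the batch is available, so its distance from the previous step start measures time spent waiting on the data pipeline, while the second timestamp covers the whole step including the model update:

# Minimal timing sketch (plain Python, hypothetical load_batch/train_step stand-ins).
import time

def load_batch():
    time.sleep(0.01)          # pretend the reader needs 10 ms
    return [0, 1, 2]

def train_step(batch):
    time.sleep(0.02)          # pretend the update needs 20 ms

step_start_time = time.time()
for _ in range(3):
    batch = load_batch()
    data_time = time.time()                        # batch is ready here
    train_step(batch)
    reader_cost = data_time - step_start_time      # waiting for data
    batch_cost = time.time() - step_start_time     # whole step
    print('reader cost: %.5fs  batch cost: %.5fs' % (reader_cost, batch_cost))
    step_start_time = time.time()                  # reset the clock for the next step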
ppgan/models/cycle_gan_model.py

 import paddle
+from paddle.imperative import ParallelEnv

 from .base_model import BaseModel
 from .builder import MODELS

@@ -137,6 +138,12 @@ class CycleGANModel(BaseModel):
         loss_D_fake = self.criterionGAN(pred_fake, False)
         # Combined loss and calculate gradients
         loss_D = (loss_D_real + loss_D_fake) * 0.5
-        loss_D.backward()
+        # loss_D.backward()
+        if ParallelEnv().nranks > 1:
+            loss_D = netD.scale_loss(loss_D)
+            loss_D.backward()
+            netD.apply_collective_grads()
+        else:
+            loss_D.backward()
         return loss_D

@@ -177,6 +184,13 @@ class CycleGANModel(BaseModel):
         self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
         # combined loss and calculate gradients
         self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
-        self.loss_G.backward()
+        if ParallelEnv().nranks > 1:
+            self.loss_G = self.netG_A.scale_loss(self.loss_G)
+            self.loss_G.backward()
+            self.netG_A.apply_collective_grads()
+            self.netG_B.apply_collective_grads()
+        else:
+            self.loss_G.backward()

     def optimize_parameters(self):
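
Both the discriminator and generator backward passes follow the same single- vs multi-GPU pattern: with more than one trainer, the loss is scaled by the DataParallel wrapper before backward() and gradients are all-reduced afterwards. The sketch below shows only that control flow; FakeLoss and FakeParallelNet are dummy stand-ins, while the real scale_loss and apply_collective_grads come from paddle.imperative.DataParallel as used in the diff:

# Control-flow sketch only; FakeLoss/FakeParallelNet are stand-ins, not Paddle APIs.
class FakeLoss(float):
    def backward(self):
        print('backward on loss =', float(self))

class FakeParallelNet:
    def scale_loss(self, loss):               # divide the loss across trainers
        return FakeLoss(loss / 2.0)
    def apply_collective_grads(self):         # all-reduce gradients between trainers
        print('all-reduce gradients')

def backward_with_optional_parallel(loss, net, nranks):
    if nranks > 1:                            # multi-GPU: scale, backward, sync grads
        loss = net.scale_loss(loss)
        loss.backward()
        net.apply_collective_grads()
    else:                                     # single GPU: plain backward
        loss.backward()

backward_with_optional_parallel(FakeLoss(1.0), FakeParallelNet(), nranks=2)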
ppgan/models/generators/resnet.py

@@ -36,11 +36,8 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         else:
             use_bias = norm_layer == nn.InstanceNorm

-        print('norm layer:', norm_layer, 'use bias:', use_bias)
-
         model = [ReflectionPad2d(3),
                  nn.Conv2D(input_nc, ngf, filter_size=7, padding=0, bias_attr=use_bias),
-                 # nn.nn.Conv2D(input_nc, ngf, filter_size=7, padding=0, bias_attr=use_bias),
                  norm_layer(ngf),
                  nn.ReLU()]
@@ -62,8 +59,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
             model += [nn.Conv2DTranspose(ngf * mult,
                                          int(ngf * mult / 2),
                                          filter_size=3,
                                          stride=2,
-                                         padding=1,
-                                         #output_padding=1,
+                                         # padding='same', #output_padding=1,
+                                         padding=1,
                                          bias_attr=use_bias),
                       Pad2D(paddings=[0, 1, 0, 1], mode='constant', pad_value=0.0),
                       norm_layer(int(ngf * mult / 2)),
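
The asymmetric Pad2D([0, 1, 0, 1]) after the transposed convolution compensates for the absent output_padding: with stride=2, filter_size=3 and padding=1, a transposed conv maps a height H to 2H − 1, and one extra row/column of zero padding restores the expected 2H. A quick arithmetic check using the standard transposed-convolution output formula, in plain Python:

# Output-size check for the upsampling block above (plain arithmetic, no framework).
def conv_transpose_out(h, filter_size=3, stride=2, padding=1, output_padding=0):
    # standard transposed-convolution output size formula
    return (h - 1) * stride - 2 * padding + filter_size + output_padding

h = 64
print(conv_transpose_out(h))        # 127 == 2*h - 1 without output_padding
print(conv_transpose_out(h) + 1)    # 128 after Pad2D adds one extra row/column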
ppgan/models/pix2pix_model.py

 import paddle
+from paddle.imperative import ParallelEnv

 from .base_model import BaseModel
 from .builder import MODELS

@@ -43,7 +44,6 @@ class Pix2PixModel(BaseModel):
         # define networks (both generator and discriminator)
         self.netG = build_generator(opt.model.generator)

         # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
         if self.isTrain:
             self.netD = build_discriminator(opt.model.discriminator)

@@ -98,6 +98,11 @@ class Pix2PixModel(BaseModel):
         self.loss_D_real = self.criterionGAN(pred_real, True)
         # combine loss and calculate gradients
         self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
-        self.loss_D.backward()
+        if ParallelEnv().nranks > 1:
+            self.loss_D = self.netD.scale_loss(self.loss_D)
+            self.loss_D.backward()
+            self.netD.apply_collective_grads()
+        else:
+            self.loss_D.backward()

     def backward_G(self):

@@ -110,7 +115,12 @@ class Pix2PixModel(BaseModel):
         self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
         # combine loss and calculate gradients
         self.loss_G = self.loss_G_GAN + self.loss_G_L1
         # self.loss_G = self.loss_G_L1
-        self.loss_G.backward()
+        if ParallelEnv().nranks > 1:
+            self.loss_G = self.netG.scale_loss(self.loss_G)
+            self.loss_G.backward()
+            self.netG.apply_collective_grads()
+        else:
+            self.loss_G.backward()

     def optimize_parameters(self):
ppgan/utils/filesystem.py

@@ -11,15 +11,13 @@ def save(state_dicts, file_name):
     def convert(state_dict):
         model_dict = {}
-        # name_table = {}
         for k, v in state_dict.items():
             if isinstance(v, (paddle.framework.Variable, paddle.imperative.core.VarBase)):
                 model_dict[k] = v.numpy()
             else:
-                return state_dict
-            # name_table[k] = v.name
-        # model_dict["StructuredToParameterName@@"] = name_table
+                model_dict[k] = v
+
         return model_dict

     final_dict = {}
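
After this change, convert() walks a state dict and turns tensor-like values into numpy arrays while copying everything else (for example optimizer scalars) through unchanged. A plain-Python sketch of the same pattern, using a hypothetical FakeTensor stand-in instead of Paddle's Variable/VarBase:

# Sketch only; FakeTensor is a hypothetical stand-in for a Paddle tensor with .numpy().
import numpy as np

class FakeTensor:
    def __init__(self, data):
        self._data = np.asarray(data)
    def numpy(self):
        return self._data

def convert(state_dict):
    model_dict = {}
    for k, v in state_dict.items():
        if isinstance(v, FakeTensor):   # real code checks paddle Variable / VarBase
            model_dict[k] = v.numpy()   # detach to a plain numpy array for saving
        else:
            model_dict[k] = v           # non-tensor entries pass through unchanged
    return model_dict

print(convert({'weight': FakeTensor([1.0, 2.0]), 'learning_rate': 0.0002}))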