PaddlePaddle / PaddleGAN — commit e41decb6 (unverified)

Authored 4 years ago by LielinJiang, committed 4 years ago via GitHub.

Merge pull request #26 from LielinJiang/adapt-to-2.0-api-0920

Fix nan when api adapt to 2.0-beta

Parent commits: a7088eb8, fdcb8d55

Showing 14 changed files with 530 additions and 444 deletions (+530 −444).
Changed files:

configs/pix2pix_cityscapes.yaml              +1   −1
ppgan/datasets/builder.py                    +2   −2
ppgan/engine/trainer.py                      +27  −12
ppgan/models/backbones/resnet_backbone.py    +32  −15
ppgan/models/discriminators/nlayers.py       +30  −17
ppgan/models/generators/deoldify.py          +1   −2
ppgan/models/generators/remaster.py          +215 −150
ppgan/models/generators/resnet.py            +77  −41
ppgan/models/generators/unet.py              +83  −39
ppgan/models/losses.py                       +13  −9
ppgan/models/pix2pix_model.py                +5   −2
ppgan/modules/nn.py                          +19  −146
ppgan/modules/norm.py                        +23  −6
ppgan/utils/setup.py                         +2   −2
configs/pix2pix_cityscapes.yaml
@@ -25,7 +25,7 @@ dataset:
   train:
     name: PairedDataset
     dataroot: data/cityscapes
-    num_workers: 4
+    num_workers: 0
     phase: train
     max_dataset_size: inf
     direction: BtoA
ppgan/datasets/builder.py
@@ -56,8 +56,8 @@ class DictDataLoader():
         self.dataset = DictDataset(dataset)

-        place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
-            if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)
+        place = paddle.CUDAPlace(ParallelEnv().dev_id) \
+            if ParallelEnv().nranks > 1 else paddle.CUDAPlace(0)

         sampler = DistributedBatchSampler(self.dataset,
                                           batch_size=batch_size,
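Note: DictDataLoader wraps the standard paddle.io pipeline (a dataset, a DistributedBatchSampler, a DataLoader bound to the selected place). A minimal sketch of that pattern, not code from this commit — the toy dataset, shapes, and batch size below are assumptions:

    import numpy as np
    import paddle
    from paddle.io import Dataset, DataLoader, DistributedBatchSampler

    class RandomPairs(Dataset):
        """Toy stand-in for DictDataset: returns (input, target) numpy pairs."""
        def __len__(self):
            return 16
        def __getitem__(self, idx):
            return (np.random.rand(3, 32, 32).astype('float32'),
                    np.random.rand(3, 32, 32).astype('float32'))

    paddle.disable_static()                 # dygraph mode, as in this commit
    dataset = RandomPairs()
    # one sampler per process; in multi-GPU runs each rank iterates its own shard
    sampler = DistributedBatchSampler(dataset, batch_size=4, shuffle=True, drop_last=True)
    loader = DataLoader(dataset, batch_sampler=sampler, num_workers=0)

    for a, b in loader:
        print(a.shape, b.shape)             # [4, 3, 32, 32] [4, 3, 32, 32]
        break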
ppgan/engine/trainer.py
@@ -13,6 +13,7 @@ from ..utils.visual import tensor2img, save_image
 from ..utils.filesystem import save, load, makedirs
+from ..metric.psnr_ssim import calculate_psnr, calculate_ssim

 class Trainer:
     def __init__(self, cfg):
@@ -51,7 +52,6 @@ class Trainer:
         self.time_count = {}
         self.best_metric = {}

     def distributed_data_parallel(self):
-        strategy = paddle.distributed.prepare_context()
         for name in self.model.model_names:
@@ -82,6 +82,7 @@ class Trainer:
                     self.visual('visual_train')

                 step_start_time = time.time()

             self.logger.info('train one epoch time: {}'.format(time.time() - start_time))
+            if self.validate_interval > -1 and epoch % self.validate_interval:
@@ -93,7 +94,8 @@ class Trainer:
     def validate(self):
         if not hasattr(self, 'val_dataloader'):
-            self.val_dataloader = build_dataloader(self.cfg.dataset.val, is_train=False)
+            self.val_dataloader = build_dataloader(self.cfg.dataset.val,
+                                                   is_train=False)

         metric_result = {}
@@ -106,7 +108,7 @@ class Trainer:
             visual_results = {}
             current_paths = self.model.get_image_paths()
             current_visuals = self.model.get_current_visuals()

             for j in range(len(current_paths)):
                 short_path = os.path.basename(current_paths[j])
                 basename = os.path.splitext(short_path)[0]
@@ -115,26 +117,38 @@ class Trainer:
                     visual_results.update({name: img_tensor[j]})

                 if 'psnr' in self.cfg.validate.metrics:
                     if 'psnr' not in metric_result:
                         metric_result['psnr'] = calculate_psnr(
                             tensor2img(current_visuals['output'][j], (0., 1.)),
                             tensor2img(current_visuals['gt'][j], (0., 1.)),
                             **self.cfg.validate.metrics.psnr)
                     else:
                         metric_result['psnr'] += calculate_psnr(
                             tensor2img(current_visuals['output'][j], (0., 1.)),
                             tensor2img(current_visuals['gt'][j], (0., 1.)),
                             **self.cfg.validate.metrics.psnr)
                 if 'ssim' in self.cfg.validate.metrics:
                     if 'ssim' not in metric_result:
                         metric_result['ssim'] = calculate_ssim(
                             tensor2img(current_visuals['output'][j], (0., 1.)),
                             tensor2img(current_visuals['gt'][j], (0., 1.)),
                             **self.cfg.validate.metrics.ssim)
                     else:
                         metric_result['ssim'] += calculate_ssim(
                             tensor2img(current_visuals['output'][j], (0., 1.)),
                             tensor2img(current_visuals['gt'][j], (0., 1.)),
                             **self.cfg.validate.metrics.ssim)

             self.visual('visual_val', visual_results=visual_results)

             if i % self.log_interval == 0:
                 self.logger.info('val iter: [%d/%d]' % (i, len(self.val_dataloader)))

         for metric_name in metric_result.keys():
             metric_result[metric_name] /= len(self.val_dataloader.dataset)

         self.logger.info('Epoch {} validate end: {}'.format(self.current_epoch,
                                                             metric_result))

     def test(self):
         if not hasattr(self, 'test_dataloader'):
@@ -266,6 +280,7 @@ class Trainer:
         for name in self.model.model_names:
             if isinstance(name, str):
                 self.logger.info('laod model {} {} params!'.format(
                     self.cfg.model.name, 'net' + name))
                 net = getattr(self.model, 'net' + name)
                 net.set_dict(state_dicts['net' + name])
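Note: the validate() change accumulates per-image PSNR/SSIM into metric_result and divides by the dataset size at the end. A minimal sketch of that accumulate-then-average pattern; the metric values below are made-up stand-ins for calculate_psnr() outputs, not PaddleGAN code:

    def accumulate(metric_result, name, value):
        # the first sample initializes the entry, later samples add to it
        if name not in metric_result:
            metric_result[name] = value
        else:
            metric_result[name] += value

    metric_result = {}
    per_image_psnr = [32.1, 30.7, 33.4]      # pretend per-image PSNR values
    for v in per_image_psnr:
        accumulate(metric_result, 'psnr', v)

    for name in metric_result:
        metric_result[name] /= len(per_image_psnr)
    print(metric_result)                      # {'psnr': 32.066...}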
ppgan/models/backbones/resnet_backbone.py
 import paddle
 import paddle.nn as nn

 __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']

 def conv3x3(in_planes, out_planes, stride=1):
     "3x3 convolution with padding"
     return nn.Conv2d(in_planes,
                      out_planes,
                      kernel_size=3,
                      stride=stride,
                      padding=1,
                      bias_attr=False)

-class BasicBlock(paddle.fluid.Layer):
+class BasicBlock(nn.Layer):
     expansion = 1

     def __init__(self, inplanes, planes, stride=1, downsample=None):
@@ -44,17 +48,24 @@ class BasicBlock(paddle.fluid.Layer):
         return out

-class Bottleneck(paddle.fluid.Layer):
+class Bottleneck(nn.Layer):
     expansion = 4

     def __init__(self, inplanes, planes, stride=1, downsample=None):
         super(Bottleneck, self).__init__()
         self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias_attr=False)
         self.bn1 = nn.BatchNorm(planes)
         self.conv2 = nn.Conv2d(planes,
                                planes,
                                kernel_size=3,
                                stride=stride,
                                padding=1,
                                bias_attr=False)
         self.bn2 = nn.BatchNorm(planes)
         self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias_attr=False)
         self.bn3 = nn.BatchNorm(planes * 4)
         self.relu = nn.ReLU()
         self.downsample = downsample
@@ -82,12 +93,15 @@ class Bottleneck(paddle.fluid.Layer):
         return out

-class ResNet(paddle.fluid.Layer):
+class ResNet(nn.Layer):
     def __init__(self, block, layers, num_classes=1000):
         self.inplanes = 64
         super(ResNet, self).__init__()
         self.conv1 = nn.Conv2d(3,
                                64,
                                kernel_size=7,
                                stride=2,
                                padding=3,
                                bias_attr=False)
         self.bn1 = nn.BatchNorm(64)
         self.relu = nn.ReLU()
@@ -103,8 +117,11 @@ class ResNet(paddle.fluid.Layer):
         downsample = None
         if stride != 1 or self.inplanes != planes * block.expansion:
             downsample = nn.Sequential(
                 nn.Conv2d(self.inplanes,
                           planes * block.expansion,
                           kernel_size=1,
                           stride=stride,
                           bias_attr=False),
                 nn.BatchNorm(planes * block.expansion),
             )
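Note: the recurring change across these files is moving network modules from paddle.fluid.Layer / paddle.fluid.dygraph.Layer onto paddle.nn.Layer. A generic illustration of the 2.0-style base class, not code from this repository; it follows the nn.Conv2d / nn.BatchNorm spelling used at this 2.0-beta commit (later releases rename Conv2d to Conv2D):

    import paddle
    import paddle.nn as nn

    class ConvBNReLU(nn.Layer):
        """Minimal 2.0-style sublayer: subclass nn.Layer instead of paddle.fluid.Layer."""
        def __init__(self, in_ch, out_ch):
            super(ConvBNReLU, self).__init__()
            self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias_attr=False)
            self.bn = nn.BatchNorm(out_ch)
            self.relu = nn.ReLU()

        def forward(self, x):
            return self.relu(self.bn(self.conv(x)))

    paddle.disable_static()
    block = ConvBNReLU(3, 16)
    y = block(paddle.rand([1, 3, 32, 32]))
    print(y.shape)   # [1, 16, 32, 32]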
ppgan/models/discriminators/nlayers.py
 import functools
 import numpy as np

 import paddle
 import paddle.nn as nn

-from ...modules.nn import ReflectionPad2d, LeakyReLU, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer
 from .builder import DISCRIMINATORS

 @DISCRIMINATORS.register()
-class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
+class NLayerDiscriminator(nn.Layer):
     """Defines a PatchGAN discriminator"""

     def __init__(self, input_nc, ndf=64, n_layers=3, norm_type='instance'):
         """Construct a PatchGAN discriminator
@@ -24,36 +22,51 @@ class NLayerDiscriminator(paddle.fluid.dygraph.Layer):
         """
         super(NLayerDiscriminator, self).__init__()
         norm_layer = build_norm_layer(norm_type)
         if type(norm_layer) == functools.partial:
             use_bias = norm_layer.func == nn.InstanceNorm
         else:
             use_bias = norm_layer == nn.InstanceNorm

         kw = 4
         padw = 1
-        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), LeakyReLU(0.2, True)]
+        sequence = [
+            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
+            nn.LeakyReLU(0.2)
+        ]
         nf_mult = 1
         nf_mult_prev = 1
         for n in range(1, n_layers):
             nf_mult_prev = nf_mult
             nf_mult = min(2**n, 8)
             sequence += [
                 nn.Conv2d(ndf * nf_mult_prev,
                           ndf * nf_mult,
                           kernel_size=kw,
                           stride=2,
                           padding=padw,
                           bias_attr=use_bias),
                 norm_layer(ndf * nf_mult),
-                LeakyReLU(0.2, True)
+                nn.LeakyReLU(0.2)
             ]

         nf_mult_prev = nf_mult
         nf_mult = min(2**n_layers, 8)
         sequence += [
             nn.Conv2d(ndf * nf_mult_prev,
                       ndf * nf_mult,
                       kernel_size=kw,
                       stride=1,
                       padding=padw,
                       bias_attr=use_bias),
             norm_layer(ndf * nf_mult),
-            LeakyReLU(0.2, True)
+            nn.LeakyReLU(0.2)
         ]

         sequence += [
             nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
         ]
         self.model = nn.Sequential(*sequence)

     def forward(self, input):
         """Standard forward."""
         return self.model(input)
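Note: the PatchGAN discriminator maps an image to a one-channel grid of patch logits rather than a single scalar. A hedged usage sketch, assuming the class can be constructed directly outside the DISCRIMINATORS registry; the 6-channel input (condition plus image, as pix2pix concatenates them), resolution, and norm choice are illustrative:

    import paddle
    from ppgan.models.discriminators.nlayers import NLayerDiscriminator

    paddle.disable_static()
    netD = NLayerDiscriminator(input_nc=6, ndf=64, n_layers=3, norm_type='instance')
    fake_pair = paddle.rand([1, 6, 256, 256])
    score_map = netD(fake_pair)
    print(score_map.shape)   # roughly [1, 1, 30, 30]: one logit per image patch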
ppgan/models/generators/deoldify.py
@@ -432,8 +432,7 @@ class SelfAttention(nn.Layer):
         self.key = conv1d(n_channels, n_channels // 8)
         self.value = conv1d(n_channels, n_channels)
-        self.gamma = self.create_parameter(
-            shape=[1],
-            default_initializer=paddle.fluid.initializer.Constant(0.0))  #nn.Parameter(tensor([0.]))
+        self.gamma = self.create_parameter(
+            shape=[1], default_initializer=paddle.nn.initializer.Constant(0.0))  #nn.Parameter(tensor([0.]))

     def forward(self, x):
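Note: this hunk only swaps the fluid initializer for the 2.0 nn.initializer namespace when creating the learnable attention scale. A generic sketch of that create_parameter pattern; the class and method names below are illustrative, not repository code:

    import paddle
    import paddle.nn as nn

    class ScaledResidual(nn.Layer):
        """Learnable scalar gamma initialized to 0, as in the self-attention layer."""
        def __init__(self):
            super(ScaledResidual, self).__init__()
            self.gamma = self.create_parameter(
                shape=[1], default_initializer=nn.initializer.Constant(0.0))

        def forward(self, x, residual):
            return self.gamma * residual + x

    paddle.disable_static()
    layer = ScaledResidual()
    print(layer.gamma.numpy())   # [0.]; the scale is learned during training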
ppgan/models/generators/remaster.py
@@ -2,43 +2,79 @@ import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F

 class TempConv(nn.Layer):
     def __init__(self,
                  in_planes,
                  out_planes,
                  kernel_size=(1, 3, 3),
                  stride=(1, 1, 1),
                  padding=(0, 1, 1)):
         super(TempConv, self).__init__()
         self.conv3d = nn.Conv3d(in_planes,
                                 out_planes,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 padding=padding)
         self.bn = nn.BatchNorm(out_planes)

     def forward(self, x):
         return F.elu(self.bn(self.conv3d(x)))

 class Upsample(nn.Layer):
     def __init__(self, in_planes, out_planes, scale_factor=(1, 2, 2)):
         super(Upsample, self).__init__()
         self.scale_factor = scale_factor
         self.conv3d = nn.Conv3d(in_planes,
                                 out_planes,
                                 kernel_size=(3, 3, 3),
                                 stride=(1, 1, 1),
                                 padding=(1, 1, 1))
         self.bn = nn.BatchNorm(out_planes)

     def forward(self, x):
         out_size = x.shape[2:]
         for i in range(3):
             out_size[i] = self.scale_factor[i] * out_size[i]

         return F.elu(
             self.bn(
                 self.conv3d(
                     F.interpolate(x,
                                   size=out_size,
                                   mode='trilinear',
                                   align_corners=False,
                                   data_format='NCDHW',
                                   align_mode=0))))

 class UpsampleConcat(nn.Layer):
     def __init__(self, in_planes_up, in_planes_flat, out_planes):
         super(UpsampleConcat, self).__init__()
         self.conv3d = TempConv(in_planes_up + in_planes_flat,
                                out_planes,
                                kernel_size=(3, 3, 3),
                                stride=(1, 1, 1),
                                padding=(1, 1, 1))

     def forward(self, x1, x2):
         scale_factor = (1, 2, 2)
         out_size = x1.shape[2:]
         for i in range(3):
             out_size[i] = scale_factor[i] * out_size[i]
         x1 = F.interpolate(x1,
                            size=out_size,
                            mode='trilinear',
                            align_corners=False,
                            data_format='NCDHW',
                            align_mode=0)
         x = paddle.concat([x1, x2], axis=1)
         return self.conv3d(x)

-class SourceReferenceAttention(paddle.fluid.dygraph.Layer):
+class SourceReferenceAttention(nn.Layer):
     """
     Source-Reference Attention Layer
     """
@@ -51,137 +87,166 @@ class SourceReferenceAttention(paddle.fluid.dygraph.Layer):
         in_planes_r: int
             Number of input reference feature vector channels.
         """
         super(SourceReferenceAttention, self).__init__()
         self.query_conv = nn.Conv3d(in_channels=in_planes_s,
                                     out_channels=in_planes_s // 8,
                                     kernel_size=1)
         self.key_conv = nn.Conv3d(in_channels=in_planes_r,
                                   out_channels=in_planes_r // 8,
                                   kernel_size=1)
         self.value_conv = nn.Conv3d(in_channels=in_planes_r,
                                     out_channels=in_planes_r,
                                     kernel_size=1)
-        self.gamma = self.create_parameter(
-            shape=[1],
-            dtype=self.query_conv.weight.dtype,
-            default_initializer=paddle.fluid.initializer.Constant(0.0))
+        self.gamma = self.create_parameter(
+            shape=[1],
+            dtype=self.query_conv.weight.dtype,
+            default_initializer=nn.initializer.Constant(0.0))

     def forward(self, source, reference):
         s_batchsize, sC, sT, sH, sW = source.shape
         r_batchsize, rC, rT, rH, rW = reference.shape

         proj_query = paddle.reshape(self.query_conv(source),
                                     [s_batchsize, -1, sT * sH * sW])
         proj_query = paddle.transpose(proj_query, [0, 2, 1])
         proj_key = paddle.reshape(self.key_conv(reference),
                                   [r_batchsize, -1, rT * rW * rH])
         energy = paddle.bmm(proj_query, proj_key)
         attention = F.softmax(energy)

         proj_value = paddle.reshape(self.value_conv(reference),
                                     [r_batchsize, -1, rT * rH * rW])
         out = paddle.bmm(proj_value, paddle.transpose(attention, [0, 2, 1]))
         out = paddle.reshape(out, [s_batchsize, sC, sT, sH, sW])
         out = self.gamma * out + source
         return out, attention

 class NetworkR(nn.Layer):
     def __init__(self):
         super(NetworkR, self).__init__()

         self.layers = nn.Sequential(
             nn.ReplicationPad3d((1, 1, 1, 1, 1, 1)),
             TempConv(1, 64, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0)),
             TempConv(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             TempConv(128, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             TempConv(128, 256, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1)),
             TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             TempConv(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             Upsample(256, 128),
             TempConv(128, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             TempConv(64, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
             Upsample(64, 16),
             nn.Conv3d(16, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)))

     def forward(self, x):
-        return paddle.clip(
-            (x + paddle.fluid.layers.tanh(self.layers(((x * 1).detach()) - 0.4462414))),
-            0.0, 1.0)
+        return paddle.clip(
+            (x + F.tanh(self.layers(((x * 1).detach()) - 0.4462414))),
+            0.0, 1.0)

 class NetworkC(nn.Layer):
     def __init__(self):
         super(NetworkC, self).__init__()

         self.down1 = nn.Sequential(
             nn.ReplicationPad3d((1, 1, 1, 1, 0, 0)),
             TempConv(1, 64, stride=(1, 2, 2), padding=(0, 0, 0)),
             TempConv(64, 128),
             TempConv(128, 128),
             TempConv(128, 256, stride=(1, 2, 2)),
             TempConv(256, 256),
             TempConv(256, 256),
             TempConv(256, 512, stride=(1, 2, 2)),
             TempConv(512, 512),
             TempConv(512, 512))
         self.flat = nn.Sequential(
             TempConv(512, 512),
             TempConv(512, 512))
         self.down2 = nn.Sequential(
             TempConv(512, 512, stride=(1, 2, 2)),
             TempConv(512, 512),
         )
         self.stattn1 = SourceReferenceAttention(512, 512)  # Source-Reference Attention
         self.stattn2 = SourceReferenceAttention(512, 512)  # Source-Reference Attention
         self.selfattn1 = SourceReferenceAttention(512, 512)  # Self Attention
         self.conv1 = TempConv(512, 512)
         self.up1 = UpsampleConcat(512, 512, 512)  # 1/8
         self.selfattn2 = SourceReferenceAttention(512, 512)  # Self Attention
         self.conv2 = TempConv(512, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1))
         self.up2 = nn.Sequential(
             Upsample(256, 128),  # 1/4
             TempConv(128, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)))
         self.up3 = nn.Sequential(
             Upsample(64, 32),  # 1/2
             TempConv(32, 16, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)))
         self.up4 = nn.Sequential(
             Upsample(16, 8),  # 1/1
             nn.Conv3d(8, 2, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)))
         self.reffeatnet1 = nn.Sequential(
             TempConv(3, 64, stride=(1, 2, 2)),
             TempConv(64, 128),
             TempConv(128, 128),
             TempConv(128, 256, stride=(1, 2, 2)),
             TempConv(256, 256),
             TempConv(256, 256),
             TempConv(256, 512, stride=(1, 2, 2)),
             TempConv(512, 512),
             TempConv(512, 512),
         )
         self.reffeatnet2 = nn.Sequential(
             TempConv(512, 512, stride=(1, 2, 2)),
             TempConv(512, 512),
             TempConv(512, 512),
         )

     def forward(self, x, x_refs=None):
         x1 = self.down1(x - 0.4462414)

         if x_refs is not None:
             x_refs = paddle.transpose(x_refs, [0, 2, 1, 3, 4])  # [B,T,C,H,W] --> [B,C,T,H,W]
             reffeat = self.reffeatnet1(x_refs - 0.48)
             x1, _ = self.stattn1(x1, reffeat)

         x2 = self.flat(x1)
         out = self.down2(x1)
         if x_refs is not None:
             reffeat2 = self.reffeatnet2(reffeat)
             out, _ = self.stattn2(out, reffeat2)
         out = self.conv1(out)
         out, _ = self.selfattn1(out, out)
         out = self.up1(out, x2)
         out, _ = self.selfattn2(out, out)
         out = self.conv2(out)
         out = self.up2(out)
         out = self.up3(out)
         out = self.up4(out)

         return F.sigmoid(out)
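Note: the remaster networks operate on 5-D video tensors in NCDHW layout. A hedged usage sketch for the restoration branch, assuming the package is importable from the repository root; the frame count and resolution are made-up, and the grayscale-in/grayscale-out shape is inferred from NetworkR's first and last Conv3d:

    import paddle
    from ppgan.models.generators.remaster import NetworkR

    paddle.disable_static()
    net_r = NetworkR()
    frames = paddle.rand([1, 1, 5, 64, 64])   # [N, C=1, T, H, W] grayscale clip
    restored = net_r(frames)
    print(restored.shape)                      # expected [1, 1, 5, 64, 64], values clipped to [0, 1]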
ppgan/models/generators/resnet.py
@@ -2,20 +2,25 @@ import paddle
 import paddle.nn as nn
 import functools

-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout, BCEWithLogitsLoss, Pad2D, MSELoss
 from ...modules.norm import build_norm_layer

 from .builder import GENERATORS

 @GENERATORS.register()
-class ResnetGenerator(paddle.fluid.dygraph.Layer):
+class ResnetGenerator(nn.Layer):
     """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
     code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
     """
     def __init__(self,
                  input_nc,
                  output_nc,
                  ngf=64,
                  norm_type='instance',
                  use_dropout=False,
                  n_blocks=6,
                  padding_type='reflect'):
         """Construct a Resnet-based generator

         Args:
@@ -27,7 +32,7 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
             n_blocks (int)      -- the number of ResNet blocks
             padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
         """
         assert (n_blocks >= 0)
         super(ResnetGenerator, self).__init__()

         norm_layer = build_norm_layer(norm_type)
@@ -36,35 +41,56 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         else:
             use_bias = norm_layer == nn.InstanceNorm

-        model = [ReflectionPad2d(3),
-                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias_attr=use_bias),
-                 norm_layer(ngf),
-                 nn.ReLU()]
+        model = [
+            nn.ReflectionPad2d([3, 3, 3, 3]),
+            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias_attr=use_bias),
+            norm_layer(ngf),
+            nn.ReLU()
+        ]

         n_downsampling = 2
         for i in range(n_downsampling):  # add downsampling layers
             mult = 2**i
             model += [
                 nn.Conv2d(ngf * mult,
                           ngf * mult * 2,
                           kernel_size=3,
                           stride=2,
                           padding=1,
                           bias_attr=use_bias),
                 norm_layer(ngf * mult * 2),
                 nn.ReLU()
             ]

         mult = 2**n_downsampling
         for i in range(n_blocks):  # add ResNet blocks
             model += [
                 ResnetBlock(ngf * mult,
                             padding_type=padding_type,
                             norm_layer=norm_layer,
                             use_dropout=use_dropout,
                             use_bias=use_bias)
             ]

         for i in range(n_downsampling):  # add upsampling layers
             mult = 2**(n_downsampling - i)
             model += [
                 nn.ConvTranspose2d(ngf * mult,
                                    int(ngf * mult / 2),
                                    kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    output_padding=1,
                                    bias_attr=use_bias),
                 norm_layer(int(ngf * mult / 2)),
                 nn.ReLU()
             ]
-        model += [ReflectionPad2d(3)]
+        model += [nn.ReflectionPad2d([3, 3, 3, 3])]
         model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
         model += [nn.Tanh()]
@@ -75,9 +101,8 @@ class ResnetGenerator(paddle.fluid.dygraph.Layer):
         return self.model(x)

-class ResnetBlock(paddle.fluid.dygraph.Layer):
+class ResnetBlock(nn.Layer):
     """Define a Resnet block"""

     def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
         """Initialize the Resnet block
@@ -87,9 +112,11 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
         Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
         """
         super(ResnetBlock, self).__init__()
-        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
+        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer,
+                                                use_dropout, use_bias)

-    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
+    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
+                         use_bias):
         """Construct a convolutional block.

         Parameters:
@@ -104,28 +131,37 @@ class ResnetBlock(paddle.fluid.dygraph.Layer):
         conv_block = []
         p = 0
         if padding_type == 'reflect':
-            conv_block += [ReflectionPad2d(1)]
+            conv_block += [nn.ReflectionPad2d([1, 1, 1, 1])]
         elif padding_type == 'replicate':
-            conv_block += [ReplicationPad2d(1)]
+            conv_block += [nn.ReplicationPad2d([1, 1, 1, 1])]
         elif padding_type == 'zero':
             p = 1
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)

-        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim), nn.ReLU()]
+        conv_block += [
+            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
+            norm_layer(dim),
+            nn.ReLU()
+        ]
         if use_dropout:
-            conv_block += [Dropout(0.5)]
+            conv_block += [nn.Dropout(0.5)]

         p = 0
         if padding_type == 'reflect':
-            conv_block += [ReflectionPad2d(1)]
+            conv_block += [nn.ReflectionPad2d([1, 1, 1, 1])]
         elif padding_type == 'replicate':
-            conv_block += [ReplicationPad2d(1)]
+            conv_block += [nn.ReplicationPad2d([1, 1, 1, 1])]
         elif padding_type == 'zero':
             p = 1
         else:
             raise NotImplementedError('padding [%s] is not implemented' % padding_type)
-        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias), norm_layer(dim)]
+        conv_block += [
+            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
+            norm_layer(dim)
+        ]

         return nn.Sequential(*conv_block)
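Note: a hedged usage sketch for the generator above, assuming it can be constructed directly outside the GENERATORS registry; the channel counts, block count, and resolution are illustrative:

    import paddle
    from ppgan.models.generators.resnet import ResnetGenerator

    paddle.disable_static()
    netG = ResnetGenerator(input_nc=3, output_nc=3, ngf=64,
                           norm_type='instance', n_blocks=9, padding_type='reflect')
    img = paddle.rand([1, 3, 256, 256])
    out = netG(img)
    print(out.shape)   # [1, 3, 256, 256]; the final Tanh keeps values in (-1, 1)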
ppgan/models/generators/unet.py
+import functools
+
 import paddle
 import paddle.nn as nn
-import functools
-from ...modules.nn import ReflectionPad2d, LeakyReLU, Tanh, Dropout

 from ...modules.norm import build_norm_layer
 from .builder import GENERATORS

 @GENERATORS.register()
-class UnetGenerator(paddle.fluid.dygraph.Layer):
+class UnetGenerator(nn.Layer):
     """Create a Unet-based generator"""

     def __init__(self,
                  input_nc,
                  output_nc,
                  num_downs,
                  ngf=64,
                  norm_type='batch',
                  use_dropout=False):
         """Construct a Unet generator
         Args:
             input_nc (int)  -- the number of channels in input images
@@ -27,36 +31,64 @@ class UnetGenerator(paddle.fluid.dygraph.Layer):
         super(UnetGenerator, self).__init__()
         norm_layer = build_norm_layer(norm_type)
         # construct unet structure
         unet_block = UnetSkipConnectionBlock(ngf * 8,
                                              ngf * 8,
                                              input_nc=None,
                                              submodule=None,
                                              norm_layer=norm_layer,
                                              innermost=True)  # add the innermost layer
         for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
             unet_block = UnetSkipConnectionBlock(ngf * 8,
                                                  ngf * 8,
                                                  input_nc=None,
                                                  submodule=unet_block,
                                                  norm_layer=norm_layer,
                                                  use_dropout=use_dropout)
         # gradually reduce the number of filters from ngf * 8 to ngf
         unet_block = UnetSkipConnectionBlock(ngf * 4,
                                              ngf * 8,
                                              input_nc=None,
                                              submodule=unet_block,
                                              norm_layer=norm_layer)
         unet_block = UnetSkipConnectionBlock(ngf * 2,
                                              ngf * 4,
                                              input_nc=None,
                                              submodule=unet_block,
                                              norm_layer=norm_layer)
         unet_block = UnetSkipConnectionBlock(ngf,
                                              ngf * 2,
                                              input_nc=None,
                                              submodule=unet_block,
                                              norm_layer=norm_layer)
         self.model = UnetSkipConnectionBlock(output_nc,
                                              ngf,
                                              input_nc=input_nc,
                                              submodule=unet_block,
                                              outermost=True,
                                              norm_layer=norm_layer)  # add the outermost layer

     def forward(self, input):
         """Standard forward"""
         # tmp = self.model._sub_layers['model'][0](input)
         # tmp1 = self.model._sub_layers['model'][1](tmp)
         # tmp2 = self.model._sub_layers['model'][2](tmp1)
         # import pickle
         # pickle.dump(tmp2.numpy(), open('/workspace/notebook/align_pix2pix/tmp2-pd.pkl', 'wb'))
         # tmp3 = self.model._sub_layers['model'][3](tmp2)
         # pickle.dump(tmp3.numpy(), open('/workspace/notebook/align_pix2pix/tmp3-pd.pkl', 'wb'))
         # tmp4 = self.model._sub_layers['model'][4](tmp3)
         return self.model(input)

-class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
+class UnetSkipConnectionBlock(nn.Layer):
     """Defines the Unet submodule with skip connection.
         X -------------------identity----------------------
         |-- downsampling -- |submodule| -- upsampling --|
     """
     def __init__(self,
                  outer_nc,
                  inner_nc,
                  input_nc=None,
                  submodule=None,
                  outermost=False,
                  innermost=False,
                  norm_layer=nn.BatchNorm,
                  use_dropout=False):
         """Construct a Unet submodule with skip connections.

         Parameters:
@@ -77,36 +109,48 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
             use_bias = norm_layer == nn.InstanceNorm
         if input_nc is None:
             input_nc = outer_nc
         downconv = nn.Conv2d(input_nc,
                              inner_nc,
                              kernel_size=4,
                              stride=2,
                              padding=1,
                              bias_attr=use_bias)
-        downrelu = LeakyReLU(0.2, True)
+        downrelu = nn.LeakyReLU(0.2)
         downnorm = norm_layer(inner_nc)
-        uprelu = nn.ReLU(True)
+        uprelu = nn.ReLU()
         upnorm = norm_layer(outer_nc)

         if outermost:
             upconv = nn.ConvTranspose2d(inner_nc * 2,
                                         outer_nc,
                                         kernel_size=4,
                                         stride=2,
                                         padding=1)
             down = [downconv]
-            up = [uprelu, upconv, Tanh()]
+            up = [uprelu, upconv, nn.Tanh()]
             model = down + [submodule] + up
         elif innermost:
             upconv = nn.ConvTranspose2d(inner_nc,
                                         outer_nc,
                                         kernel_size=4,
                                         stride=2,
                                         padding=1,
                                         bias_attr=use_bias)
             down = [downrelu, downconv]
             up = [uprelu, upconv, upnorm]
             model = down + up
         else:
             upconv = nn.ConvTranspose2d(inner_nc * 2,
                                         outer_nc,
                                         kernel_size=4,
                                         stride=2,
                                         padding=1,
                                         bias_attr=use_bias)
             down = [downrelu, downconv, downnorm]
             up = [uprelu, upconv, upnorm]

             if use_dropout:
-                model = down + [submodule] + up + [Dropout(0.5)]
+                model = down + [submodule] + up + [nn.Dropout(0.5)]
             else:
                 model = down + [submodule] + up
@@ -115,5 +159,5 @@ class UnetSkipConnectionBlock(paddle.fluid.dygraph.Layer):
     def forward(self, x):
         if self.outermost:
             return self.model(x)
         else:  # add skip connections
             return paddle.concat([x, self.model(x)], 1)
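Note: the U-Net is built inside-out, so num_downs must match the input resolution (2**num_downs equal to the image side for a 1x1 bottleneck). A hedged usage sketch, assuming direct construction outside the registry; batch size and resolution are illustrative:

    import paddle
    from ppgan.models.generators.unet import UnetGenerator

    paddle.disable_static()
    # 8 downsamplings contract a 256x256 input to 1x1 at the bottleneck
    netG = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64, norm_type='batch')
    out = netG(paddle.rand([4, 3, 256, 256]))
    print(out.shape)   # [4, 3, 256, 256]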
ppgan/models/losses.py
 import numpy as np

 import paddle
 import paddle.nn as nn

-from ..modules.nn import BCEWithLogitsLoss

-class GANLoss(paddle.fluid.dygraph.Layer):
+class GANLoss(nn.Layer):
     """Define different GAN objectives.

     The GANLoss class abstracts away the need to create the target label tensor
     that has the same size as the input.
     """

     def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
         """ Initialize the GANLoss class.
@@ -31,7 +29,7 @@ class GANLoss(paddle.fluid.dygraph.Layer):
         if gan_mode == 'lsgan':
             self.loss = nn.MSELoss()
         elif gan_mode == 'vanilla':
-            self.loss = BCEWithLogitsLoss()
+            self.loss = nn.BCEWithLogitsLoss()
         elif gan_mode in ['wgangp']:
             self.loss = None
         else:
@@ -50,11 +48,17 @@ class GANLoss(paddle.fluid.dygraph.Layer):
         if target_is_real:
             if not hasattr(self, 'target_real_tensor'):
                 self.target_real_tensor = paddle.fill_constant(
                     shape=paddle.shape(prediction),
                     value=self.target_real_label,
                     dtype='float32')
             target_tensor = self.target_real_tensor
         else:
             if not hasattr(self, 'target_fake_tensor'):
                 self.target_fake_tensor = paddle.fill_constant(
                     shape=paddle.shape(prediction),
                     value=self.target_fake_label,
                     dtype='float32')
             target_tensor = self.target_fake_tensor

         # target_tensor.stop_gradient = True
@@ -78,4 +82,4 @@ class GANLoss(paddle.fluid.dygraph.Layer):
                 loss = -prediction.mean()
             else:
                 loss = prediction.mean()
         return loss
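Note: a hedged usage sketch of GANLoss, assuming it is called with (prediction, target_is_real) the way pix2pix_model.py uses criterionGAN; the dummy logits below stand in for a PatchGAN output and are not repository code:

    import paddle
    from ppgan.models.losses import GANLoss

    paddle.disable_static()
    criterion = GANLoss('vanilla')              # nn.BCEWithLogitsLoss under the hood
    pred_fake = paddle.rand([4, 1, 30, 30])     # pretend discriminator logits
    loss_d_fake = criterion(pred_fake, False)   # compare against an all-0.0 target tensor
    loss_g = criterion(pred_fake, True)         # generator wants these judged real (1.0)
    print(loss_d_fake.numpy(), loss_g.numpy())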
ppgan/models/pix2pix_model.py
@@ -48,6 +48,7 @@ class Pix2PixModel(BaseModel):
         self.netD = build_discriminator(opt.model.discriminator)

         if self.isTrain:
+            self.losses = {}
             # define loss functions
             self.criterionGAN = GANLoss(opt.model.gan_mode)
             self.criterionL1 = paddle.nn.L1Loss()
@@ -77,8 +78,9 @@ class Pix2PixModel(BaseModel):
         """
         AtoB = self.opt.dataset.train.direction == 'AtoB'
-        self.real_A = paddle.to_tensor(input['A' if AtoB else 'B'])
-        self.real_B = paddle.to_tensor(input['B' if AtoB else 'A'])
+        self.real_A = paddle.to_variable(input['A' if AtoB else 'B'])
+        self.real_B = paddle.to_variable(input['B' if AtoB else 'A'])
+
         self.image_paths = input['A_paths' if AtoB else 'B_paths']

     def forward(self):
@@ -118,6 +120,7 @@ class Pix2PixModel(BaseModel):
         # Second, G(A) = B
         self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
         # combine loss and calculate gradients
         self.loss_G = self.loss_G_GAN + self.loss_G_L1
ppgan/modules/nn.py
 import paddle
 import paddle.nn as nn
-from paddle.fluid.dygraph import Layer
-from paddle import fluid
-
-class MSELoss():
-    def __init__(self):
-        pass
-
-    def __call__(self, prediction, label):
-        return fluid.layers.mse_loss(prediction, label)
-
-class L1Loss():
-    def __init__(self):
-        pass
-
-    def __call__(self, prediction, label):
-        return fluid.layers.reduce_mean(fluid.layers.elementwise_sub(prediction, label, act='abs'))
-
-class ReflectionPad2d(Layer):
-    def __init__(self, size):
-        super(ReflectionPad2d, self).__init__()
-        self.size = size
-
-    def forward(self, x):
-        return fluid.layers.pad2d(x, [self.size] * 4, mode="reflect")
-
-class LeakyReLU(Layer):
-    def __init__(self, alpha, inplace=False):
-        super(LeakyReLU, self).__init__()
-        self.alpha = alpha
-
-    def forward(self, x):
-        return fluid.layers.leaky_relu(x, self.alpha)
-
-class Tanh(Layer):
-    def __init__(self):
-        super(Tanh, self).__init__()
-
-    def forward(self, x):
-        return fluid.layers.tanh(x)
-
-class Dropout(Layer):
-    def __init__(self, prob, mode='upscale_in_train'):
-        super(Dropout, self).__init__()
-        self.prob = prob
-        self.mode = mode
-
-    def forward(self, x):
-        return fluid.layers.dropout(x, self.prob, dropout_implementation=self.mode)
-
-class BCEWithLogitsLoss():
-    def __init__(self, weight=None, reduction='mean'):
-        self.weight = weight
-        self.reduction = 'mean'
-
-    def __call__(self, x, label):
-        out = paddle.fluid.layers.sigmoid_cross_entropy_with_logits(x, label)
-        if self.reduction == 'sum':
-            return fluid.layers.reduce_sum(out)
-        elif self.reduction == 'mean':
-            return fluid.layers.reduce_mean(out)
-        else:
-            return out

-class _SpectralNorm(paddle.nn.SpectralNorm):
+class _SpectralNorm(nn.SpectralNorm):
     def __init__(self, weight_shape, dim=0, power_iters=1, eps=1e-12, dtype='float32'):
-        super(_SpectralNorm, self).__init__(weight_shape, dim, power_iters, eps, dtype)
+        super(_SpectralNorm, self).__init__(weight_shape, dim, power_iters,
+                                            eps, dtype)

     def forward(self, weight):
         paddle.fluid.data_feeder.check_variable_and_dtype(weight, "weight", ['float32', 'float64'], 'SpectralNorm')
         inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v}
         out = self._helper.create_variable_for_type_inference(self._dtype)
         _power_iters = self._power_iters if self.training else 0
         self._helper.append_op(type="spectral_norm",
                                inputs=inputs,
                                outputs={"Out": out, },
                                attrs={
                                    "dim": self._dim,
                                    "power_iters": _power_iters,
                                    "eps": self._eps,
                                })
         return out

 class Spectralnorm(paddle.nn.Layer):
-    def __init__(self, layer, dim=0, power_iters=1, eps=1e-12, dtype='float32'):
+    def __init__(self, layer, dim=0, power_iters=1, eps=1e-12,
+                 dtype='float32'):
         super(Spectralnorm, self).__init__()
-        self.spectral_norm = _SpectralNorm(layer.weight.shape, dim, power_iters, eps, dtype)
+        self.spectral_norm = _SpectralNorm(layer.weight.shape, dim,
+                                           power_iters, eps, dtype)
         self.dim = dim
         self.power_iters = power_iters
         self.eps = eps
         self.layer = layer
         weight = layer._parameters['weight']
         del layer._parameters['weight']
-        self.weight_orig = self.create_parameter(weight.shape, dtype=weight.dtype)
+        self.weight_orig = self.create_parameter(weight.shape,
+                                                 dtype=weight.dtype)
         self.weight_orig.set_value(weight)

     def forward(self, x):
         weight = self.spectral_norm(self.weight_orig)
         self.layer.weight = weight
         out = self.layer(x)
         return out

-def initial_type(input, op_type, fan_out, init="normal", use_bias=False, filter_size=0, stddev=0.02, name=None):
-    if init == "kaiming":
-        if op_type == 'conv':
-            fan_in = input.shape[1] * filter_size * filter_size
-        elif op_type == 'deconv':
-            fan_in = fan_out * filter_size * filter_size
-        else:
-            if len(input.shape) > 2:
-                fan_in = input.shape[1] * input.shape[2] * input.shape[3]
-            else:
-                fan_in = input.shape[1]
-        bound = 1 / math.sqrt(fan_in)
-        param_attr = fluid.ParamAttr(
-            # name=name + "_w",
-            initializer=fluid.initializer.Uniform(low=-bound, high=bound))
-        if use_bias == True:
-            bias_attr = fluid.ParamAttr(
-                # name=name + '_b',
-                initializer=fluid.initializer.Uniform(low=-bound, high=bound))
-        else:
-            bias_attr = False
-    else:
-        param_attr = fluid.ParamAttr(
-            # name=name + "_w",
-            initializer=fluid.initializer.NormalInitializer(loc=0.0, scale=stddev))
-        if use_bias == True:
-            bias_attr = fluid.ParamAttr(
-                # name=name + "_b",
-                initializer=fluid.initializer.Constant(0.0))
-        else:
-            bias_attr = False
-    return param_attr, bias_attr
-
-class Pad2D(fluid.dygraph.Layer):
-    def __init__(self, paddings, mode, pad_value=0.0):
-        super(Pad2D, self).__init__()
-        self.paddings = paddings
-        self.mode = mode
-
-    def forward(self, x):
-        return fluid.layers.pad2d(x, self.paddings, self.mode)
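Note: the Spectralnorm helper kept in this file wraps an existing layer, re-parameterizing its weight on every forward pass. A hedged usage sketch; the wrapped conv shape and input size are illustrative, not repository code:

    import paddle
    import paddle.nn as nn
    from ppgan.modules.nn import Spectralnorm

    paddle.disable_static()
    conv = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1)
    sn_conv = Spectralnorm(conv, power_iters=1)   # re-estimates the spectral norm each forward
    y = sn_conv(paddle.rand([1, 3, 64, 64]))
    print(y.shape)   # [1, 64, 32, 32]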
ppgan/modules/norm.py
@@ -3,7 +3,7 @@ import functools
 import paddle.nn as nn

-class Identity(paddle.fluid.dygraph.Layer):
+class Identity(nn.Layer):
     def forward(self, x):
         return x
@@ -18,11 +18,28 @@ def build_norm_layer(norm_type='instance'):
     For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
     """
     if norm_type == 'batch':
-        norm_layer = functools.partial(
-            nn.BatchNorm,
-            param_attr=paddle.ParamAttr(
-                initializer=paddle.fluid.initializer.NormalInitializer(1.0, 0.02)),
-            bias_attr=paddle.ParamAttr(
-                initializer=paddle.fluid.initializer.Constant(0.0)),
-            trainable_statistics=True)
+        norm_layer = functools.partial(
+            nn.BatchNorm,
+            param_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Normal(1.0, 0.02)),
+            bias_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Constant(0.0)),
+            trainable_statistics=True)
     elif norm_type == 'instance':
-        norm_layer = functools.partial(
-            nn.InstanceNorm,
-            param_attr=paddle.ParamAttr(
-                initializer=paddle.fluid.initializer.Constant(1.0),
-                learning_rate=0.0,
-                trainable=False),
-            bias_attr=paddle.ParamAttr(
-                initializer=paddle.fluid.initializer.Constant(0.0),
-                learning_rate=0.0,
-                trainable=False))
+        norm_layer = functools.partial(
+            nn.InstanceNorm,
+            param_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Constant(1.0),
+                learning_rate=0.0,
+                trainable=False),
+            bias_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Constant(0.0),
+                learning_rate=0.0,
+                trainable=False))
     elif norm_type == 'none':

         def norm_layer(x):
             return Identity()
     else:
-        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+        raise NotImplementedError('normalization layer [%s] is not found' %
+                                  norm_type)
     return norm_layer
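Note: build_norm_layer returns either a plain class or a functools.partial, which is why callers such as the generator and discriminator unwrap .func before deciding whether convolutions need a bias. A minimal sketch of that check, mirroring the repository's own usage:

    import functools
    import paddle.nn as nn
    from ppgan.modules.norm import build_norm_layer

    norm_layer = build_norm_layer('instance')
    # partial objects hide the underlying class, so unwrap .func first
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm
    else:
        use_bias = norm_layer == nn.InstanceNorm
    print(use_bias)          # True: InstanceNorm carries no learnable bias here, so convs keep theirs
    norm = norm_layer(64)    # instantiate the norm for a 64-channel feature map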
ppgan/utils/setup.py
@@ -19,6 +19,6 @@ def setup(args, cfg):
     logger.info('Configs: {}'.format(cfg))

-    place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
-        if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)
+    place = paddle.CUDAPlace(ParallelEnv().dev_id) \
+        if ParallelEnv().nranks > 1 else paddle.CUDAPlace(0)

     paddle.disable_static(place)