PaddlePaddle / PaddleGAN

Commit 6306f226
Authored on Oct 15, 2020 by lijianshe02
Parent: c2ed7bb8

    refine psgan code

Showing 4 changed files with 122 additions and 240 deletions (+122 -240)
Changed files:
  ppgan/models/discriminators/nlayers.py   +46  -46
  ppgan/models/generators/makeup.py        +72  -73
  ppgan/models/makeup_model.py             +4   -1
  ppgan/modules/nn.py                      +0   -120
ppgan/models/discriminators/nlayers.py

@@ -18,7 +18,7 @@ import numpy as np
 import paddle.nn as nn
 import paddle.nn.functional as F
-from ...modules.nn import Conv2d, Spectralnorm
+from ...modules.nn import Spectralnorm
 from ...modules.norm import build_norm_layer
 from .builder import DISCRIMINATORS

@@ -51,21 +51,21 @@ class NLayerDiscriminator(nn.Layer):
         if norm_type == 'spectral':
             sequence = [
                 Spectralnorm(
-                    Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)),
+                    nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)),
                 nn.LeakyReLU(0.01)
             ]
         else:
             sequence = [
-                Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
+                nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
                 nn.LeakyReLU(0.2)
             ]
         nf_mult = 1

@@ -76,21 +76,21 @@ class NLayerDiscriminator(nn.Layer):
             if norm_type == 'spectral':
                 sequence += [
                     Spectralnorm(
-                        Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw)),
+                        nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw)),
                     nn.LeakyReLU(0.01)
                 ]
             else:
                 sequence += [
-                    Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
+                    nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias_attr=use_bias),
                     norm_layer(ndf * nf_mult),
                     nn.LeakyReLU(0.2)
                 ]

@@ -100,21 +100,21 @@ class NLayerDiscriminator(nn.Layer):
         if norm_type == 'spectral':
             sequence += [
                 Spectralnorm(
-                    Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw)),
+                    nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw)),
                 nn.LeakyReLU(0.01)
             ]
         else:
             sequence += [
-                Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias_attr=use_bias),
+                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias_attr=use_bias),
                 norm_layer(ndf * nf_mult),
                 nn.LeakyReLU(0.2)
             ]

@@ -122,21 +122,21 @@ class NLayerDiscriminator(nn.Layer):
         if norm_type == 'spectral':
             sequence += [
                 Spectralnorm(
-                    Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw, bias_attr=False))
+                    nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw, bias_attr=False))
             ]  # output 1 channel prediction map
         else:
             sequence += [
-                Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw, bias_attr=False)
+                nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw, bias_attr=False)
             ]  # output 1 channel prediction map
         self.model = nn.Sequential(*sequence)
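Note: throughout this file the project-local Conv2d wrapper (deleted from ppgan/modules/nn.py in this same commit) is replaced by Paddle's built-in nn.Conv2d. Below is a minimal sketch of one spectral-norm discriminator stage as it is assembled after the change; the values kw=4 and padw=1 are assumptions (typical PatchGAN settings, not shown in these hunks), and the API names follow the Paddle 2.0-rc release this commit targets.

import paddle
import paddle.nn as nn
from ppgan.modules.nn import Spectralnorm

# kw/padw/input_nc/ndf values are assumed for illustration; the hunks above
# only show how the layers are wired, not the constructor arguments.
kw, padw, input_nc, ndf = 4, 1, 3, 64

stage = nn.Sequential(
    # Spectral norm now wraps the stock paddle.nn.Conv2d rather than the
    # deleted project-local Conv2d subclass.
    Spectralnorm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)),
    nn.LeakyReLU(0.01),
)

x = paddle.randn([1, input_nc, 256, 256])
print(stage(x).shape)  # stride-2 conv halves each spatial dim: [1, 64, 128, 128]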
ppgan/models/generators/makeup.py

@@ -20,7 +20,6 @@ import functools
 import numpy as np

 from ...modules.norm import build_norm_layer
-from ...modules.nn import Conv2d, ConvTranspose2d
 from .builder import GENERATORS

@@ -50,21 +49,21 @@ class ResidualBlock(paddle.nn.Layer):
             bias_attr = None

         self.main = nn.Sequential(
-            Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
+            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
             nn.InstanceNorm2d(dim_out, weight_attr=weight_attr, bias_attr=bias_attr),
             nn.ReLU(),
-            Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
+            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
             nn.InstanceNorm2d(dim_out, weight_attr=weight_attr, bias_attr=bias_attr))

@@ -79,26 +78,26 @@ class StyleResidualBlock(paddle.nn.Layer):
     def __init__(self, dim_in, dim_out):
         super(StyleResidualBlock, self).__init__()
         self.block1 = nn.Sequential(
-            Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
+            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
             PONO())
         ks = 3
         pw = ks // 2
-        self.beta1 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
-        self.gamma1 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.beta1 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.gamma1 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)

         self.block2 = nn.Sequential(
             nn.ReLU(),
-            Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
+            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
             PONO())
-        self.beta2 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
-        self.gamma2 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.beta2 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.gamma2 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)

     def forward(self, x, y):
         """forward"""

@@ -120,12 +119,12 @@ class MDNet(paddle.nn.Layer):
         layers = []
         layers.append(
-            Conv2d(3, conv_dim, kernel_size=7, stride=1, padding=3, bias_attr=False))
+            nn.Conv2d(3, conv_dim, kernel_size=7, stride=1, padding=3, bias_attr=False))
         layers.append(
             nn.InstanceNorm2d(conv_dim, weight_attr=None, bias_attr=None))

@@ -135,12 +134,12 @@ class MDNet(paddle.nn.Layer):
         curr_dim = conv_dim
         for i in range(2):
             layers.append(
-                Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias_attr=False))
+                nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias_attr=False))
             layers.append(
                 nn.InstanceNorm2d(curr_dim * 2, weight_attr=None,

@@ -167,12 +166,12 @@ class TNetDown(paddle.nn.Layer):
         layers = []
         layers.append(
-            Conv2d(3, conv_dim, kernel_size=7, stride=1, padding=3, bias_attr=False))
+            nn.Conv2d(3, conv_dim, kernel_size=7, stride=1, padding=3, bias_attr=False))
         layers.append(
             nn.InstanceNorm2d(conv_dim, weight_attr=False, bias_attr=False))

@@ -182,12 +181,12 @@ class TNetDown(paddle.nn.Layer):
         curr_dim = conv_dim
         for i in range(2):
             layers.append(
-                Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias_attr=False))
+                nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias_attr=False))
             layers.append(
                 nn.InstanceNorm2d(curr_dim * 2, weight_attr=False,

@@ -211,18 +210,18 @@
 class GetMatrix(paddle.fluid.dygraph.Layer):
     def __init__(self, dim_in, dim_out):
         super(GetMatrix, self).__init__()
-        self.get_gamma = Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias_attr=False)
-        self.get_beta = Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias_attr=False)
+        self.get_gamma = nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias_attr=False)
+        self.get_beta = nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=1, padding=0, bias_attr=False)

     def forward(self, x):
         gamma = self.get_gamma(x)

@@ -237,8 +236,8 @@ class MANet(paddle.nn.Layer):
         self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
         curr_dim = conv_dim * 4
         self.w = w
-        self.beta = Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
-        self.gamma = Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
+        self.beta = nn.Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
+        self.gamma = nn.Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
         self.simple_spade = GetMatrix(curr_dim, 1)  # get the makeup matrix
         self.repeat_num = repeat_num
         for i in range(repeat_num):

@@ -282,12 +281,12 @@ class MANet(paddle.nn.Layer):
             setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
             curr_dim = curr_dim // 2
         self.img_reg = [
-            Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias_attr=False)
+            nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias_attr=False)
         ]
         self.img_reg = nn.Sequential(*self.img_reg)
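Note: the same mechanical substitution runs through every generator block in this file. For reference, here is a hedged sketch of how ResidualBlock reads after the commit; the forward() body and the simplified InstanceNorm2d arguments are assumptions (these hunks only show the __init__ wiring).

import paddle.nn as nn

class ResidualBlock(nn.Layer):
    def __init__(self, dim_in, dim_out):
        super(ResidualBlock, self).__init__()
        # Two conv + instance-norm stages, now built from stock Paddle layers.
        self.main = nn.Sequential(
            nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
            nn.InstanceNorm2d(dim_out),
            nn.ReLU(),
            nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias_attr=False),
            nn.InstanceNorm2d(dim_out))

    def forward(self, x):
        # Standard residual connection; requires dim_in == dim_out so the
        # shortcut and the transformed features can be added elementwise.
        return x + self.main(x)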
ppgan/models/makeup_model.py

@@ -21,7 +21,7 @@ from .builder import MODELS
 from .generators.builder import build_generator
 from .discriminators.builder import build_discriminator
 from .losses import GANLoss
-# from ..modules.nn import L1Loss
+from ..modules.init import init_weights
 from ..solver import build_optimizer
 from ..utils.image_pool import ImagePool
 from ..utils.preprocess import *

@@ -82,10 +82,13 @@ class MakeupModel(BaseModel):
         # The naming is different from those used in the paper.
         # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
         self.netG = build_generator(opt.model.generator)
+        init_weights(self.netG, init_type='xavier', init_gain=1.0)

         if self.isTrain:  # define discriminators
             self.netD_A = build_discriminator(opt.model.discriminator)
             self.netD_B = build_discriminator(opt.model.discriminator)
+            init_weights(self.netD_A, init_type='xavier', init_gain=1.0)
+            init_weights(self.netD_B, init_type='xavier', init_gain=1.0)

         if self.isTrain:
             self.fake_A_pool = ImagePool(
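Note: with the custom Conv2d wrapper gone, weight initialization moves out of layer construction into one explicit init_weights call per network. A sketch of the resulting flow, assuming the opt config object that MakeupModel already receives:

from ppgan.modules.init import init_weights

# Build first, then initialize in a single pass over the network's
# sublayers; this is the same call the hunk above adds for netG,
# netD_A, and netD_B.
netG = build_generator(opt.model.generator)
init_weights(netG, init_type='xavier', init_gain=1.0)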
ppgan/modules/nn.py

@@ -65,123 +65,3 @@ class Spectralnorm(paddle.nn.Layer):
         self.layer.weight = weight
         out = self.layer(x)
         return out
-
-
-def initial_type(input,
-                 op_type,
-                 fan_out,
-                 init="normal",
-                 use_bias=False,
-                 kernel_size=0,
-                 stddev=0.02,
-                 name=None):
-    if init == "kaiming":
-        if op_type == 'conv':
-            fan_in = input.shape[1] * kernel_size * kernel_size
-        elif op_type == 'deconv':
-            fan_in = fan_out * kernel_size * kernel_size
-        else:
-            if len(input.shape) > 2:
-                fan_in = input.shape[1] * input.shape[2] * input.shape[3]
-            else:
-                fan_in = input.shape[1]
-        bound = 1 / math.sqrt(fan_in)
-        param_attr = paddle.ParamAttr(
-            # name=name + "_w",
-            initializer=paddle.nn.initializer.Uniform(low=-bound, high=bound))
-        if use_bias == True:
-            bias_attr = paddle.ParamAttr(
-                # name=name + '_b',
-                initializer=paddle.nn.initializer.Uniform(low=-bound, high=bound))
-        else:
-            bias_attr = False
-    elif init == 'xavier':
-        param_attr = paddle.ParamAttr(
-            # name=name + "_w",
-            initializer=paddle.nn.initializer.Xavier(uniform=False))
-        if use_bias == True:
-            bias_attr = paddle.ParamAttr(
-                # name=name + "_b",
-                initializer=paddle.nn.initializer.Constant(0.0))
-        else:
-            bias_attr = False
-    else:
-        param_attr = paddle.ParamAttr(
-            # name=name + "_w",
-            initializer=paddle.nn.initializer.NormalInitializer(loc=0.0, scale=stddev))
-        if use_bias == True:
-            bias_attr = paddle.ParamAttr(
-                # name=name + "_b",
-                initializer=paddle.nn.initializer.Constant(0.0))
-        else:
-            bias_attr = False
-    return param_attr, bias_attr
-
-
-class Conv2d(paddle.nn.Conv2d):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 kernel_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 weight_attr=None,
-                 bias_attr=None,
-                 data_format="NCHW",
-                 init_type='xavier'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='conv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            kernel_size=kernel_size)
-        super(Conv2d, self).__init__(in_channels=num_channels,
-                                     out_channels=num_filters,
-                                     kernel_size=kernel_size,
-                                     stride=stride,
-                                     padding=padding,
-                                     dilation=dilation,
-                                     groups=groups,
-                                     weight_attr=param_attr,
-                                     bias_attr=bias_attr,
-                                     data_format=data_format)
-
-
-class ConvTranspose2d(paddle.nn.ConvTranspose2d):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 kernel_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 weight_attr=None,
-                 bias_attr=None,
-                 data_format="NCHW",
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='deconv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            kernel_size=kernel_size)
-        super(ConvTranspose2d, self).__init__(in_channels=num_channels,
-                                              out_channels=num_filters,
-                                              kernel_size=kernel_size,
-                                              padding=padding,
-                                              stride=stride,
-                                              dilation=dilation,
-                                              groups=groups,
-                                              weight_attr=weight_attr,
-                                              bias_attr=bias_attr,
-                                              data_format=data_format)
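Note: the other files in this commit drop their imports of Conv2d and ConvTranspose2d, so the wrapper and its initial_type helper can be deleted wholesale. The removal also appears to sidestep a latent bug: initial_type(input=input, ...) was called from a scope with no local input variable, so the name resolved to the Python builtin, and the 'kaiming' branch, which reads input.shape, would have raised. For reference, the 'xavier' branch the wrapper applied by default reduces to two explicit ParamAttr objects; a hedged sketch, not part of the commit itself:

import paddle
import paddle.nn as nn

# Direct equivalent of the deleted helper's 'xavier' branch: normal-Xavier
# weights and zero-initialized biases.
weight_attr = paddle.ParamAttr(
    initializer=paddle.nn.initializer.Xavier(uniform=False))
bias_attr = paddle.ParamAttr(
    initializer=paddle.nn.initializer.Constant(0.0))

# The layer the removed Conv2d wrapper would have produced (channel and
# kernel sizes chosen here for illustration only):
conv = nn.Conv2d(3, 64, kernel_size=3, weight_attr=weight_attr, bias_attr=bias_attr)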