Commit 6306f226 authored by lijianshe02

refine psgan code: replace the custom Conv2d/ConvTranspose2d wrappers with the stock paddle.nn layers, initialize the generator and discriminators explicitly via init_weights, and drop the now-unused initial_type helper from modules/nn.py

Parent c2ed7bb8
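In short: call sites stop using the hand-rolled `Conv2d` wrapper from `modules/nn.py`, which baked Xavier initialization into layer construction, and use the stock `paddle.nn.Conv2d` instead, with initialization applied afterwards in one explicit `init_weights` pass. A minimal sketch of the new pattern, assuming illustrative channel sizes (the fenced blocks in this page are hedged sketches, not part of the diff):

```python
import paddle.nn as nn
from ppgan.modules.init import init_weights  # absolute form of the relative import added below; package name assumed

# Before: Conv2d from modules/nn.py ran Xavier init inside __init__.
# After: plain paddle.nn layers, initialized once, explicitly.
net = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias_attr=False),
    nn.InstanceNorm2d(64),
    nn.ReLU())
init_weights(net, init_type='xavier', init_gain=1.0)
```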
@@ -18,7 +18,7 @@ import numpy as np
 import paddle.nn as nn
 import paddle.nn.functional as F
-from ...modules.nn import Conv2d, Spectralnorm
+from ...modules.nn import Spectralnorm
 from ...modules.norm import build_norm_layer
 from .builder import DISCRIMINATORS
@@ -51,21 +51,21 @@ class NLayerDiscriminator(nn.Layer):
         if norm_type == 'spectral':
             sequence = [
                 Spectralnorm(
-                    Conv2d(input_nc,
-                           ndf,
-                           kernel_size=kw,
-                           stride=2,
-                           padding=padw)),
+                    nn.Conv2d(input_nc,
+                              ndf,
+                              kernel_size=kw,
+                              stride=2,
+                              padding=padw)),
                 nn.LeakyReLU(0.01)
             ]
         else:
             sequence = [
-                Conv2d(input_nc,
-                       ndf,
-                       kernel_size=kw,
-                       stride=2,
-                       padding=padw,
-                       bias_attr=use_bias),
+                nn.Conv2d(input_nc,
+                          ndf,
+                          kernel_size=kw,
+                          stride=2,
+                          padding=padw,
+                          bias_attr=use_bias),
                 nn.LeakyReLU(0.2)
             ]
         nf_mult = 1
@@ -76,21 +76,21 @@ class NLayerDiscriminator(nn.Layer):
             if norm_type == 'spectral':
                 sequence += [
                     Spectralnorm(
-                        Conv2d(ndf * nf_mult_prev,
-                               ndf * nf_mult,
-                               kernel_size=kw,
-                               stride=2,
-                               padding=padw)),
+                        nn.Conv2d(ndf * nf_mult_prev,
+                                  ndf * nf_mult,
+                                  kernel_size=kw,
+                                  stride=2,
+                                  padding=padw)),
                     nn.LeakyReLU(0.01)
                 ]
             else:
                 sequence += [
-                    Conv2d(ndf * nf_mult_prev,
-                           ndf * nf_mult,
-                           kernel_size=kw,
-                           stride=2,
-                           padding=padw,
-                           bias_attr=use_bias),
+                    nn.Conv2d(ndf * nf_mult_prev,
+                              ndf * nf_mult,
+                              kernel_size=kw,
+                              stride=2,
+                              padding=padw,
+                              bias_attr=use_bias),
                     norm_layer(ndf * nf_mult),
                     nn.LeakyReLU(0.2)
                 ]
@@ -100,21 +100,21 @@ class NLayerDiscriminator(nn.Layer):
         if norm_type == 'spectral':
             sequence += [
                 Spectralnorm(
-                    Conv2d(ndf * nf_mult_prev,
-                           ndf * nf_mult,
-                           kernel_size=kw,
-                           stride=1,
-                           padding=padw)),
+                    nn.Conv2d(ndf * nf_mult_prev,
+                              ndf * nf_mult,
+                              kernel_size=kw,
+                              stride=1,
+                              padding=padw)),
                 nn.LeakyReLU(0.01)
             ]
         else:
             sequence += [
-                Conv2d(ndf * nf_mult_prev,
-                       ndf * nf_mult,
-                       kernel_size=kw,
-                       stride=1,
-                       padding=padw,
-                       bias_attr=use_bias),
+                nn.Conv2d(ndf * nf_mult_prev,
+                          ndf * nf_mult,
+                          kernel_size=kw,
+                          stride=1,
+                          padding=padw,
+                          bias_attr=use_bias),
                 norm_layer(ndf * nf_mult),
                 nn.LeakyReLU(0.2)
             ]
@@ -122,21 +122,21 @@ class NLayerDiscriminator(nn.Layer):
         if norm_type == 'spectral':
             sequence += [
                 Spectralnorm(
-                    Conv2d(ndf * nf_mult,
-                           1,
-                           kernel_size=kw,
-                           stride=1,
-                           padding=padw,
-                           bias_attr=False))
+                    nn.Conv2d(ndf * nf_mult,
+                              1,
+                              kernel_size=kw,
+                              stride=1,
+                              padding=padw,
+                              bias_attr=False))
             ]  # output 1 channel prediction map
         else:
             sequence += [
-                Conv2d(ndf * nf_mult,
-                       1,
-                       kernel_size=kw,
-                       stride=1,
-                       padding=padw,
-                       bias_attr=False)
+                nn.Conv2d(ndf * nf_mult,
+                          1,
+                          kernel_size=kw,
+                          stride=1,
+                          padding=padw,
+                          bias_attr=False)
             ]  # output 1 channel prediction map

         self.model = nn.Sequential(*sequence)
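For reference, a sketch of what the first refactored block of `NLayerDiscriminator` builds in each branch; `kw = 4`, `padw = 1`, `input_nc = 3`, and `ndf = 64` are assumed PatchGAN defaults, not values taken from this diff:

```python
import paddle
import paddle.nn as nn
from ppgan.modules.nn import Spectralnorm  # absolute form of the relative import above; package name assumed

kw, padw = 4, 1        # assumed kernel size / padding
input_nc, ndf = 3, 64  # assumed channel counts

# norm_type == 'spectral': spectral norm wraps the conv and the
# LeakyReLU slope drops to 0.01; no separate norm layer is added.
spectral_head = nn.Sequential(
    Spectralnorm(
        nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw)),
    nn.LeakyReLU(0.01))

# Default branch: bare conv (bias gated by use_bias) + LeakyReLU(0.2).
plain_head = nn.Sequential(
    nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw,
              bias_attr=False),
    nn.LeakyReLU(0.2))

x = paddle.randn([1, input_nc, 256, 256])
print(plain_head(x).shape)  # [1, 64, 128, 128]: stride-2 halves H and W
```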
@@ -20,7 +20,6 @@ import functools
 import numpy as np
 from ...modules.norm import build_norm_layer
-from ...modules.nn import Conv2d, ConvTranspose2d
 from .builder import GENERATORS
@@ -50,21 +49,21 @@ class ResidualBlock(paddle.nn.Layer):
             bias_attr = None

         self.main = nn.Sequential(
-            Conv2d(dim_in,
-                   dim_out,
-                   kernel_size=3,
-                   stride=1,
-                   padding=1,
-                   bias_attr=False),
+            nn.Conv2d(dim_in,
+                      dim_out,
+                      kernel_size=3,
+                      stride=1,
+                      padding=1,
+                      bias_attr=False),
             nn.InstanceNorm2d(dim_out,
                               weight_attr=weight_attr,
                               bias_attr=bias_attr), nn.ReLU(),
-            Conv2d(dim_out,
-                   dim_out,
-                   kernel_size=3,
-                   stride=1,
-                   padding=1,
-                   bias_attr=False),
+            nn.Conv2d(dim_out,
+                      dim_out,
+                      kernel_size=3,
+                      stride=1,
+                      padding=1,
+                      bias_attr=False),
             nn.InstanceNorm2d(dim_out,
                               weight_attr=weight_attr,
                               bias_attr=bias_attr))
@@ -79,26 +78,26 @@ class StyleResidualBlock(paddle.nn.Layer):
     def __init__(self, dim_in, dim_out):
         super(StyleResidualBlock, self).__init__()
         self.block1 = nn.Sequential(
-            Conv2d(dim_in,
-                   dim_out,
-                   kernel_size=3,
-                   stride=1,
-                   padding=1,
-                   bias_attr=False), PONO())
+            nn.Conv2d(dim_in,
+                      dim_out,
+                      kernel_size=3,
+                      stride=1,
+                      padding=1,
+                      bias_attr=False), PONO())
         ks = 3
         pw = ks // 2
-        self.beta1 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
-        self.gamma1 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.beta1 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.gamma1 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
         self.block2 = nn.Sequential(
             nn.ReLU(),
-            Conv2d(dim_out,
-                   dim_out,
-                   kernel_size=3,
-                   stride=1,
-                   padding=1,
-                   bias_attr=False), PONO())
-        self.beta2 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
-        self.gamma2 = Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+            nn.Conv2d(dim_out,
+                      dim_out,
+                      kernel_size=3,
+                      stride=1,
+                      padding=1,
+                      bias_attr=False), PONO())
+        self.beta2 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)
+        self.gamma2 = nn.Conv2d(dim_in, dim_out, kernel_size=ks, padding=pw)

     def forward(self, x, y):
         """forward"""
@@ -120,12 +119,12 @@ class MDNet(paddle.nn.Layer):
         layers = []
         layers.append(
-            Conv2d(3,
-                   conv_dim,
-                   kernel_size=7,
-                   stride=1,
-                   padding=3,
-                   bias_attr=False))
+            nn.Conv2d(3,
+                      conv_dim,
+                      kernel_size=7,
+                      stride=1,
+                      padding=3,
+                      bias_attr=False))
         layers.append(
             nn.InstanceNorm2d(conv_dim, weight_attr=None, bias_attr=None))
@@ -135,12 +134,12 @@ class MDNet(paddle.nn.Layer):
         curr_dim = conv_dim
         for i in range(2):
             layers.append(
-                Conv2d(curr_dim,
-                       curr_dim * 2,
-                       kernel_size=4,
-                       stride=2,
-                       padding=1,
-                       bias_attr=False))
+                nn.Conv2d(curr_dim,
+                          curr_dim * 2,
+                          kernel_size=4,
+                          stride=2,
+                          padding=1,
+                          bias_attr=False))
             layers.append(
                 nn.InstanceNorm2d(curr_dim * 2,
                                   weight_attr=None,
@@ -167,12 +166,12 @@ class TNetDown(paddle.nn.Layer):
         layers = []
         layers.append(
-            Conv2d(3,
-                   conv_dim,
-                   kernel_size=7,
-                   stride=1,
-                   padding=3,
-                   bias_attr=False))
+            nn.Conv2d(3,
+                      conv_dim,
+                      kernel_size=7,
+                      stride=1,
+                      padding=3,
+                      bias_attr=False))
         layers.append(
             nn.InstanceNorm2d(conv_dim, weight_attr=False, bias_attr=False))
@@ -182,12 +181,12 @@ class TNetDown(paddle.nn.Layer):
         curr_dim = conv_dim
         for i in range(2):
             layers.append(
-                Conv2d(curr_dim,
-                       curr_dim * 2,
-                       kernel_size=4,
-                       stride=2,
-                       padding=1,
-                       bias_attr=False))
+                nn.Conv2d(curr_dim,
+                          curr_dim * 2,
+                          kernel_size=4,
+                          stride=2,
+                          padding=1,
+                          bias_attr=False))
             layers.append(
                 nn.InstanceNorm2d(curr_dim * 2,
                                   weight_attr=False,
@@ -211,18 +210,18 @@ class TNetDown(paddle.nn.Layer):
 class GetMatrix(paddle.fluid.dygraph.Layer):
     def __init__(self, dim_in, dim_out):
         super(GetMatrix, self).__init__()
-        self.get_gamma = Conv2d(dim_in,
-                                dim_out,
-                                kernel_size=1,
-                                stride=1,
-                                padding=0,
-                                bias_attr=False)
-        self.get_beta = Conv2d(dim_in,
-                               dim_out,
-                               kernel_size=1,
-                               stride=1,
-                               padding=0,
-                               bias_attr=False)
+        self.get_gamma = nn.Conv2d(dim_in,
+                                   dim_out,
+                                   kernel_size=1,
+                                   stride=1,
+                                   padding=0,
+                                   bias_attr=False)
+        self.get_beta = nn.Conv2d(dim_in,
+                                  dim_out,
+                                  kernel_size=1,
+                                  stride=1,
+                                  padding=0,
+                                  bias_attr=False)

     def forward(self, x):
         gamma = self.get_gamma(x)
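A quick usage sketch of the refactored `GetMatrix` head: two bias-free 1x1 convolutions that read per-pixel gamma and beta maps off a feature tensor (shapes below are illustrative; assumes `GetMatrix` from the module above is in scope):

```python
import paddle

head = GetMatrix(dim_in=256, dim_out=1)
feat = paddle.randn([2, 256, 64, 64])
gamma = head.get_gamma(feat)  # [2, 1, 64, 64]; a 1x1 conv keeps H and W
beta = head.get_beta(feat)    # same shape; this is the per-pixel makeup matrix
```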
@@ -237,8 +236,8 @@ class MANet(paddle.nn.Layer):
         self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
         curr_dim = conv_dim * 4
         self.w = w
-        self.beta = Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
-        self.gamma = Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
+        self.beta = nn.Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
+        self.gamma = nn.Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
         self.simple_spade = GetMatrix(curr_dim, 1)  # get the makeup matrix
         self.repeat_num = repeat_num
         for i in range(repeat_num):
@@ -282,12 +281,12 @@ class MANet(paddle.nn.Layer):
             setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
             curr_dim = curr_dim // 2
         self.img_reg = [
-            Conv2d(curr_dim,
-                   3,
-                   kernel_size=7,
-                   stride=1,
-                   padding=3,
-                   bias_attr=False)
+            nn.Conv2d(curr_dim,
+                      3,
+                      kernel_size=7,
+                      stride=1,
+                      padding=3,
+                      bias_attr=False)
         ]
         self.img_reg = nn.Sequential(*self.img_reg)
@@ -21,7 +21,7 @@ from .builder import MODELS
 from .generators.builder import build_generator
 from .discriminators.builder import build_discriminator
 from .losses import GANLoss
-# from ..modules.nn import L1Loss
+from ..modules.init import init_weights
 from ..solver import build_optimizer
 from ..utils.image_pool import ImagePool
 from ..utils.preprocess import *
@@ -82,10 +82,13 @@ class MakeupModel(BaseModel):
         # The naming is different from those used in the paper.
         # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
         self.netG = build_generator(opt.model.generator)
+        init_weights(self.netG, init_type='xavier', init_gain=1.0)

         if self.isTrain:  # define discriminators
             self.netD_A = build_discriminator(opt.model.discriminator)
             self.netD_B = build_discriminator(opt.model.discriminator)
+            init_weights(self.netD_A, init_type='xavier', init_gain=1.0)
+            init_weights(self.netD_B, init_type='xavier', init_gain=1.0)

         if self.isTrain:
             self.fake_A_pool = ImagePool(
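This construct-then-initialize pattern replaces the per-layer initialization the deleted `Conv2d` wrapper performed. As a rough approximation only (not the actual body of `init_weights` in `modules/init.py`), the call behaves like:

```python
import paddle
import paddle.nn as nn

def xavier_init_sketch(net, gain=1.0):
    # Approximation only: re-draw every conv weight from a Xavier-normal
    # distribution, std = gain * sqrt(2 / (fan_in + fan_out)).
    for layer in net.sublayers():
        if isinstance(layer, nn.Conv2d):
            c_out, c_in, kh, kw = layer.weight.shape
            std = gain * (2.0 / ((c_in + c_out) * kh * kw)) ** 0.5
            layer.weight.set_value(
                paddle.normal(mean=0.0, std=std, shape=layer.weight.shape))
```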
@@ -65,123 +65,3 @@ class Spectralnorm(paddle.nn.Layer):
         self.layer.weight = weight
         out = self.layer(x)
         return out
-
-
-def initial_type(input,
-                 op_type,
-                 fan_out,
-                 init="normal",
-                 use_bias=False,
-                 kernel_size=0,
-                 stddev=0.02,
-                 name=None):
-    if init == "kaiming":
-        if op_type == 'conv':
-            fan_in = input.shape[1] * kernel_size * kernel_size
-        elif op_type == 'deconv':
-            fan_in = fan_out * kernel_size * kernel_size
-        else:
-            if len(input.shape) > 2:
-                fan_in = input.shape[1] * input.shape[2] * input.shape[3]
-            else:
-                fan_in = input.shape[1]
-        bound = 1 / math.sqrt(fan_in)
-        param_attr = paddle.ParamAttr(
-            # name=name + "_w",
-            initializer=paddle.nn.initializer.Uniform(low=-bound, high=bound))
-        if use_bias == True:
-            bias_attr = paddle.ParamAttr(
-                # name=name + '_b',
-                initializer=paddle.nn.initializer.Uniform(low=-bound,
-                                                          high=bound))
-        else:
-            bias_attr = False
-    elif init == 'xavier':
-        param_attr = paddle.ParamAttr(
-            # name=name + "_w",
-            initializer=paddle.nn.initializer.Xavier(uniform=False))
-        if use_bias == True:
-            bias_attr = paddle.ParamAttr(
-                # name=name + "_b",
-                initializer=paddle.nn.initializer.Constant(0.0))
-        else:
-            bias_attr = False
-    else:
-        param_attr = paddle.ParamAttr(
-            # name=name + "_w",
-            initializer=paddle.nn.initializer.NormalInitializer(loc=0.0,
-                                                                scale=stddev))
-        if use_bias == True:
-            bias_attr = paddle.ParamAttr(
-                # name=name + "_b",
-                initializer=paddle.nn.initializer.Constant(0.0))
-        else:
-            bias_attr = False
-    return param_attr, bias_attr
-
-
-class Conv2d(paddle.nn.Conv2d):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 kernel_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 weight_attr=None,
-                 bias_attr=None,
-                 data_format="NCHW",
-                 init_type='xavier'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='conv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            kernel_size=kernel_size)
-
-        super(Conv2d, self).__init__(in_channels=num_channels,
-                                     out_channels=num_filters,
-                                     kernel_size=kernel_size,
-                                     stride=stride,
-                                     padding=padding,
-                                     dilation=dilation,
-                                     groups=groups,
-                                     weight_attr=param_attr,
-                                     bias_attr=bias_attr,
-                                     data_format=data_format)
-
-
-class ConvTranspose2d(paddle.nn.ConvTranspose2d):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 kernel_size,
-                 padding=0,
-                 stride=1,
-                 dilation=1,
-                 groups=1,
-                 weight_attr=None,
-                 bias_attr=None,
-                 data_format="NCHW",
-                 init_type='normal'):
-        param_attr, bias_attr = initial_type(
-            input=input,
-            op_type='deconv',
-            fan_out=num_filters,
-            init=init_type,
-            use_bias=True if bias_attr != False else False,
-            kernel_size=kernel_size)
-
-        super(ConvTranspose2d, self).__init__(in_channels=num_channels,
-                                              out_channels=num_filters,
-                                              kernel_size=kernel_size,
-                                              padding=padding,
-                                              stride=stride,
-                                              dilation=dilation,
-                                              groups=groups,
-                                              weight_attr=weight_attr,
-                                              bias_attr=bias_attr,
-                                              data_format=data_format)
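Deleting this wrapper also removes a latent bug: `initial_type(input=input, ...)` passed the Python builtin `input` rather than a tensor, so the `kaiming` branch, which reads `input.shape`, could never have run; only the `xavier` and `normal` branches, which never touch `input`, actually worked. For reference, the effect of the `xavier` branch can be written inline with plain Paddle attributes (layer sizes illustrative):

```python
import paddle
import paddle.nn as nn

# Inline equivalent of the deleted wrapper's init == 'xavier' branch.
conv = nn.Conv2d(64, 64, kernel_size=3, padding=1,
                 weight_attr=paddle.ParamAttr(
                     initializer=paddle.nn.initializer.Xavier(uniform=False)),
                 bias_attr=paddle.ParamAttr(
                     initializer=paddle.nn.initializer.Constant(0.0)))
```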