Commit c7c324d8 authored by littletomatodonkey

fix some apis to paddle-dev

Parent 4f7ce1c1
@@ -59,12 +59,14 @@ class DictDataLoader():
         place = paddle.CUDAPlace(ParallelEnv().dev_id) \
             if ParallelEnv().nranks > 1 else paddle.CUDAPlace(0)
 
-        sampler = DistributedBatchSampler(self.dataset,
+        sampler = DistributedBatchSampler(
+            self.dataset,
             batch_size=batch_size,
             shuffle=True if is_train else False,
             drop_last=True if is_train else False)
 
-        self.dataloader = paddle.io.DataLoader(self.dataset,
+        self.dataloader = paddle.io.DataLoader(
+            self.dataset,
             batch_sampler=sampler,
             places=place,
             num_workers=num_workers)
@@ -92,7 +94,7 @@ class DictDataLoader():
         return len(self.dataloader)
 
     def get_items_by_indexs(self, key, indexs):
-        if isinstance(indexs, paddle.Variable):
+        if isinstance(indexs, paddle.Tensor):
             indexs = indexs.numpy()
         current_items = []
         items = getattr(self.dataset, key)
......
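Note: the hunks above move the loader construction to paddle 2.0-style calls, where DistributedBatchSampler and paddle.io.DataLoader take the dataset as the first positional argument and batches come back as paddle.Tensor in dynamic-graph mode (hence the paddle.Variable to paddle.Tensor check). A minimal, self-contained sketch of that usage follows; the toy dataset and batch size are illustrative only, not part of this commit.

import paddle
from paddle.io import Dataset, DataLoader, DistributedBatchSampler

class ToyDataset(Dataset):
    # illustrative dataset, not from the repository
    def __getitem__(self, idx):
        return paddle.ones([3, 8, 8]), idx

    def __len__(self):
        return 16

dataset = ToyDataset()
sampler = DistributedBatchSampler(
    dataset, batch_size=4, shuffle=True, drop_last=True)
loader = DataLoader(dataset, batch_sampler=sampler, num_workers=0)

for imgs, idxs in loader:
    # batches are paddle.Tensor, so the .numpy() call in get_items_by_indexs works
    assert isinstance(idxs, paddle.Tensor)
    break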
@@ -13,6 +13,7 @@ class ResnetGenerator(nn.Layer):
     code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
     """
+
     def __init__(self,
                  input_nc,
                  output_nc,
@@ -37,17 +38,14 @@ class ResnetGenerator(nn.Layer):
         norm_layer = build_norm_layer(norm_type)
         if type(norm_layer) == functools.partial:
-            use_bias = norm_layer.func == nn.InstanceNorm
+            use_bias = norm_layer.func == nn.InstanceNorm2d
         else:
-            use_bias = norm_layer == nn.InstanceNorm
+            use_bias = norm_layer == nn.InstanceNorm2d
 
         model = [
-            nn.ReflectionPad2d([3, 3, 3, 3]),
-            nn.Conv2d(input_nc,
-                      ngf,
-                      kernel_size=7,
-                      padding=0,
-                      bias_attr=use_bias),
+            nn.Pad2D(padding=[3, 3, 3, 3], mode="reflect"),
+            nn.Conv2d(
+                input_nc, ngf, kernel_size=7, padding=0, bias_attr=use_bias),
             norm_layer(ngf),
             nn.ReLU()
         ]
@@ -56,7 +54,8 @@ class ResnetGenerator(nn.Layer):
         for i in range(n_downsampling):  # add downsampling layers
             mult = 2**i
             model += [
-                nn.Conv2d(ngf * mult,
+                nn.Conv2d(
+                    ngf * mult,
                     ngf * mult * 2,
                     kernel_size=3,
                     stride=2,
@@ -70,7 +69,8 @@ class ResnetGenerator(nn.Layer):
         for i in range(n_blocks):  # add ResNet blocks
             model += [
-                ResnetBlock(ngf * mult,
+                ResnetBlock(
+                    ngf * mult,
                     padding_type=padding_type,
                     norm_layer=norm_layer,
                     use_dropout=use_dropout,
@@ -80,7 +80,8 @@ class ResnetGenerator(nn.Layer):
         for i in range(n_downsampling):  # add upsampling layers
             mult = 2**(n_downsampling - i)
             model += [
-                nn.ConvTranspose2d(ngf * mult,
+                nn.ConvTranspose2d(
+                    ngf * mult,
                     int(ngf * mult / 2),
                     kernel_size=3,
                     stride=2,
@@ -90,7 +91,7 @@ class ResnetGenerator(nn.Layer):
                 norm_layer(int(ngf * mult / 2)),
                 nn.ReLU()
             ]
-        model += [nn.ReflectionPad2d([3, 3, 3, 3])]
+        model += [nn.Pad2D(padding=[3, 3, 3, 3], mode="reflect")]
         model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
         model += [nn.Tanh()]
@@ -103,6 +104,7 @@ class ResnetGenerator(nn.Layer):
 class ResnetBlock(nn.Layer):
     """Define a Resnet block"""
+
     def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
         """Initialize the Resnet block
@@ -130,15 +132,13 @@ class ResnetBlock(nn.Layer):
         """
         conv_block = []
         p = 0
-        if padding_type == 'reflect':
-            conv_block += [nn.ReflectionPad2d([1, 1, 1, 1])]
-        elif padding_type == 'replicate':
-            conv_block += [nn.ReplicationPad2d([1, 1, 1, 1])]
+        if padding_type in ['reflect', 'replicate']:
+            conv_block += [nn.Pad2D(padding=[1, 1, 1, 1], mode=padding_type)]
         elif padding_type == 'zero':
             p = 1
         else:
-            raise NotImplementedError('padding [%s] is not implemented' %
-                                      padding_type)
+            raise NotImplementedError(
+                'padding [%s] is not implemented' % padding_type)
 
         conv_block += [
             nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
@@ -149,15 +149,13 @@ class ResnetBlock(nn.Layer):
             conv_block += [nn.Dropout(0.5)]
 
         p = 0
-        if padding_type == 'reflect':
-            conv_block += [nn.ReflectionPad2d([1, 1, 1, 1])]
-        elif padding_type == 'replicate':
-            conv_block += [nn.ReplicationPad2d([1, 1, 1, 1])]
+        if padding_type in ['reflect', 'replicate']:
+            conv_block += [nn.Pad2D(padding=[1, 1, 1, 1], mode=padding_type)]
         elif padding_type == 'zero':
             p = 1
         else:
-            raise NotImplementedError('padding [%s] is not implemented' %
-                                      padding_type)
+            raise NotImplementedError(
+                'padding [%s] is not implemented' % padding_type)
 
         conv_block += [
             nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
             norm_layer(dim)
......
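Note: the generator hunks above swap two layer APIs: nn.ReflectionPad2d / nn.ReplicationPad2d become nn.Pad2D with a mode argument, and nn.InstanceNorm becomes nn.InstanceNorm2d. A small sketch of the new calls, with an arbitrary channel count and input shape chosen only for illustration:

import paddle
import paddle.nn as nn

pad_reflect = nn.Pad2D(padding=[1, 1, 1, 1], mode="reflect")
pad_replicate = nn.Pad2D(padding=[1, 1, 1, 1], mode="replicate")
norm = nn.InstanceNorm2d(8)

x = paddle.rand([1, 8, 16, 16])
y = norm(pad_reflect(x))  # reflect-padded to 18x18, then instance-normalized
print(y.shape)            # [1, 8, 18, 18]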
@@ -10,6 +10,7 @@ class GANLoss(nn.Layer):
     The GANLoss class abstracts away the need to create the target label tensor
     that has the same size as the input.
     """
+
     def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
         """ Initialize the GANLoss class.
@@ -47,17 +48,13 @@
         """
         if target_is_real:
             if not hasattr(self, 'target_real_tensor'):
-                self.target_real_tensor = paddle.fill_constant(
-                    shape=paddle.shape(prediction),
-                    value=self.target_real_label,
-                    dtype='float32')
+                self.target_real_tensor = self.target_real_label * paddle.ones(
+                    paddle.shape(prediction), dtype='float32')
             target_tensor = self.target_real_tensor
         else:
             if not hasattr(self, 'target_fake_tensor'):
-                self.target_fake_tensor = paddle.fill_constant(
-                    shape=paddle.shape(prediction),
-                    value=self.target_fake_label,
-                    dtype='float32')
+                self.target_fake_tensor = self.target_fake_label * paddle.ones(
+                    paddle.shape(prediction), dtype='float32')
             target_tensor = self.target_fake_tensor
 
         # target_tensor.stop_gradient = True
......
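Note: the loss hunk replaces the 1.x-style paddle.fill_constant with label * paddle.ones(shape), built against the prediction's runtime shape. A minimal sketch, using a stand-in prediction tensor that is not part of the commit:

import paddle

prediction = paddle.rand([4, 1, 30, 30])  # stand-in for the discriminator output
target_real_label = 1.0

target_real_tensor = target_real_label * paddle.ones(
    paddle.shape(prediction), dtype='float32')
print(target_real_tensor.shape)  # [4, 1, 30, 30], every entry 1.0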
@@ -80,7 +80,7 @@ def calculate_gain(nonlinearity, param=None):
 @paddle.no_grad()
 def constant_(x, value):
-    temp_value = paddle.fill_constant(x.shape, x.dtype, value)
+    temp_value = value * paddle.ones(x.shape, x.dtype)
     x.set_value(temp_value)
     return x
@@ -256,10 +256,8 @@ def kaiming_init(layer,
                  distribution='normal'):
     assert distribution in ['uniform', 'normal']
     if distribution == 'uniform':
-        kaiming_uniform_(layer.weight,
-                         a=a,
-                         mode=mode,
-                         nonlinearity=nonlinearity)
+        kaiming_uniform_(
+            layer.weight, a=a, mode=mode, nonlinearity=nonlinearity)
     else:
         kaiming_normal_(layer.weight, a=a, mode=mode, nonlinearity=nonlinearity)
     if hasattr(layer, 'bias') and layer.bias is not None:
@@ -275,6 +273,7 @@ def init_weights(net, init_type='normal', init_gain=0.02):
     We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
     work better for some applications. Feel free to try yourself.
     """
+
     def init_func(m):  # define the initialization function
         classname = m.__class__.__name__
         if hasattr(m, 'weight') and (classname.find('Conv') != -1
......
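Note: constant_ above follows the same fill_constant-to-ones replacement. A short usage sketch; the Conv2d layer is only an example target, not code from this commit:

import paddle
import paddle.nn as nn

@paddle.no_grad()
def constant_(x, value):
    # fill the parameter with a constant and write it back in place
    temp_value = value * paddle.ones(x.shape, x.dtype)
    x.set_value(temp_value)
    return x

conv = nn.Conv2d(3, 8, kernel_size=3)
constant_(conv.bias, 0.0)
print(conv.bias.numpy().sum())  # 0.0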
@@ -21,19 +21,20 @@ def build_norm_layer(norm_type='instance'):
     if norm_type == 'batch':
         norm_layer = functools.partial(
             nn.BatchNorm,
-            param_attr=paddle.ParamAttr(
+            weight_attr=paddle.ParamAttr(
                 initializer=nn.initializer.Normal(1.0, 0.02)),
             bias_attr=paddle.ParamAttr(
                 initializer=nn.initializer.Constant(0.0)),
             trainable_statistics=True)
     elif norm_type == 'instance':
         norm_layer = functools.partial(
-            nn.InstanceNorm,
-            param_attr=paddle.ParamAttr(
+            nn.InstanceNorm2d,
+            weight_attr=paddle.ParamAttr(
                 initializer=nn.initializer.Constant(1.0),
                 learning_rate=0.0,
                 trainable=False),
-            bias_attr=paddle.ParamAttr(initializer=nn.initializer.Constant(0.0),
+            bias_attr=paddle.ParamAttr(
+                initializer=nn.initializer.Constant(0.0),
                 learning_rate=0.0,
                 trainable=False))
     elif norm_type == 'spectral':
@@ -43,6 +44,6 @@ def build_norm_layer(norm_type='instance'):
         def norm_layer(x):
             return Identity()
     else:
-        raise NotImplementedError('normalization layer [%s] is not found' %
-                                  norm_type)
+        raise NotImplementedError(
+            'normalization layer [%s] is not found' % norm_type)
     return norm_layer
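Note: with the rename to nn.InstanceNorm2d, the 'instance' branch above returns a functools.partial whose .func attribute ResnetGenerator checks to decide use_bias. The partial below mirrors that branch as a standalone sketch; the channel count is arbitrary:

import functools
import paddle
import paddle.nn as nn

# mirrors the 'instance' branch of build_norm_layer above
norm_layer = functools.partial(
    nn.InstanceNorm2d,
    weight_attr=paddle.ParamAttr(
        initializer=nn.initializer.Constant(1.0),
        learning_rate=0.0,
        trainable=False),
    bias_attr=paddle.ParamAttr(
        initializer=nn.initializer.Constant(0.0),
        learning_rate=0.0,
        trainable=False))

use_bias = norm_layer.func == nn.InstanceNorm2d  # True, so the convs keep their bias
norm = norm_layer(64)                            # InstanceNorm2d over 64 channels, affine frozen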