Commit 3ebf6aaf authored by: W wangyang59

Fixed a GPU bug in the trainer API so the GAN demo can train on GPU

Parent c159e4dd
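The core of this change is moving the trainer's data batches from host (Cpu*) to device (Gpu*) buffers so they match a run initialized with --use_gpu=1. What follows is a minimal sketch of that pattern, not part of the commit: it assumes `api` is py_paddle's swig_paddle module as the demo trainer scripts import it, and the helper name prepare_batch_gpu is made up for illustration.

import numpy
from py_paddle import swig_paddle as api  # assumed import, as in the demo trainers

def prepare_batch_gpu(samples, labels):
    """Wrap a numpy batch as GPU-resident Arguments (illustrative helper)."""
    inputs = api.Arguments.createArguments(2)
    # Before this commit the Cpu* factories were used here, which mismatches
    # a gradient machine created under --use_gpu=1.
    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(samples))
    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
    return inputs

if __name__ == "__main__":
    api.initPaddle('--use_gpu=1', '--dot_period=10', '--log_period=100')
    samples = numpy.random.normal(size=(16, 100)).astype('float32')
    labels = numpy.ones(16, dtype='int32')
    batch = prepare_batch_gpu(samples, labels)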
......@@ -2,5 +2,5 @@ output/
*.png
.pydevproject
.project
-trainLog.txt
+train.log
data/raw_data/
......@@ -41,39 +41,9 @@ settings(
    learning_method=AdamOptimizer()
)

-def convTrans_bn(input, channels, output_x, num_filters, imgSize, stride, name,
-                 param_attr, bias_attr, param_attr_bn):
-    tmp = imgSize - (output_x - 1) * stride
-    if tmp <= 1 or tmp > 5:
-        raise ValueError("convTrans input-output dimension does not fit")
-    elif tmp <= 3:
-        filter_size = tmp + 2
-        padding = 1
-    else:
-        filter_size = tmp
-        padding = 0
-    convTrans = img_conv_layer(input, filter_size=filter_size,
-                               num_filters=num_filters,
-                               name=name + "_convt", num_channels=channels,
-                               act=LinearActivation(), groups=1, stride=stride,
-                               padding=padding, bias_attr=bias_attr,
-                               param_attr=param_attr, shared_biases=True, layer_attr=None,
-                               filter_size_y=None, stride_y=None, padding_y=None,
-                               trans=True)
-    convTrans_bn = batch_norm_layer(convTrans,
-                                    act=ReluActivation(),
-                                    name=name + "_convt_bn",
-                                    bias_attr=bias_attr,
-                                    param_attr=param_attr_bn,
-                                    use_global_stats=False)
-    return convTrans_bn

def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name,
-            param_attr, bias_attr, param_attr_bn, bn):
+            param_attr, bias_attr, param_attr_bn, bn, trans=False,
+            act=ReluActivation()):
    tmp = imgSize - (output_x - 1) * stride
    if tmp <= 1 or tmp > 5:
        raise ValueError("conv input-output dimension does not fit")
......@@ -86,18 +56,24 @@ def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name,
    print (imgSize, output_x, stride, filter_size, padding)

+    if trans:
+        nameApx = "_convt"
+    else:
+        nameApx = "_conv"

    if bn:
        conv = img_conv_layer(input, filter_size=filter_size,
                              num_filters=num_filters,
-                              name=name + "_conv", num_channels=channels,
+                              name=name + nameApx, num_channels=channels,
                              act=LinearActivation(), groups=1, stride=stride,
                              padding=padding, bias_attr=bias_attr,
                              param_attr=param_attr, shared_biases=True, layer_attr=None,
-                              filter_size_y=None, stride_y=None, padding_y=None)
+                              filter_size_y=None, stride_y=None, padding_y=None,
+                              trans=trans)

        conv_bn = batch_norm_layer(conv,
-                                   act=ReluActivation(),
-                                   name=name + "_conv_bn",
+                                   act=act,
+                                   name=name + nameApx + "_bn",
                                   bias_attr=bias_attr,
                                   param_attr=param_attr_bn,
                                   use_global_stats=False)
......@@ -106,11 +82,12 @@ def conv_bn(input, channels, imgSize, num_filters, output_x, stride, name,
    else:
        conv = img_conv_layer(input, filter_size=filter_size,
                              num_filters=num_filters,
-                              name=name + "_conv", num_channels=channels,
-                              act=ReluActivation(), groups=1, stride=stride,
+                              name=name + nameApx, num_channels=channels,
+                              act=act, groups=1, stride=stride,
                              padding=padding, bias_attr=bias_attr,
                              param_attr=param_attr, shared_biases=True, layer_attr=None,
-                              filter_size_y=None, stride_y=None, padding_y=None)
+                              filter_size_y=None, stride_y=None, padding_y=None,
+                              trans=trans)
        return conv
def generator(noise):
......@@ -143,7 +120,7 @@ def generator(noise):
                             param_attr=param_attr_bn,
                             use_global_stats=False)

-    h2_bn = convTrans_bn(h1_bn,
+    h2_bn = conv_bn(h1_bn,
                    channels=gf_dim*4,
                    output_x=s8,
                    num_filters=gf_dim*2,
......@@ -152,9 +129,11 @@ def generator(noise):
name="gen_layer_h2",
param_attr=param_attr,
bias_attr=bias_attr,
param_attr_bn=param_attr_bn)
param_attr_bn=param_attr_bn,
bn=True,
trans=True)
h3_bn = convTrans_bn(h2_bn,
h3_bn = conv_bn(h2_bn,
channels=gf_dim*2,
output_x=s4,
num_filters=gf_dim,
......@@ -163,10 +142,12 @@ def generator(noise):
name="gen_layer_h3",
param_attr=param_attr,
bias_attr=bias_attr,
param_attr_bn=param_attr_bn)
param_attr_bn=param_attr_bn,
bn=True,
trans=True)
return convTrans_bn(h3_bn,
return conv_bn(h3_bn,
channels=gf_dim,
output_x=s2,
num_filters=c_dim,
......@@ -175,7 +156,10 @@ def generator(noise):
name="gen_layer_h4",
param_attr=param_attr,
bias_attr=bias_attr,
param_attr_bn=param_attr_bn)
param_attr_bn=param_attr_bn,
bn=False,
trans=True,
act=TanhActivation())
def discriminator(sample):
......@@ -186,10 +170,12 @@ def discriminator(sample):
    that the sample is from the generator and dimension 1 is the probability
    that the sample is from real data.
    """
-    param_attr = ParamAttr(is_static=is_generator_training)
+    param_attr = ParamAttr(is_static=is_generator_training,
+                           initial_mean=0.0,
+                           initial_std=0.02)
    bias_attr = ParamAttr(is_static=is_generator_training,
-                          initial_mean=1.0,
-                          initial_std=0)
+                          initial_mean=0.0,
+                          initial_std=0.0)
    param_attr_bn=ParamAttr(is_static=is_generator_training,
                            initial_mean=1.0,
......
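Both the removed convTrans_bn and the unified conv_bn derive filter_size and padding from the conv-view input size, output size, and stride. The sketch below restates that arithmetic with a worked example; the function name infer_filter_and_padding is illustrative only, and the branch values mirror the convTrans_bn code shown above (conv_bn's own branches fall between the hunks).

def infer_filter_and_padding(imgSize, output_x, stride):
    # Mirrors the branch shown in convTrans_bn above: tmp must lie in (1, 5].
    tmp = imgSize - (output_x - 1) * stride
    if tmp <= 1 or tmp > 5:
        raise ValueError("conv input-output dimension does not fit")
    elif tmp <= 3:
        return tmp + 2, 1   # filter_size, padding
    else:
        return tmp, 0

# Worked example: imgSize=8, output_x=4, stride=2 gives tmp=2, hence
# filter_size=4 and padding=1, which satisfies the usual convolution
# relation (imgSize - filter_size + 2*padding) / stride + 1 == output_x.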
......@@ -97,32 +97,32 @@ def prepare_discriminator_data_batch(
        (numpy.zeros(batch_size / 2, dtype='int32'),
         numpy.ones(batch_size / 2, dtype='int32')), 0)
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(all_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(all_labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(all_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(all_labels))
    return inputs

def prepare_discriminator_data_batch_pos(batch_size, noise_dim, sample_dim):
    real_samples = get_real_samples(batch_size, sample_dim)
    labels = numpy.ones(batch_size, dtype='int32')
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(real_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(real_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
    return inputs

def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise_dim, sample_dim):
    fake_samples = get_fake_samples(generator_machine, batch_size, noise_dim, sample_dim)
    labels = numpy.zeros(batch_size, dtype='int32')
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(fake_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(fake_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
    return inputs

def prepare_generator_data_batch(batch_size, dim):
    noise = numpy.random.normal(size=(batch_size, dim)).astype('float32')
    label = numpy.ones(batch_size, dtype='int32')
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(noise))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(label))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(noise))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(label))
    return inputs
......@@ -140,7 +140,7 @@ def get_layer_size(model_conf, layer_name):
def main():
-    api.initPaddle('--use_gpu=0', '--dot_period=100', '--log_period=10000')
+    api.initPaddle('--use_gpu=1', '--dot_period=100', '--log_period=10000')
    gen_conf = parse_config("gan_conf.py", "mode=generator_training")
    dis_conf = parse_config("gan_conf.py", "mode=discriminator_training")
    generator_conf = parse_config("gan_conf.py", "mode=generator")
......
......@@ -16,7 +16,7 @@ import argparse
import itertools
import random
import numpy
-import sys,os
+import sys,os,gc
from PIL import Image
from paddle.trainer.config_parser import parse_config
......@@ -94,10 +94,19 @@ def load_mnist_data(imageFile):
    f.close()
    return data

+def merge(images, size):
+    h, w = 28, 28
+    img = numpy.zeros((h * size[0], w * size[1]))
+    for idx in xrange(size[0] * size[1]):
+        i = idx % size[1]
+        j = idx // size[1]
+        img[j*h:j*h+h, i*w:i*w+w] = (images[idx, :].reshape((h, w)) + 1.0) / 2.0 * 255.0
+    return img
+
def saveImages(images, path):
-    for i in xrange(10):
-        im = Image.fromarray(images[i, :].reshape((28, 28)) * 255.0).convert('RGB')
-        im.save(path + "/image_" + str(i) + ".png")
+    merged_img = merge(images, [8, 8])
+    im = Image.fromarray(merged_img).convert('RGB')
+    im.save(path)
def get_real_samples(batch_size, data_np):
    return data_np[numpy.random.choice(data_np.shape[0], batch_size,
......@@ -124,8 +133,8 @@ def prepare_discriminator_data_batch_pos(batch_size, data_np):
    real_samples = get_real_samples(batch_size, data_np)
    labels = numpy.ones(batch_size, dtype='int32')
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(real_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(real_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
    return inputs
def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise):
......@@ -133,16 +142,16 @@ def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise):
    #print fake_samples.shape
    labels = numpy.zeros(batch_size, dtype='int32')
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(fake_samples))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(labels))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(fake_samples))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(labels))
    return inputs
def prepare_generator_data_batch(batch_size, noise):
    label = numpy.ones(batch_size, dtype='int32')
    #label = numpy.zeros(batch_size, dtype='int32')
    inputs = api.Arguments.createArguments(2)
-    inputs.setSlotValue(0, api.Matrix.createCpuDenseFromNumpy(noise))
-    inputs.setSlotIds(1, api.IVector.createCpuVectorFromNumpy(label))
+    inputs.setSlotValue(0, api.Matrix.createGpuDenseFromNumpy(noise))
+    inputs.setSlotIds(1, api.IVector.createGpuVectorFromNumpy(label))
    return inputs
......@@ -160,7 +169,7 @@ def get_layer_size(model_conf, layer_name):
def main():
-    api.initPaddle('--use_gpu=0', '--dot_period=10', '--log_period=100')
+    api.initPaddle('--use_gpu=1', '--dot_period=10', '--log_period=100')
    gen_conf = parse_config("gan_conf_image.py", "mode=generator_training")
    dis_conf = parse_config("gan_conf_image.py", "mode=discriminator_training")
    generator_conf = parse_config("gan_conf_image.py", "mode=generator")
......@@ -252,10 +261,7 @@ def main():
        fake_samples = get_fake_samples(generator_machine, batch_size, noise)
-        save_dir = "./pass_" + str(train_pass)
-        if not os.path.exists(save_dir):
-            os.makedirs(save_dir)
-        saveImages(fake_samples, save_dir)
+        saveImages(fake_samples, "train_pass%s.png" % train_pass)

    dis_trainer.finishTrain()
    gen_trainer.finishTrain()
......
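For reference, the new merge/saveImages path expects generator outputs as rows of 784 values in [-1, 1] and tiles them into a size[0] x size[1] grid of 28x28 cells rescaled to [0, 255], so an [8, 8] grid needs a batch of at least 64 samples. The snippet below is a self-contained restatement of that path (Python 2, matching the demo's use of xrange) with dummy data standing in for generator samples; it is for clarity only and not part of the commit.

import numpy
from PIL import Image

def merge(images, size):
    # Tile size[0] * size[1] samples (rows of 28*28 values in [-1, 1])
    # into one grid image rescaled to [0, 255], as in the diff above.
    h, w = 28, 28
    img = numpy.zeros((h * size[0], w * size[1]))
    for idx in xrange(size[0] * size[1]):
        i = idx % size[1]
        j = idx // size[1]
        img[j*h:j*h+h, i*w:i*w+w] = \
            (images[idx, :].reshape((h, w)) + 1.0) / 2.0 * 255.0
    return img

# Dummy batch standing in for generator samples; one PNG per training pass.
fake_samples = numpy.random.uniform(-1.0, 1.0, size=(64, 784))
Image.fromarray(merge(fake_samples, [8, 8])).convert('RGB').save("train_pass0.png")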
......@@ -194,4 +194,3 @@ namespace std {
%ignore ParameterTraverseCallbackPrivate;
%include "utils/GlobalConstants.h"
%include "api/PaddleAPI.h"
\ No newline at end of file