Unverified · Commit 7ec97024 · Author: W whs · Committer: GitHub

Make gan support for python3 (#1311)

Parent: 34d70094
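The diff below applies the usual Python 2 → 3 compatibility idioms to the GAN and CycleGAN scripts: `map()` results are materialized with `list()`, `xrange` and `dict.iteritems()` go through `six`, `itertools.izip` gives way to the built-in `zip`, shape arithmetic switches from `/` to `//`, generator `.next()` calls become `next()`, and `print` statements become `print()` calls. A minimal, self-contained sketch of these idioms (illustrative values only, not taken from the commit):

```python
from __future__ import absolute_import, division, print_function
import six

# xrange is gone on Python 3; six.moves.xrange works on both versions.
values = [(i, i * i) for i in six.moves.xrange(4)]

# map() is lazy on Python 3, so materialize it before indexing or reshaping.
firsts = list(map(lambda x: x[0], values))

# dict.iteritems() no longer exists; six.iteritems() dispatches correctly.
config = {"batch_size": 121, "epoch": 20}
for key, value in sorted(six.iteritems(config)):
    print("%s: %s" % (key, value))

# With true division enabled, // keeps the integer semantics shapes need.
half = 28 // 2

# The built-in zip() replaces itertools.izip, and next(it) replaces it.next().
pairs = zip(firsts, [v for _, v in values])
print(next(iter(pairs)), half)
```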
......@@ -12,8 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import six
import argparse
import functools
import matplotlib
......@@ -102,7 +106,7 @@ def train(args):
noise_data = np.random.uniform(
low=-1.0, high=1.0,
size=[args.batch_size, NOISE_SIZE]).astype('float32')
- real_image = np.array(map(lambda x: x[0], data)).reshape(
+ real_image = np.array(list(map(lambda x: x[0], data))).reshape(
-1, 784).astype('float32')
conditions_data = np.array([x[1] for x in data]).reshape(
[-1, 1]).astype("float32")
......@@ -138,7 +142,7 @@ def train(args):
d_loss_np = [d_loss_1[0][0], d_loss_2[0][0]]
- for _ in xrange(NUM_TRAIN_TIMES_OF_DG):
+ for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG):
noise_data = np.random.uniform(
low=-1.0, high=1.0,
size=[args.batch_size, NOISE_SIZE]).astype('float32')
......@@ -159,7 +163,7 @@ def train(args):
total_images = np.concatenate([real_image, generated_images])
fig = plot(total_images)
msg = "Epoch ID={0}\n Batch ID={1}\n D-Loss={2}\n DG-Loss={3}\n gen={4}".format(
- pass_id, batch_id, d_loss_np, dg_loss_np,
+ pass_id, batch_id, np.mean(d_loss_np), dg_loss_np,
check(generated_images))
print(msg)
plt.title(msg)
......
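In Python 3, `map()` returns an iterator, so `np.array(map(...))` would wrap the lazy map object in a 0-d object array and the subsequent `.reshape(-1, 784)` would fail; wrapping the call in `list()` restores the intended 2-D batch. A minimal sketch of the pattern, using a made-up two-sample mini-batch in place of the real reader output:

```python
import numpy as np

# Hypothetical mini-batch: each sample is (flattened 28x28 image, label).
data = [(np.zeros(784, dtype='float32'), 3),
        (np.ones(784, dtype='float32'), 7)]

# Without list(), np.array() would receive a lazy map object on Python 3.
real_image = np.array(list(map(lambda x: x[0], data))).reshape(
    -1, 784).astype('float32')
conditions_data = np.array([x[1] for x in data]).reshape(
    [-1, 1]).astype("float32")

print(real_image.shape, conditions_data.shape)  # (2, 784) (2, 1)
```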
......@@ -12,11 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import functools
import matplotlib
import six
import numpy as np
import paddle
import paddle.fluid as fluid
......@@ -98,7 +102,7 @@ def train(args):
noise_data = np.random.uniform(
low=-1.0, high=1.0,
size=[args.batch_size, NOISE_SIZE]).astype('float32')
- real_image = np.array(map(lambda x: x[0], data)).reshape(
+ real_image = np.array(list(map(lambda x: x[0], data))).reshape(
-1, 784).astype('float32')
real_labels = np.ones(
shape=[real_image.shape[0], 1], dtype='float32')
......@@ -128,7 +132,7 @@ def train(args):
d_loss_np = [d_loss_1[0][0], d_loss_2[0][0]]
- for _ in xrange(NUM_TRAIN_TIMES_OF_DG):
+ for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG):
noise_data = np.random.uniform(
low=-1.0, high=1.0,
size=[args.batch_size, NOISE_SIZE]).astype('float32')
......@@ -146,7 +150,7 @@ def train(args):
fig = plot(total_images)
msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n gen={4}".format(
pass_id, batch_id,
- np.sum(d_loss_np), dg_loss_np, check(generated_images))
+ np.mean(d_loss_np), dg_loss_np, check(generated_images))
print(msg)
plt.title(msg)
plt.savefig(
......
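Besides the Python 3 fixes, both training scripts change how the discriminator loss is logged: the losses on the real and the generated half-batch are now averaged with `np.mean` rather than printed as a raw list (c_gan) or summed (dc_gan). A small sketch with made-up loss values:

```python
import numpy as np

# Hypothetical per-batch discriminator losses (real half, generated half).
d_loss_1 = np.array([[0.69]], dtype='float32')
d_loss_2 = np.array([[0.71]], dtype='float32')

d_loss_np = [d_loss_1[0][0], d_loss_2[0][0]]
print("D-Loss={}".format(np.mean(d_loss_np)))  # ~0.70 rather than [0.69, 0.71] or 1.40
```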
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
from utility import get_parent_function_name
......@@ -104,13 +107,13 @@ def D_cond(image, y):
def G_cond(z, y):
s_h, s_w = output_height, output_width
- s_h2, s_h4 = int(s_h / 2), int(s_h / 4)
- s_w2, s_w4 = int(s_w / 2), int(s_w / 4)
+ s_h2, s_h4 = int(s_h // 2), int(s_h // 4)
+ s_w2, s_w4 = int(s_w // 2), int(s_w // 4)
yb = fluid.layers.reshape(y, [-1, y_dim, 1, 1]) #NCHW
z = fluid.layers.concat([z, y], 1)
- h0 = bn(fc(z, gfc_dim / 2), act='relu')
+ h0 = bn(fc(z, gfc_dim // 2), act='relu')
h0 = fluid.layers.concat([h0, y], 1)
h1 = bn(fc(h0, gf_dim * 2 * s_h4 * s_w4), act='relu')
......@@ -134,8 +137,8 @@ def D(x):
def G(x):
x = bn(fc(x, gfc_dim))
- x = bn(fc(x, gf_dim * 2 * img_dim / 4 * img_dim / 4))
- x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim / 4, img_dim / 4])
+ x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4))
+ x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4])
x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14])
x = deconv(x, 1, filter_size=5, padding=2, act='tanh', output_size=[28, 28])
x = fluid.layers.reshape(x, shape=[-1, 28 * 28])
......
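The `network.py` changes are about division semantics: with `from __future__ import division` (or on Python 3), `/` is true division and returns a float, which is not valid for fully-connected layer sizes or `reshape` dimensions, hence the switch to floor division `//`. A minimal sketch, with `img_dim` and `gf_dim` values assumed purely for illustration:

```python
from __future__ import division

# Assumed constants in the spirit of the generator definition.
img_dim = 28
gf_dim = 128

print(img_dim / 4)    # 7.0 -> float, rejected as a tensor dimension
print(img_dim // 4)   # 7   -> int, usable in a reshape

shape = [-1, gf_dim * 2, img_dim // 4, img_dim // 4]
print(shape)          # [-1, 256, 7, 7]
```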
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import distutils.util
import numpy as np
import inspect
import matplotlib
import six
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
......@@ -54,7 +58,7 @@ def print_arguments(args):
:type args: argparse.Namespace
"""
print("----------- Configuration Arguments -----------")
- for arg, value in sorted(vars(args).iteritems()):
+ for arg, value in sorted(six.iteritems(vars(args))):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
......
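`dict.iteritems()` does not exist on Python 3; `six.iteritems()` calls `.iteritems()` on Python 2 and `.items()` on Python 3, so `print_arguments` works under both. A standalone sketch with a hypothetical argument namespace standing in for the parsed training arguments:

```python
import argparse
import six

# Hypothetical namespace standing in for the parsed training arguments.
args = argparse.Namespace(batch_size=121, epoch=20, use_gpu=False)

print("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
    print("%s: %s" % (arg, value))
print("------------------------------------------------")
```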
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from PIL import Image
import numpy as np
- from itertools import izip
A_LIST_FILE = "./data/horse2zebra/trainA.txt"
B_LIST_FILE = "./data/horse2zebra/trainB.txt"
......@@ -70,11 +72,3 @@ def b_test_reader():
Reader of images with B style for test.
"""
return reader_creater(B_TEST_LIST_FILE, cycle=False, return_name=True)
- if __name__ == "__main__":
-     for A, B in izip(a_test_reader()(), a_test_reader()()):
-         print A[0].shape
-         print A[1]
-         print B[0].shape
-         print B[1]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import data_reader
import os
import random
......@@ -9,7 +12,6 @@ import paddle.fluid as fluid
import numpy as np
from paddle.fluid import core
from trainer import *
- from itertools import izip
from scipy.misc import imsave
import paddle.fluid.profiler as profiler
from utility import add_arguments, print_arguments, ImagePool
......@@ -66,7 +68,7 @@ def train(args):
if not os.path.exists(out_path):
os.makedirs(out_path)
i = 0
- for data_A, data_B in izip(A_test_reader(), B_test_reader()):
+ for data_A, data_B in zip(A_test_reader(), B_test_reader()):
A_name = data_A[1]
B_name = data_B[1]
tensor_A = core.LoDTensor()
......@@ -114,7 +116,7 @@ def train(args):
exe, out_path + "/d_a", main_program=d_A_trainer.program)
fluid.io.save_persistables(
exe, out_path + "/d_b", main_program=d_B_trainer.program)
print "saved checkpoint to [%s]" % out_path
print("saved checkpoint to {}".format(out_path))
sys.stdout.flush()
def init_model():
......@@ -128,7 +130,7 @@ def train(args):
exe, args.init_model + "/d_a", main_program=d_A_trainer.program)
fluid.io.load_persistables(
exe, args.init_model + "/d_b", main_program=d_B_trainer.program)
print "Load model from [%s]" % args.init_model
print("Load model from {}".format(args.init_model))
if args.init_model:
init_model()
......@@ -136,8 +138,8 @@ def train(args):
for epoch in range(args.epoch):
batch_id = 0
for i in range(max_images_num):
- data_A = A_reader.next()
- data_B = B_reader.next()
+ data_A = next(A_reader)
+ data_B = next(B_reader)
tensor_A = core.LoDTensor()
tensor_B = core.LoDTensor()
tensor_A.set(data_A, place)
......@@ -174,9 +176,9 @@ def train(args):
feed={"input_A": tensor_A,
"fake_pool_A": fake_pool_A})
print "epoch[%d]; batch[%d]; g_A_loss: %s; d_B_loss: %s; g_B_loss: %s; d_A_loss: %s;" % (
print("epoch{}; batch{}; g_A_loss: {}; d_B_loss: {}; g_B_loss: {}; d_A_loss: {};".format(
epoch, batch_id, g_A_loss[0], d_B_loss[0], g_B_loss[0],
d_A_loss[0])
d_A_loss[0]))
sys.stdout.flush()
batch_id += 1
......
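The CycleGAN training loop touches two more Python 3 incompatibilities: generators have no `.next()` method (the `next()` built-in is used instead), and `itertools.izip` is gone (the built-in `zip` is already lazy on Python 3). A small sketch with toy readers standing in for the horse2zebra readers:

```python
# Toy generator factory standing in for the horse2zebra data readers.
def make_reader(prefix, n=3):
    def reader():
        for i in range(n):
            yield "%s_%d.jpg" % (prefix, i)
    return reader

A_reader = make_reader("horse")()
B_reader = make_reader("zebra")()

# Python 3: generator.next() is gone; the next() built-in works on both versions.
print(next(A_reader), next(B_reader))

# Python 3: zip() is lazy, so it replaces itertools.izip directly.
for data_A, data_B in zip(make_reader("horse")(), make_reader("zebra")()):
    print(data_A, data_B)
```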
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model import *
import paddle.fluid as fluid
......
......@@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils.util
import six
import random
import glob
import numpy as np
......@@ -39,7 +40,7 @@ def print_arguments(args):
:type args: argparse.Namespace
"""
print("----------- Configuration Arguments -----------")
- for arg, value in sorted(vars(args).iteritems()):
+ for arg, value in sorted(six.iteritems(vars(args))):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
......