Unverified commit fcc4581e, authored by lvmengsi, committed by GitHub

fix grad as list (#2493)

* fix grad and scipy version

Parent: d7541747
@@ -296,7 +296,7 @@ class celeba_reader_creator(reader_creator):
             label = np.array(label).astype("float32")
             label = (label + 1) // 2
             img = CentorCrop(img, args.crop_size, args.crop_size)
-            img = img.resize((args.load_size, args.load_size),
+            img = img.resize((args.image_size, args.image_size),
                              Image.BILINEAR)
             img = (np.array(img).astype('float32') / 255.0 - 0.5) / 0.5
             img = img.transpose([2, 0, 1])
...
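For orientation, the reader change above only swaps the resize target from args.load_size to the new args.image_size flag (added to the trainers further down), so the decoded CelebA images match the networks' data_shape. A minimal standalone sketch of that preprocessing, with a hypothetical preprocess helper and plain PIL cropping standing in for the repository's CentorCrop:

import numpy as np
from PIL import Image

def preprocess(img, crop_size, image_size):
    # Center-crop to crop_size x crop_size (rough stand-in for the repo's CentorCrop helper).
    w, h = img.size
    left, top = (w - crop_size) // 2, (h - crop_size) // 2
    img = img.crop((left, top, left + crop_size, top + crop_size))
    # Resize to the network input size, now controlled by --image_size instead of --load_size.
    img = img.resize((image_size, image_size), Image.BILINEAR)
    # Map pixels to [-1, 1] and switch to CHW layout, matching data_shape = [-1, 3, image_size, image_size].
    img = (np.array(img).astype('float32') / 255.0 - 0.5) / 0.5
    return img.transpose([2, 0, 1])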
@@ -23,9 +23,10 @@ from PIL import Image
 import paddle.fluid as fluid
 import paddle
 import numpy as np
-from scipy.misc import imsave
+import imageio
 import glob
 from util.config import add_arguments, print_arguments
+import copy

 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
@@ -153,7 +154,7 @@ def infer(args):
             images.append(fake_temp)
         images_concat = np.concatenate(images, 1)
         images_concat = np.concatenate(images_concat, 1)
-        imsave(args.output + "/fake_img_" + name[0], (
+        imageio.imwrite(args.output + "/fake_img_" + name[0], (
             (images_concat + 1) * 127.5).astype(np.uint8))
     elif args.model_net == 'StarGAN':
         test_reader = celeba_reader_creator(
@@ -184,8 +185,8 @@ def infer(args):
             fake_temp = np.squeeze(out[0]).transpose([1, 2, 0])
             images.append(fake_temp)
         images_concat = np.concatenate(images, 1)
-        imsave(out_path + "/fake_img" + str(epoch) + "_" + name[0], (
-            (images_concat + 1) * 127.5).astype(np.uint8))
+        imageio.imwrite(out_path + "/fake_img" + str(epoch) + "_" + name[0],
+                        ((images_concat + 1) * 127.5).astype(np.uint8))
     elif args.model_net == 'Pix2pix' or args.model_net == 'cyclegan':
         for file in glob.glob(args.input):
@@ -204,7 +205,7 @@ def infer(args):
         fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
         input_temp = np.squeeze(data).transpose([1, 2, 0])

-        imsave(args.output + "/fake_" + image_name, (
+        imageio.imwrite(args.output + "/fake_" + image_name, (
             (fake_temp + 1) * 127.5).astype(np.uint8))
     else:
         raise NotImplementedError("model_net {} is not support".format(
...
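The imsave replacements above track SciPy itself: scipy.misc.imsave was deprecated and later removed from SciPy, and imageio.imwrite is the usual drop-in. A minimal sketch of the write pattern used throughout this commit, with a made-up fake_temp array standing in for a generator output in [-1, 1]:

import numpy as np
import imageio

# Placeholder for a generator output in [-1, 1], laid out HWC.
fake_temp = np.random.uniform(-1.0, 1.0, size=(256, 256, 3)).astype('float32')

# Same conversion as in infer.py: rescale [-1, 1] back to uint8 pixels before writing to disk.
imageio.imwrite("fake_example.png", ((fake_temp + 1) * 127.5).astype(np.uint8))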
@@ -161,20 +161,21 @@ class DTrainer():
             if fluid.io.is_parameter(var) and var.name.startswith(
                     "discriminator"):
                 vars.append(var.name)
-        grad = fluid.gradients(pred, x, no_grad_set=vars)
+        grad = fluid.gradients(pred, x, no_grad_set=vars)[0]
         grad_shape = grad.shape
         grad = fluid.layers.reshape(
             grad, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]])
-        epsilon = 1e-5
         norm = fluid.layers.sqrt(
             fluid.layers.reduce_sum(
-                fluid.layers.square(grad), dim=1) + epsilon)
+                fluid.layers.square(grad), dim=1))
         gp = fluid.layers.reduce_mean(fluid.layers.square(norm - 1.0))
         return gp


 class AttGAN(object):
     def add_special_args(self, parser):
+        parser.add_argument(
+            '--image_size', type=int, default=256, help="image size")
         parser.add_argument(
             '--g_lr',
             type=float,
@@ -241,7 +242,7 @@ class AttGAN(object):
         self.batch_num = batch_num

     def build_model(self):
-        data_shape = [-1, 3, self.cfg.load_size, self.cfg.load_size]
+        data_shape = [-1, 3, self.cfg.image_size, self.cfg.image_size]
         image_real = fluid.layers.data(
             name='image_real', shape=data_shape, dtype='float32')
...
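The other half of the commit is the "[0]" index: fluid.gradients returns a list of gradient Variables (one per input), so the old code was effectively calling .shape on a Python list. A standalone sketch of the gradient-penalty term as it reads after the fix, assuming Paddle Fluid 1.x and hypothetical pred, x, and vars arguments; the repository's own version lives inside each trainer's DTrainer class:

import paddle.fluid as fluid

def gradient_penalty(pred, x, vars):
    # fluid.gradients returns a list; take element 0 to get the gradient Variable of pred w.r.t. x.
    grad = fluid.gradients(pred, x, no_grad_set=vars)[0]
    grad_shape = grad.shape
    # Flatten each sample's gradient so the norm below is computed per example.
    grad = fluid.layers.reshape(
        grad, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]])
    norm = fluid.layers.sqrt(
        fluid.layers.reduce_sum(fluid.layers.square(grad), dim=1))
    # WGAN-GP style penalty: drive the per-sample gradient norm toward 1.
    return fluid.layers.reduce_mean(fluid.layers.square(norm - 1.0))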
@@ -159,20 +159,21 @@ class DTrainer():
             if fluid.io.is_parameter(var) and var.name.startswith(
                     "discriminator"):
                 vars.append(var.name)
-        grad = fluid.gradients(pred, x, no_grad_set=vars)
+        grad = fluid.gradients(pred, x, no_grad_set=vars)[0]
         grad_shape = grad.shape
         grad = fluid.layers.reshape(
             grad, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]])
-        epsilon = 1e-5
         norm = fluid.layers.sqrt(
             fluid.layers.reduce_sum(
-                fluid.layers.square(grad), dim=1) + epsilon)
+                fluid.layers.square(grad), dim=1))
         gp = fluid.layers.reduce_mean(fluid.layers.square(norm - 1.0))
         return gp


 class STGAN(object):
     def add_special_args(self, parser):
+        parser.add_argument(
+            '--image_size', type=int, default=256, help="image size")
         parser.add_argument(
             '--g_lr',
             type=float,
@@ -246,7 +247,7 @@ class STGAN(object):
         self.batch_num = batch_num

     def build_model(self):
-        data_shape = [-1, 3, self.cfg.load_size, self.cfg.load_size]
+        data_shape = [-1, 3, self.cfg.image_size, self.cfg.image_size]
         image_real = fluid.layers.data(
             name='image_real', shape=data_shape, dtype='float32')
...
@@ -185,7 +185,7 @@ class DTrainer():
         for var in fluid.default_main_program().list_vars():
             if fluid.io.is_parameter(var) and var.name.startswith('d_'):
                 vars.append(var.name)
-        grad = fluid.gradients(pred, x, no_grad_set=vars)
+        grad = fluid.gradients(pred, x, no_grad_set=vars)[0]
         grad_shape = grad.shape
         grad = fluid.layers.reshape(
             grad, [-1, grad_shape[1] * grad_shape[2] * grad_shape[3]])
...
@@ -27,7 +27,8 @@ import six
 matplotlib.use('agg')
 import matplotlib.pyplot as plt
 import matplotlib.gridspec as gridspec
-from scipy.misc import imsave
+import imageio
+import copy

 img_dim = 28
@@ -94,11 +95,11 @@ def save_test_image(epoch,
         input_A_temp = np.squeeze(data_A[0]).transpose([1, 2, 0])
         input_B_temp = np.squeeze(data_A[0]).transpose([1, 2, 0])

-        imsave(out_path + "/fakeB_" + str(epoch) + "_" + name, (
+        imageio.imwrite(out_path + "/fakeB_" + str(epoch) + "_" + name, (
             (fake_B_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/inputA_" + str(epoch) + "_" + name, (
+        imageio.imwrite(out_path + "/inputA_" + str(epoch) + "_" + name, (
             (input_A_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/inputB_" + str(epoch) + "_" + name, (
+        imageio.imwrite(out_path + "/inputB_" + str(epoch) + "_" + name, (
             (input_B_temp + 1) * 127.5).astype(np.uint8))
     elif cfg.model_net == "StarGAN":
         for data in zip(A_test_reader()):
@@ -127,8 +128,8 @@ def save_test_image(epoch,
             images.append(fake_temp)
             images.append(rec_temp)
         images_concat = np.concatenate(images, 1)
-        imsave(out_path + "/fake_img" + str(epoch) + "_" + name[0], (
-            (images_concat + 1) * 127.5).astype(np.uint8))
+        imageio.imwrite(out_path + "/fake_img" + str(epoch) + "_" + name[0],
+                        ((images_concat + 1) * 127.5).astype(np.uint8))
     elif cfg.model_net == 'AttGAN' or cfg.model_net == 'STGAN':
         for data in zip(A_test_reader()):
             real_img, label_org, name = data[0]
@@ -168,8 +169,8 @@ def save_test_image(epoch,
             images.append(fake_temp)
         images_concat = np.concatenate(images, 1)
         images_concat = np.concatenate(images_concat, 1)
-        imsave(out_path + "/fake_img" + str(epoch) + '_' + name[0], (
-            (images_concat + 1) * 127.5).astype(np.uint8))
+        imageio.imwrite(out_path + "/fake_img" + str(epoch) + '_' + name[0],
+                        ((images_concat + 1) * 127.5).astype(np.uint8))
     else:
         for data_A, data_B in zip(A_test_reader(), B_test_reader()):
@@ -194,17 +195,17 @@ def save_test_image(epoch,
         input_A_temp = np.squeeze(data_A[0][0]).transpose([1, 2, 0])
         input_B_temp = np.squeeze(data_B[0][0]).transpose([1, 2, 0])

-        imsave(out_path + "/fakeB_" + str(epoch) + "_" + A_name, (
+        imageio.imwrite(out_path + "/fakeB_" + str(epoch) + "_" + A_name, (
             (fake_B_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/fakeA_" + str(epoch) + "_" + B_name, (
+        imageio.imwrite(out_path + "/fakeA_" + str(epoch) + "_" + B_name, (
             (fake_A_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/cycA_" + str(epoch) + "_" + A_name, (
+        imageio.imwrite(out_path + "/cycA_" + str(epoch) + "_" + A_name, (
             (cyc_A_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/cycB_" + str(epoch) + "_" + B_name, (
+        imageio.imwrite(out_path + "/cycB_" + str(epoch) + "_" + B_name, (
             (cyc_B_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/inputA_" + str(epoch) + "_" + A_name, (
+        imageio.imwrite(out_path + "/inputA_" + str(epoch) + "_" + A_name, (
             (input_A_temp + 1) * 127.5).astype(np.uint8))
-        imsave(out_path + "/inputB_" + str(epoch) + "_" + B_name, (
+        imageio.imwrite(out_path + "/inputB_" + str(epoch) + "_" + B_name, (
             (input_B_temp + 1) * 127.5).astype(np.uint8))
...