Unverified commit 78fd552b, authored by whs, committed by GitHub

Fix cycle gan (#1266)

* Add trainer.py and utility.py

* Fix data reader
Parent 3fb495a7
@@ -15,7 +15,7 @@ def image_shape():
 def max_images_num():
-    return 1
+    return 1335
 def reader_creater(list_file, cycle=True, shuffle=True, return_name=False):
@@ -93,7 +93,7 @@ def conv2d(input,
     if norm:
         conv = instance_norm(input=conv, name=name + "_norm")
     if relu:
-        conv = fluid.layers.leaky_relu(conv, relufactor)
+        conv = fluid.layers.leaky_relu(conv, alpha=relufactor)
     return conv
@@ -148,5 +148,5 @@ def deconv2d(input,
     if norm:
         conv = instance_norm(input=conv, name=name + "_norm")
     if relu:
-        conv = fluid.layers.leaky_relu(conv, relufactor)
+        conv = fluid.layers.leaky_relu(conv, alpha=relufactor)
     return conv
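# The two changes above pass the leaky-ReLU slope by keyword, which makes
# the call explicit. For reference, leaky_relu computes y = max(x, alpha*x);
# a minimal sketch of the same computation from elementwise ops
# (illustration only, not the fused fluid kernel):
import paddle.fluid as fluid

def leaky_relu_ref(x, alpha=0.02):
    return fluid.layers.elementwise_max(
        x, fluid.layers.scale(x, scale=alpha))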
from model import *
import paddle.fluid as fluid

step_per_epoch = 1335  # matches max_images_num() in the data reader
cycle_loss_factor = 10.0  # weight of the cycle-consistency term


# Trains generator g_A (A -> B): an L1 cycle-consistency loss plus a
# least-squares adversarial loss from discriminator d_B.
class GATrainer():
    def __init__(self, input_A, input_B):
        self.program = fluid.default_main_program().clone()
        with fluid.program_guard(self.program):
            self.fake_B = build_generator_resnet_9blocks(input_A, name="g_A")
            self.fake_A = build_generator_resnet_9blocks(input_B, name="g_B")
            self.cyc_A = build_generator_resnet_9blocks(self.fake_B, "g_B")
            self.cyc_B = build_generator_resnet_9blocks(self.fake_A, "g_A")
            # Snapshot the forward-only graph before loss/optimizer ops.
            self.infer_program = self.program.clone()
            diff_A = fluid.layers.abs(
                fluid.layers.elementwise_sub(
                    x=input_A, y=self.cyc_A))
            diff_B = fluid.layers.abs(
                fluid.layers.elementwise_sub(
                    x=input_B, y=self.cyc_B))
            self.cyc_loss = (
                fluid.layers.reduce_mean(diff_A) +
                fluid.layers.reduce_mean(diff_B)) * cycle_loss_factor
            self.fake_rec_B = build_gen_discriminator(self.fake_B, "d_B")
            self.disc_loss_B = fluid.layers.reduce_mean(
                fluid.layers.square(self.fake_rec_B - 1))
            self.g_loss_A = fluid.layers.elementwise_add(self.cyc_loss,
                                                         self.disc_loss_B)
            # Only g_A's parameters are updated by this trainer.
            vars = []
            for var in self.program.list_vars():
                if fluid.io.is_parameter(var) and var.name.startswith("g_A"):
                    vars.append(var.name)
            self.param = vars
            lr = 0.0002
            optimizer = fluid.optimizer.Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=[
                        100 * step_per_epoch, 120 * step_per_epoch,
                        140 * step_per_epoch, 160 * step_per_epoch,
                        180 * step_per_epoch
                    ],
                    values=[
                        lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1
                    ]),
                beta1=0.5,
                name="g_A")
            optimizer.minimize(self.g_loss_A, parameter_list=vars)
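# The piecewise_decay schedule above holds the base rate for the first 100
# epochs, then steps it down every 20 epochs until it reaches 0.1x.
# Plain-Python sketch of the same schedule (illustrative helper, not part
# of this file; `step` counts minibatches):
def lr_at_step(step, base_lr=0.0002, steps_per_epoch=1335):
    boundaries = [e * steps_per_epoch for e in (100, 120, 140, 160, 180)]
    values = [base_lr * f for f in (1.0, 0.8, 0.6, 0.4, 0.2, 0.1)]
    for b, v in zip(boundaries, values):
        if step < b:
            return v
    return values[-1]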
class GBTrainer():
    def __init__(self, input_A, input_B):
        self.program = fluid.default_main_program().clone()
        with fluid.program_guard(self.program):
            self.fake_B = build_generator_resnet_9blocks(input_A, name="g_A")
            self.fake_A = build_generator_resnet_9blocks(input_B, name="g_B")
            self.cyc_A = build_generator_resnet_9blocks(self.fake_B, "g_B")
            self.cyc_B = build_generator_resnet_9blocks(self.fake_A, "g_A")
            self.infer_program = self.program.clone()
            diff_A = fluid.layers.abs(
                fluid.layers.elementwise_sub(
                    x=input_A, y=self.cyc_A))
            diff_B = fluid.layers.abs(
                fluid.layers.elementwise_sub(
                    x=input_B, y=self.cyc_B))
            self.cyc_loss = (
                fluid.layers.reduce_mean(diff_A) +
                fluid.layers.reduce_mean(diff_B)) * cycle_loss_factor
            self.fake_rec_A = build_gen_discriminator(self.fake_A, "d_A")
            disc_loss_A = fluid.layers.reduce_mean(
                fluid.layers.square(self.fake_rec_A - 1))
            self.g_loss_B = fluid.layers.elementwise_add(self.cyc_loss,
                                                         disc_loss_A)
            vars = []
            for var in self.program.list_vars():
                if fluid.io.is_parameter(var) and var.name.startswith("g_B"):
                    vars.append(var.name)
            self.param = vars
            lr = 0.0002
            optimizer = fluid.optimizer.Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=[
                        100 * step_per_epoch, 120 * step_per_epoch,
                        140 * step_per_epoch, 160 * step_per_epoch,
                        180 * step_per_epoch
                    ],
                    values=[
                        lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1
                    ]),
                beta1=0.5,
                name="g_B")
            optimizer.minimize(self.g_loss_B, parameter_list=vars)
# Trains discriminator d_A on real images from domain A and fakes drawn
# from the image pool.
class DATrainer():
    def __init__(self, input_A, fake_pool_A):
        self.program = fluid.default_main_program().clone()
        with fluid.program_guard(self.program):
            self.rec_A = build_gen_discriminator(input_A, "d_A")
            self.fake_pool_rec_A = build_gen_discriminator(fake_pool_A, "d_A")
            self.d_loss_A = (fluid.layers.square(self.fake_pool_rec_A) +
                             fluid.layers.square(self.rec_A - 1)) / 2.0
            self.d_loss_A = fluid.layers.reduce_mean(self.d_loss_A)
            vars = []
            for var in self.program.list_vars():
                if fluid.io.is_parameter(var) and var.name.startswith("d_A"):
                    vars.append(var.name)
            self.param = vars
            lr = 0.0002
            optimizer = fluid.optimizer.Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=[
                        100 * step_per_epoch, 120 * step_per_epoch,
                        140 * step_per_epoch, 160 * step_per_epoch,
                        180 * step_per_epoch
                    ],
                    values=[
                        lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1
                    ]),
                beta1=0.5,
                name="d_A")
            optimizer.minimize(self.d_loss_A, parameter_list=vars)
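# d_loss_A above is the least-squares GAN discriminator loss,
#     L_D = mean(((D(real) - 1)^2 + D(fake)^2) / 2),
# which pushes real scores toward 1 and pooled-fake scores toward 0.
# NumPy illustration of the same arithmetic (not part of the fluid graph):
import numpy as np

def lsgan_d_loss(d_real, d_fake):
    return np.mean(((d_real - 1.0) ** 2 + d_fake ** 2) / 2.0)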
# Trains discriminator d_B; the mirror of DATrainer for domain B.
class DBTrainer():
    def __init__(self, input_B, fake_pool_B):
        self.program = fluid.default_main_program().clone()
        with fluid.program_guard(self.program):
            self.rec_B = build_gen_discriminator(input_B, "d_B")
            self.fake_pool_rec_B = build_gen_discriminator(fake_pool_B, "d_B")
            self.d_loss_B = (fluid.layers.square(self.fake_pool_rec_B) +
                             fluid.layers.square(self.rec_B - 1)) / 2.0
            self.d_loss_B = fluid.layers.reduce_mean(self.d_loss_B)
            vars = []
            for var in self.program.list_vars():
                if fluid.io.is_parameter(var) and var.name.startswith("d_B"):
                    vars.append(var.name)
            self.param = vars
            lr = 0.0002
            optimizer = fluid.optimizer.Adam(
                learning_rate=fluid.layers.piecewise_decay(
                    boundaries=[
                        100 * step_per_epoch, 120 * step_per_epoch,
                        140 * step_per_epoch, 160 * step_per_epoch,
                        180 * step_per_epoch
                    ],
                    values=[
                        lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1
                    ]),
                beta1=0.5,
                name="d_B")
            optimizer.minimize(self.d_loss_B, parameter_list=vars)
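# All four trainers follow one pattern: clone the default program that
# holds the shared input layers, build the sub-network and its optimizer
# under program_guard, and restrict minimize() to that sub-network's
# parameter names so each update touches only its own weights. A hedged,
# self-contained toy of the pattern (toy names and shapes are assumptions,
# not from this repo):
import numpy as np
import paddle.fluid as fluid

def pattern_demo():
    x = fluid.layers.data(name="x", shape=[4], dtype="float32")
    prog = fluid.default_main_program().clone()
    with fluid.program_guard(prog):
        y = fluid.layers.fc(x, size=1,
                            param_attr=fluid.ParamAttr(name="toy_w"))
        loss = fluid.layers.reduce_mean(fluid.layers.square(y - 1))
        fluid.optimizer.Adam(learning_rate=2e-4, beta1=0.5).minimize(
            loss, parameter_list=["toy_w"])  # update only toy_w
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    return exe.run(prog,
                   feed={"x": np.ones((1, 4), dtype="float32")},
                   fetch_list=[loss])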
"""Contains common utility functions."""
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import distutils.util
import random
import glob

import numpy as np
from paddle.fluid import core
def print_arguments(args):
    """Print argparse's arguments.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        parser.add_argument("name", default="John", type=str, help="User name.")
        args = parser.parse_args()
        print_arguments(args)

    :param args: Input argparse.Namespace for printing.
    :type args: argparse.Namespace
    """
    print("----------- Configuration Arguments -----------")
    for arg, value in sorted(vars(args).items()):  # works on Python 2 and 3
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Add argparse's argument.

    Usage:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        add_arguments("name", str, "John", "User name.", parser)
        args = parser.parse_args()
    """
    type = distutils.util.strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs)
class ImagePool(object):
    def __init__(self, pool_size=50):
        self.pool = []
        self.count = 0
        self.pool_size = pool_size

    def pool_image(self, image):
        if self.count < self.pool_size:
            # Fill the pool first, passing new images straight through.
            self.pool.append(image)
            self.count += 1
            return image
        else:
            p = random.random()
            if p > 0.5:
                # Swap the new image for a random one from the history.
                random_id = random.randint(0, self.pool_size - 1)
                temp = self.pool[random_id]
                self.pool[random_id] = image
                return temp
            else:
                return image
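# ImagePool implements the image-history trick used by CycleGAN (following
# Shrivastava et al., 2017): once the pool is full, each call returns either
# the fresh image or a randomly swapped older one with equal probability, so
# the discriminators see a moving history of generations rather than only
# the newest fakes. Usage sketch (toy integers stand in for images):
def _pool_demo():
    pool = ImagePool(pool_size=2)
    return [pool.pool_image(i) for i in range(5)]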