Unverified commit 8a0431bb, authored by lvmengsi, committed by GitHub

fix_cycle_pix (#2403)

* fix bug in cycle and pix
Parent 3c303e97
@@ -49,12 +49,12 @@ def infer(args):
     input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
     model_name = 'net_G'
     if args.model_net == 'cyclegan':
-        from network.CycleGAN_network import network_G, network_D
+        from network.CycleGAN_network import CycleGAN_model
+        model = CycleGAN_model()
         if args.input_style == "A":
-            fake = network_G(input, name="GA", cfg=args)
+            fake = model.network_G(input, name="GA", cfg=args)
         elif args.input_style == "B":
-            fake = network_G(input, name="GB", cfg=args)
+            fake = model.network_G(input, name="GB", cfg=args)
         else:
             raise "Input with style [%s] is not supported." % args.input_style
     elif args.model_net == 'Pix2pix':
......
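For context, the import change evidently follows a refactor in network/CycleGAN_network.py that moved the module-level builders network_G/network_D into a CycleGAN_model class, so infer.py now instantiates the model once and calls the builder as a method. A minimal sketch of the interface infer.py relies on (the method body is a placeholder, not the repository's actual layer code):

class CycleGAN_model(object):
    """Groups both generator directions behind one object."""

    def network_G(self, input, name, cfg):
        # `name` selects the direction: "GA" builds the A->B generator,
        # "GB" the B->A generator; parameters are scoped under `name`.
        raise NotImplementedError  # real layers live in CycleGAN_network.py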
-python infer.py --init_model output/checkpoints/199/ --input "data/cityscapes/testA/*" --input_style A --model_net cyclegan --net_G resnet_6block --g_bash_dims 32
+python infer.py --init_model output/checkpoints/199/ --input data/cityscapes/testA/* --input_style A --model_net cyclegan --net_G resnet_6block --g_base_dims 32
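A note on the unquoted glob in the new command: the shell now expands data/cityscapes/testA/* before Python starts, whereas the old quoted form passed the pattern through verbatim for infer.py to expand itself. Which form is correct depends on how infer.py parses --input; if the script globs the pattern internally, the quoted form is the one to use.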
@@ -1,7 +1,12 @@
 import os
+import argparse
+
+parser = argparse.ArgumentParser(description='the direction of data list')
+parser.add_argument(
+    '--direction', type=str, default='A2B', help='the direction of data list')


-def make_pair_data(fileA, file):
+def make_pair_data(fileA, file, d):
     f = open(fileA, 'r')
     lines = f.readlines()
     w = open(file, 'w')
@@ -10,16 +15,22 @@ def make_pair_data(fileA, file):
         print(fileA)
         fileB = fileA.replace("A", "B")
         print(fileB)
-        l = fileA + '\t' + fileB + '\n'
+        if d == 'A2B':
+            l = fileA + '\t' + fileB + '\n'
+        elif d == 'B2A':
+            l = fileB + '\t' + fileA + '\n'
+        else:
+            raise NotImplementedError("the direction: [%s] is not supported" % d)
         w.write(l)
     w.close()


 if __name__ == "__main__":
+    args = parser.parse_args()
     trainA_file = "./data/cityscapes/trainA.txt"
     train_file = "./data/cityscapes/pix2pix_train_list"
-    make_pair_data(trainA_file, train_file)
+    make_pair_data(trainA_file, train_file, args.direction)
     testA_file = "./data/cityscapes/testA.txt"
     test_file = "./data/cityscapes/pix2pix_test_list"
-    make_pair_data(testA_file, test_file)
+    make_pair_data(testA_file, test_file, args.direction)
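With the new flag, the pairing order of the Pix2pix data lists is chosen on the command line. Assuming the script is saved as make_pair_data.py (the diff does not show the filename), usage looks like:

python make_pair_data.py --direction A2B    # each line is "pathA<TAB>pathB" (default)
python make_pair_data.py --direction B2A    # each line is "pathB<TAB>pathA"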
@@ -88,17 +88,23 @@ class GTrainer():
                 vars.append(var.name)
         self.param = vars
         lr = cfg.learning_rate
-        optimizer = fluid.optimizer.Adam(
-            learning_rate=fluid.layers.piecewise_decay(
-                boundaries=[99 * step_per_epoch] +
-                [x * step_per_epoch for x in range(100, cfg.epoch - 1)],
-                values=[lr] + [
-                    lr * (1.0 - (x - 99.0) / 101.0)
-                    for x in range(100, cfg.epoch)
-                ]),
-            beta1=0.5,
-            beta2=0.999,
-            name="net_G")
+        if cfg.epoch <= 100:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=lr, beta1=0.5, beta2=0.999, name="net_G")
+        else:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=fluid.layers.piecewise_decay(
+                    boundaries=[99 * step_per_epoch] + [
+                        x * step_per_epoch
+                        for x in xrange(100, cfg.epoch - 1)
+                    ],
+                    values=[lr] + [
+                        lr * (1.0 - (x - 99.0) / 101.0)
+                        for x in xrange(100, cfg.epoch)
+                    ]),
+                beta1=0.5,
+                beta2=0.999,
+                name="net_G")
         optimizer.minimize(self.g_loss, parameter_list=vars)
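The new guard matters because fluid.layers.piecewise_decay expects len(values) == len(boundaries) + 1: with cfg.epoch <= 100 both comprehensions above are empty, both lists collapse to a single element, and the schedule cannot be built, so a constant rate is used instead. For cfg.epoch > 100, the schedule holds lr constant through epoch 99 and then decays it roughly linearly toward zero. The same arithmetic as a standalone sketch (plain Python; epoch, step_per_epoch, and lr are example values, not the config's):

# Rebuild the schedule the trainer constructs when cfg.epoch > 100.
epoch = 200
step_per_epoch = 100
lr = 0.0002

boundaries = [99 * step_per_epoch] + [
    x * step_per_epoch for x in range(100, epoch - 1)
]
values = [lr] + [lr * (1.0 - (x - 99.0) / 101.0) for x in range(100, epoch)]

assert len(values) == len(boundaries) + 1  # piecewise_decay's contract
print(values[0])   # 0.0002: constant through the first 100 epochs
print(values[1])   # ~0.000198: first decay step, taken at epoch 100
print(values[-1])  # ~2e-06: essentially zero by the final epoch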
@@ -122,17 +128,23 @@ class DATrainer():
         self.param = vars
         lr = cfg.learning_rate
-        optimizer = fluid.optimizer.Adam(
-            learning_rate=fluid.layers.piecewise_decay(
-                boundaries=[99 * step_per_epoch] +
-                [x * step_per_epoch for x in range(100, cfg.epoch - 1)],
-                values=[lr] + [
-                    lr * (1.0 - (x - 99.0) / 101.0)
-                    for x in range(100, cfg.epoch)
-                ]),
-            beta1=0.5,
-            beta2=0.999,
-            name="net_DA")
+        if cfg.epoch <= 100:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=lr, beta1=0.5, beta2=0.999, name="net_DA")
+        else:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=fluid.layers.piecewise_decay(
+                    boundaries=[99 * step_per_epoch] + [
+                        x * step_per_epoch
+                        for x in xrange(100, cfg.epoch - 1)
+                    ],
+                    values=[lr] + [
+                        lr * (1.0 - (x - 99.0) / 101.0)
+                        for x in xrange(100, cfg.epoch)
+                    ]),
+                beta1=0.5,
+                beta2=0.999,
+                name="net_DA")
         optimizer.minimize(self.d_loss_A, parameter_list=vars)
@@ -155,17 +167,23 @@ class DBTrainer():
                 vars.append(var.name)
         self.param = vars
         lr = 0.0002
-        optimizer = fluid.optimizer.Adam(
-            learning_rate=fluid.layers.piecewise_decay(
-                boundaries=[99 * step_per_epoch] +
-                [x * step_per_epoch for x in range(100, cfg.epoch - 1)],
-                values=[lr] + [
-                    lr * (1.0 - (x - 99.0) / 101.0)
-                    for x in range(100, cfg.epoch)
-                ]),
-            beta1=0.5,
-            beta2=0.999,
-            name="net_DB")
+        if cfg.epoch <= 100:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=lr, beta1=0.5, beta2=0.999, name="net_DB")
+        else:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=fluid.layers.piecewise_decay(
+                    boundaries=[99 * step_per_epoch] + [
+                        x * step_per_epoch
+                        for x in xrange(100, cfg.epoch - 1)
+                    ],
+                    values=[lr] + [
+                        lr * (1.0 - (x - 99.0) / 101.0)
+                        for x in xrange(100, cfg.epoch)
+                    ]),
+                beta1=0.5,
+                beta2=0.999,
+                name="net_DB")
         optimizer.minimize(self.d_loss_B, parameter_list=vars)
......
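One portability detail: the new CycleGAN trainer code above uses xrange, which exists only under Python 2, while the Pix2pix hunks below use range. If the trainers also need to run under Python 3, a small shim keeps both working (a suggested addition, not part of this commit):

try:
    xrange  # defined under Python 2
except NameError:
    xrange = range  # Python 3: range is already lazy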
@@ -70,17 +70,23 @@ class GTrainer():
                     "generator"):
                 vars.append(var.name)
         self.param = vars
-        optimizer = fluid.optimizer.Adam(
-            learning_rate=fluid.layers.piecewise_decay(
-                boundaries=[99 * step_per_epoch] +
-                [x * step_per_epoch for x in range(100, cfg.epoch - 1)],
-                values=[lr] + [
-                    lr * (1.0 - (x - 99.0) / 101.0)
-                    for x in range(100, cfg.epoch)
-                ]),
-            beta1=0.5,
-            beta2=0.999,
-            name="net_G")
+        if cfg.epoch <= 100:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=lr, beta1=0.5, beta2=0.999, name="net_G")
+        else:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=fluid.layers.piecewise_decay(
+                    boundaries=[99 * step_per_epoch] + [
+                        x * step_per_epoch
+                        for x in range(100, cfg.epoch - 1)
+                    ],
+                    values=[lr] + [
+                        lr * (1.0 - (x - 99.0) / 101.0)
+                        for x in range(100, cfg.epoch)
+                    ]),
+                beta1=0.5,
+                beta2=0.999,
+                name="net_G")
         optimizer.minimize(self.g_loss, parameter_list=vars)
@@ -142,17 +148,23 @@ class DTrainer():
                 vars.append(var.name)
         self.param = vars
-        optimizer = fluid.optimizer.Adam(
-            learning_rate=fluid.layers.piecewise_decay(
-                boundaries=[99 * step_per_epoch] +
-                [x * step_per_epoch for x in range(100, cfg.epoch - 1)],
-                values=[lr] + [
-                    lr * (1.0 - (x - 99.0) / 101.0)
-                    for x in range(100, cfg.epoch)
-                ]),
-            beta1=0.5,
-            beta2=0.999,
-            name="net_D")
+        if cfg.epoch <= 100:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=lr, beta1=0.5, beta2=0.999, name="net_D")
+        else:
+            optimizer = fluid.optimizer.Adam(
+                learning_rate=fluid.layers.piecewise_decay(
+                    boundaries=[99 * step_per_epoch] + [
+                        x * step_per_epoch
+                        for x in range(100, cfg.epoch - 1)
+                    ],
+                    values=[lr] + [
+                        lr * (1.0 - (x - 99.0) / 101.0)
+                        for x in range(100, cfg.epoch)
+                    ]),
+                beta1=0.5,
+                beta2=0.999,
+                name="net_D")
         optimizer.minimize(self.d_loss, parameter_list=vars)
......