Unverified · Commit 6db35e7a authored by chengduo, committed by GitHub

Print time cost with more precision (#3032)

* update gan model

* update gan model time display

* update gan script

* update gan script
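
Note: the substance of this change is dropping the `{:.2f}` format spec from every `Batch_time_cost` field, so the elapsed time is printed at full float precision instead of being rounded to two decimals. A minimal standalone illustration (the `batch_time` value is a hypothetical example):

    batch_time = 0.0123456789  # example elapsed seconds
    print("Batch_time_cost: {:.2f}".format(batch_time))  # old: 'Batch_time_cost: 0.01'
    print("Batch_time_cost: {}".format(batch_time))      # new: 'Batch_time_cost: 0.0123456789'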
Parent 64dc2464
@@ -9,6 +9,7 @@ def set_paddle_flags(flags):
if os.environ.get(key, None) is None:
os.environ[key] = str(value)
use_cudnn_deterministic = os.environ.get('FLAGS_cudnn_deterministic', None)
if use_cudnn_deterministic:
@@ -22,7 +23,7 @@ else:
set_paddle_flags({
'FLAGS_cudnn_exhaustive_search': use_cudnn_exhaustive_search,
'FLAGS_conv_workspace_size_limit': 256,
-        'FLAGS_eager_delete_tensor_gb': 0, # enable gc
+        'FLAGS_eager_delete_tensor_gb': 0,  # enable gc
# You can omit the following settings, because the default
# value of FLAGS_memory_fraction_of_eager_deletion is 1,
# and default value of FLAGS_fast_eager_deletion_mode is 1
@@ -247,7 +248,7 @@ def train(args):
t_time += batch_time
print(
"epoch{}; batch{}; g_A_loss: {}; d_B_loss: {}; g_B_loss: {}; d_A_loss: {}; "
"Batch_time_cost: {:.2f}".format(epoch, batch_id, g_A_loss[
"Batch_time_cost: {}".format(epoch, batch_id, g_A_loss[
0], d_B_loss[0], g_B_loss[0], d_A_loss[0], batch_time))
losses[0].append(g_A_loss[0])
losses[1].append(d_A_loss[0])
......
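
Note: the `set_paddle_flags` helper in the hunk above only exports a flag as an environment variable when the user has not already set it, and it must run before `paddle` is imported for the flags to take effect. A standalone sketch of the same pattern (runnable without Paddle installed):

    import os

    def set_paddle_flags(flags):
        # Export each flag unless the environment already defines it.
        for key, value in flags.items():
            if os.environ.get(key, None) is None:
                os.environ[key] = str(value)

    set_paddle_flags({
        'FLAGS_eager_delete_tensor_gb': 0,  # 0 enables eager tensor GC
    })
    print(os.environ['FLAGS_eager_delete_tensor_gb'])  # -> '0'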
@@ -337,10 +337,9 @@ class AttGAN(object):
t_time += batch_time
print("epoch{}: batch{}: \n\
d_loss: {}; d_loss_real: {}; d_loss_fake: {}; d_loss_cls: {}; d_loss_gp: {} \n\
Batch_time_cost: {:.2f}"
.format(epoch_id, batch_id, d_loss[0], d_loss_real[
0], d_loss_fake[0], d_loss_cls[0], d_loss_gp[0],
batch_time))
Batch_time_cost: {}".format(epoch_id, batch_id, d_loss[
0], d_loss_real[0], d_loss_fake[0], d_loss_cls[0],
d_loss_gp[0], batch_time))
# optimize the generator network
else:
d_fetches = [
......
@@ -188,7 +188,7 @@ class CGAN(object):
g_trainer.infer_program,
feed={'noise': const_n,
'condition': condition_data},
-                fetch_list={g_trainer.fake})[0]
+                fetch_list=[g_trainer.fake])[0]
generate_image_reshape = np.reshape(generate_const_image, (
self.cfg.batch_size, -1))
@@ -196,7 +196,7 @@ class CGAN(object):
[real_image, generate_image_reshape])
fig = utility.plot(total_images)
print(
-                'Epoch ID={} Batch ID={} D_loss={} G_loss={} Batch_time_cost={:.2f}'.
+                'Epoch ID: {} Batch ID: {} D_loss: {} G_loss: {} Batch_time_cost: {}'.
format(epoch_id, batch_id, d_loss[0], g_loss[0],
batch_time))
plt.title('Epoch ID={}, Batch ID={}'.format(epoch_id,
......
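
Note: the `fetch_list` fix in this hunk corrects a Python literal mix-up: `{g_trainer.fake}` builds a set, while `exe.run` expects an ordered list so that indexing the result with `[0]` is well-defined. A tiny runnable illustration with a stand-in value:

    fetch = "g_trainer.fake"   # stand-in for the real fetch target
    as_set = {fetch}           # set literal: unordered collection
    as_list = [fetch]          # list literal: ordered, [0] is stable
    print(type(as_set).__name__, type(as_list).__name__)  # -> set list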
@@ -319,7 +319,7 @@ class CycleGAN(object):
print("epoch{}: batch{}: \n\
d_A_loss: {}; g_A_loss: {}; g_A_cyc_loss: {}; g_A_idt_loss: {}; \n\
d_B_loss: {}; g_B_loss: {}; g_B_cyc_loss: {}; g_B_idt_loss: {}; \n\
Batch_time_cost: {:.2f}".format(
Batch_time_cost: {}".format(
epoch_id, batch_id, d_A_loss[0], g_A_loss[0],
g_A_cyc_loss[0], g_A_idt_loss[0], d_B_loss[0], g_B_loss[
0], g_B_cyc_loss[0], g_B_idt_loss[0], batch_time))
......
@@ -172,15 +172,16 @@ class DCGAN(object):
generate_const_image = exe.run(
g_trainer.infer_program,
feed={'noise': const_n},
-                fetch_list={g_trainer.fake})[0]
+                fetch_list=[g_trainer.fake])[0]
generate_image_reshape = np.reshape(generate_const_image, (
self.cfg.batch_size, -1))
total_images = np.concatenate(
[real_image, generate_image_reshape])
fig = utility.plot(total_images)
print(
-                'Epoch ID={} Batch ID={} D_loss={} G_loss={} Batch_time_cost={:.2f}'.
+                'Epoch ID: {} Batch ID: {} D_loss: {} G_loss: {} Batch_time_cost: {}'.
format(epoch_id, batch_id, d_loss[0], g_loss[0],
batch_time))
plt.title('Epoch ID={}, Batch ID={}'.format(epoch_id,
......
@@ -277,7 +277,7 @@ class Pix2pix(object):
print("epoch{}: batch{}: \n\
g_loss_gan: {}; g_loss_l1: {}; \n\
d_loss_real: {}; d_loss_fake: {}; \n\
Batch_time_cost: {:.2f}"
Batch_time_cost: {}"
.format(epoch_id, batch_id, g_loss_gan[0], g_loss_l1[
0], d_loss_real[0], d_loss_fake[0], batch_time))
......
@@ -342,10 +342,9 @@ class STGAN(object):
t_time += batch_time
print("epoch{}: batch{}: \n\
d_loss: {}; d_loss_real: {}; d_loss_fake: {}; d_loss_cls: {}; d_loss_gp: {} \n\
Batch_time_cost: {:.2f}"
.format(epoch_id, batch_id, d_loss[0], d_loss_real[
0], d_loss_fake[0], d_loss_cls[0], d_loss_gp[0],
batch_time))
Batch_time_cost: {}".format(epoch_id, batch_id, d_loss[
0], d_loss_real[0], d_loss_fake[0], d_loss_cls[0],
d_loss_gp[0], batch_time))
# optimize the generator network
else:
d_fetches = [
......
@@ -360,7 +360,7 @@ class StarGAN(object):
if batch_id % self.cfg.print_freq == 0:
print("epoch{}: batch{}: \n\
d_loss_real: {}; d_loss_fake: {}; d_loss_cls: {}; d_loss_gp: {} \n\
Batch_time_cost: {:.2f}".format(
Batch_time_cost: {}".format(
epoch_id, batch_id, d_loss_real[0], d_loss_fake[
0], d_loss_cls[0], d_loss_gp[0], batch_time))
......
@@ -41,26 +41,25 @@ add_arg('init_model', str, None, "The init model file of director
add_arg('save_checkpoints', bool, True, "Whether to save checkpoints.")
# yapf: enable
lambda_A = 10.0
lambda_B = 10.0
lambda_identity = 0.5
step_per_epoch = 2974
def optimizer_setting():
-    lr=0.0002
+    lr = 0.0002
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=[
100 * step_per_epoch, 120 * step_per_epoch,
-                140 * step_per_epoch, 160 * step_per_epoch,
-                180 * step_per_epoch
+                140 * step_per_epoch, 160 * step_per_epoch, 180 * step_per_epoch
],
-            values=[
-                lr , lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1
-            ]),
-        beta1=0.5)
+            values=[lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1]),
+        beta1=0.5)
return optimizer
def train(args):
with fluid.dygraph.guard():
max_images_num = data_reader.max_images_num()
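
Note: the optimizer above uses `fluid.layers.piecewise_decay`, which holds `values[i]` while the global step is below `boundaries[i]` and falls back to the last value afterwards. A pure-Python sketch of the schedule being built here (names copied from the script; runnable without Paddle):

    step_per_epoch = 2974
    lr = 0.0002
    boundaries = [e * step_per_epoch for e in (100, 120, 140, 160, 180)]
    values = [lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1]

    def lr_at(step):
        # values[i] applies while step < boundaries[i]; last value after that.
        for b, v in zip(boundaries, values):
            if step < b:
                return v
        return values[-1]

    print(lr_at(0))                     # 0.0002
    print(lr_at(150 * step_per_epoch))  # lr * 0.4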
@@ -85,7 +84,7 @@ def train(args):
A_test_reader = data_reader.a_test_reader()
B_test_reader = data_reader.b_test_reader()
cycle_gan = Cycle_Gan("cycle_gan",istrain=True)
cycle_gan = Cycle_Gan("cycle_gan", istrain=True)
losses = [[], []]
t_time = 0
@@ -101,75 +100,94 @@ def train(args):
data_B = next(B_reader)
s_time = time.time()
data_A = np.array([data_A[0].reshape(3,256,256)]).astype("float32")
data_B = np.array([data_B[0].reshape(3,256,256)]).astype("float32")
data_A = np.array(
[data_A[0].reshape(3, 256, 256)]).astype("float32")
data_B = np.array(
[data_B[0].reshape(3, 256, 256)]).astype("float32")
data_A = to_variable(data_A)
data_B = to_variable(data_B)
# optimize the g_A network
-                fake_A,fake_B,cyc_A,cyc_B,g_A_loss,g_B_loss,idt_loss_A,idt_loss_B,cyc_A_loss,cyc_B_loss,g_loss = cycle_gan(data_A,data_B,True,False,False)
+                fake_A, fake_B, cyc_A, cyc_B, g_A_loss, g_B_loss, idt_loss_A, idt_loss_B, cyc_A_loss, cyc_B_loss, g_loss = cycle_gan(
+                    data_A, data_B, True, False, False)
g_loss_out = g_loss.numpy()
g_loss.backward()
vars_G = []
for param in cycle_gan.parameters():
if param.name[:52]=="cycle_gan/Cycle_Gan_0/build_generator_resnet_9blocks":
if param.name[:
52] == "cycle_gan/Cycle_Gan_0/build_generator_resnet_9blocks":
vars_G.append(param)
-                optimizer1.minimize(g_loss,parameter_list=vars_G)
+                optimizer1.minimize(g_loss, parameter_list=vars_G)
cycle_gan.clear_gradients()
fake_pool_B = B_pool.pool_image(fake_B).numpy()
-                fake_pool_B = np.array([fake_pool_B[0].reshape(3,256,256)]).astype("float32")
+                fake_pool_B = np.array(
+                    [fake_pool_B[0].reshape(3, 256, 256)]).astype("float32")
fake_pool_B = to_variable(fake_pool_B)
fake_pool_A = A_pool.pool_image(fake_A).numpy()
-                fake_pool_A = np.array([fake_pool_A[0].reshape(3,256,256)]).astype("float32")
+                fake_pool_A = np.array(
+                    [fake_pool_A[0].reshape(3, 256, 256)]).astype("float32")
fake_pool_A = to_variable(fake_pool_A)
# optimize the d_A network
-                rec_B, fake_pool_rec_B = cycle_gan(data_B,fake_pool_B,False,True,False)
+                rec_B, fake_pool_rec_B = cycle_gan(data_B, fake_pool_B, False,
+                                                   True, False)
d_loss_A = (fluid.layers.square(fake_pool_rec_B) +
-                        fluid.layers.square(rec_B - 1)) / 2.0
+                            fluid.layers.square(rec_B - 1)) / 2.0
d_loss_A = fluid.layers.reduce_mean(d_loss_A)
d_loss_A.backward()
vars_da = []
for param in cycle_gan.parameters():
if param.name[:47]=="cycle_gan/Cycle_Gan_0/build_gen_discriminator_0":
if param.name[:
47] == "cycle_gan/Cycle_Gan_0/build_gen_discriminator_0":
vars_da.append(param)
-                optimizer2.minimize(d_loss_A,parameter_list=vars_da)
+                optimizer2.minimize(d_loss_A, parameter_list=vars_da)
cycle_gan.clear_gradients()
# optimize the d_B network
-                rec_A, fake_pool_rec_A = cycle_gan(data_A,fake_pool_A,False,False,True)
+                rec_A, fake_pool_rec_A = cycle_gan(data_A, fake_pool_A, False,
+                                                   False, True)
d_loss_B = (fluid.layers.square(fake_pool_rec_A) +
-                        fluid.layers.square(rec_A - 1)) / 2.0
+                            fluid.layers.square(rec_A - 1)) / 2.0
d_loss_B = fluid.layers.reduce_mean(d_loss_B)
d_loss_B.backward()
vars_db = []
for param in cycle_gan.parameters():
if param.name[:47]=="cycle_gan/Cycle_Gan_0/build_gen_discriminator_1":
if param.name[:
47] == "cycle_gan/Cycle_Gan_0/build_gen_discriminator_1":
vars_db.append(param)
-                optimizer3.minimize(d_loss_B,parameter_list=vars_db)
+                optimizer3.minimize(d_loss_B, parameter_list=vars_db)
cycle_gan.clear_gradients()
batch_time = time.time() - s_time
t_time += batch_time
print(
"epoch{}; batch{}; g_loss:{}; d_A_loss: {}; d_B_loss:{} ; \n g_A_loss: {}; g_A_cyc_loss: {}; g_A_idt_loss: {}; g_B_loss: {}; g_B_cyc_loss: {}; g_B_idt_loss: {};Batch_time_cost: {:.2f}".format(epoch, batch_id,g_loss_out[0],d_loss_A.numpy()[0], d_loss_B.numpy()[0],g_A_loss.numpy()[0],cyc_A_loss.numpy()[0], idt_loss_A.numpy()[0], g_B_loss.numpy()[0],cyc_B_loss.numpy()[0],idt_loss_B.numpy()[0], batch_time))
"epoch{}; batch{}; g_loss:{}; d_A_loss: {}; d_B_loss:{} ; \n g_A_loss: {}; g_A_cyc_loss: {}; g_A_idt_loss: {}; g_B_loss: {}; g_B_cyc_loss: {}; g_B_idt_loss: {};Batch_time_cost: {}".
format(epoch, batch_id, g_loss_out[0],
d_loss_A.numpy()[0],
d_loss_B.numpy()[0],
g_A_loss.numpy()[0],
cyc_A_loss.numpy()[0],
idt_loss_A.numpy()[0],
g_B_loss.numpy()[0],
cyc_B_loss.numpy()[0],
idt_loss_B.numpy()[0], batch_time))
with open('logging_train.txt', 'a') as log_file:
now = time.strftime("%c")
log_file.write(
"time: {}; epoch{}; batch{}; d_A_loss: {}; g_A_loss: {}; \
g_A_cyc_loss: {}; g_A_idt_loss: {}; d_B_loss: {}; \
g_B_loss: {}; g_B_cyc_loss: {}; g_B_idt_loss: {}; \
Batch_time_cost: {:.2f}\n".format(now, epoch, \
Batch_time_cost: {}\n"
.format(now, epoch, \
batch_id, d_loss_A[0], g_A_loss[ 0], cyc_A_loss[0], \
idt_loss_A[0], d_loss_B[0], g_A_loss[0], \
cyc_B_loss[0], idt_loss_B[0], batch_time))
@@ -184,10 +202,12 @@ def train(args):
print("kpis\td_A_loss\t%0.3f" % d_loss_A.numpy()[0])
print("kpis\td_B_loss\t%0.3f" % d_loss_B.numpy()[0])
break
if args.save_checkpoints:
-                fluid.dygraph.save_persistables(cycle_gan.state_dict(),args.output+"/checkpoints/{}".format(epoch))
+                fluid.dygraph.save_persistables(
+                    cycle_gan.state_dict(),
+                    args.output + "/checkpoints/{}".format(epoch))
if __name__ == "__main__":
args = parser.parse_args()
......
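
Note: the dygraph script above drives three optimizers over one `Cycle_Gan` network by filtering `cycle_gan.parameters()` on the auto-generated name prefix of each sub-network and passing the result as `parameter_list`, so each optimizer updates only its own sub-network. A minimal sketch of that filtering trick with stand-in objects (runnable without Paddle; `FakeParam` and the `/conv/w` suffixes are illustrative):

    class FakeParam:  # stand-in for a dygraph Parameter, which carries .name
        def __init__(self, name):
            self.name = name

    params = [
        FakeParam("cycle_gan/Cycle_Gan_0/build_gen_discriminator_0/conv/w"),
        FakeParam("cycle_gan/Cycle_Gan_0/build_gen_discriminator_1/conv/w"),
        FakeParam("cycle_gan/Cycle_Gan_0/build_generator_resnet_9blocks/conv/w"),
    ]

    prefix = "cycle_gan/Cycle_Gan_0/build_gen_discriminator_0"  # 47 chars, as in the diff
    vars_da = [p for p in params if p.name[:len(prefix)] == prefix]
    print([p.name for p in vars_da])  # only d_A's discriminator parameters
    # In the script this list feeds optimizer2.minimize(d_loss_A, parameter_list=vars_da)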