From 70128735b2e2c2efdd499e70dd3b123916cbab30 Mon Sep 17 00:00:00 2001
From: liuwei1031
Date: Thu, 7 Mar 2019 09:04:41 +0000
Subject: [PATCH] disable default memory optimize since it affect the
 efficiency a lot, test=develop

---
 fluid/PaddleCV/gan/cycle_gan/train.py   | 11 +++++++----
 fluid/PaddleCV/gan/cycle_gan/trainer.py |  4 ----
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/fluid/PaddleCV/gan/cycle_gan/train.py b/fluid/PaddleCV/gan/cycle_gan/train.py
index df7cf57b..3f7bb039 100644
--- a/fluid/PaddleCV/gan/cycle_gan/train.py
+++ b/fluid/PaddleCV/gan/cycle_gan/train.py
@@ -147,19 +147,22 @@ def train(args):
     init_model()
     losses = [[], []]
     t_time = 0
+    build_strategy = fluid.BuildStrategy()
+    build_strategy.enable_inplace = False
+    build_strategy.memory_optimize = False
     g_A_trainer_program = fluid.CompiledProgram(
         g_A_trainer.program).with_data_parallel(
-            loss_name=g_A_trainer.g_loss_A.name)
+            loss_name=g_A_trainer.g_loss_A.name, build_strategy=build_strategy)
     g_B_trainer_program = fluid.CompiledProgram(
         g_B_trainer.program).with_data_parallel(
-            loss_name=g_B_trainer.g_loss_B.name)
+            loss_name=g_B_trainer.g_loss_B.name, build_strategy=build_strategy)
     d_B_trainer_program = fluid.CompiledProgram(
         d_B_trainer.program).with_data_parallel(
-            loss_name=d_B_trainer.d_loss_B.name)
+            loss_name=d_B_trainer.d_loss_B.name, build_strategy=build_strategy)
     d_A_trainer_program = fluid.CompiledProgram(
         d_A_trainer.program).with_data_parallel(
-            loss_name=d_A_trainer.d_loss_A.name)
+            loss_name=d_A_trainer.d_loss_A.name, build_strategy=build_strategy)
     for epoch in range(args.epoch):
         batch_id = 0
         for i in range(max_images_num):
diff --git a/fluid/PaddleCV/gan/cycle_gan/trainer.py b/fluid/PaddleCV/gan/cycle_gan/trainer.py
index 07e8d22f..84d4c87a 100644
--- a/fluid/PaddleCV/gan/cycle_gan/trainer.py
+++ b/fluid/PaddleCV/gan/cycle_gan/trainer.py
@@ -13,8 +13,6 @@ class GATrainer():
         self.program = fluid.default_main_program().clone()
         with fluid.program_guard(self.program):
             self.fake_B = build_generator_resnet_9blocks(input_A, name="g_A")
-            #FIXME set persistable explicitly to pass CE
-            self.fake_B.persistable = True
             self.fake_A = build_generator_resnet_9blocks(input_B, name="g_B")
             self.cyc_A = build_generator_resnet_9blocks(self.fake_B, "g_B")
             self.cyc_B = build_generator_resnet_9blocks(self.fake_A, "g_A")
@@ -60,8 +58,6 @@ class GBTrainer():
         with fluid.program_guard(self.program):
             self.fake_B = build_generator_resnet_9blocks(input_A, name="g_A")
             self.fake_A = build_generator_resnet_9blocks(input_B, name="g_B")
-            #FIXME set persistable explicitly to pass CE
-            self.fake_A.persistable = True
             self.cyc_A = build_generator_resnet_9blocks(self.fake_B, "g_B")
             self.cyc_B = build_generator_resnet_9blocks(self.fake_A, "g_A")
             self.infer_program = self.program.clone()
--
GitLab
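
For context, a minimal sketch of how the BuildStrategy flags introduced in this patch are wired in, assuming the 1.x fluid API used by this repo; the toy network (one fc layer and a mean loss) is hypothetical and only stands in for the CycleGAN trainer programs:

import paddle.fluid as fluid

# Hypothetical stand-in for a trainer program: a single fc layer and a
# mean loss, just to have a loss name to compile against.
x = fluid.layers.data(name="x", shape=[4], dtype="float32")
y = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(y)

# Mirror the patch: disable in-place variable reuse and the
# memory-optimize pass before compiling for data parallelism.
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = False
build_strategy.memory_optimize = False

compiled_prog = fluid.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(
        loss_name=loss.name, build_strategy=build_strategy)

Running compiled_prog still needs an Executor and the usual feed/fetch plumbing; the snippet only shows where build_strategy plugs in, which is why the patch no longer needs the persistable workaround removed from trainer.py.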