Commit 6bf268f9 authored by sneaxiy

remove mem opt strategies settings, test=develop

Parent 6aaf0192
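Every hunk below deletes the same family of settings: explicit memory-optimization flags (`enable_inplace`, `memory_optimize`) on `fluid.BuildStrategy`, plus calls to the legacy `fluid.memory_optimize` pass. For reference, a minimal sketch of the pattern being removed, assuming the Paddle 1.x fluid API (names here are illustrative, not from any one file):

```python
import paddle.fluid as fluid

# The pattern this commit deletes: hand-setting memory-optimization
# flags on BuildStrategy before compiling a program for execution.
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True    # let ops reuse input buffers for outputs
build_strategy.memory_optimize = False  # cross-op buffer-reuse pass

compiled_prog = fluid.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(
        build_strategy=build_strategy)
```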
@@ -111,9 +111,7 @@ class CGAN(object):
utility.init_checkpoints(self.cfg, exe, g_trainer, "net_G")
utility.init_checkpoints(self.cfg, exe, d_trainer, "net_D")
### memory optim
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
g_trainer_program = fluid.CompiledProgram(
g_trainer.program).with_data_parallel(
......
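With the strategy lines gone, the GAN trainers fall back to the framework-default `BuildStrategy`. The compile call presumably reduces to a fragment like the following (the loss name is inferred from the surrounding code, not shown in the hunk):

```python
g_trainer_program = fluid.CompiledProgram(
    g_trainer.program).with_data_parallel(
        loss_name=g_trainer.g_loss.name)  # default BuildStrategy applies
```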
@@ -279,9 +279,7 @@ class CycleGAN(object):
utility.init_checkpoints(self.cfg, exe, d_A_trainer, "net_DA")
utility.init_checkpoints(self.cfg, exe, d_B_trainer, "net_DB")
### memory optim
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
gen_trainer_program = fluid.CompiledProgram(
gen_trainer.program).with_data_parallel(
......
@@ -107,9 +107,7 @@ class DCGAN(object):
utility.init_checkpoints(self.cfg, exe, g_trainer, "net_G")
utility.init_checkpoints(self.cfg, exe, d_trainer, "net_D")
### memory optim
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
g_trainer_program = fluid.CompiledProgram(
g_trainer.program).with_data_parallel(
......
@@ -323,9 +323,7 @@ class SPADE(object):
utility.init_checkpoints(self.cfg, exe, gen_trainer, "net_G")
utility.init_checkpoints(self.cfg, exe, dis_trainer, "net_D")
### memory optim
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = False
build_strategy.sync_batch_norm = False
gen_trainer_program = fluid.CompiledProgram(
......
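Note that `sync_batch_norm` is not a memory flag: it makes batch norm compute its mean/variance statistics across all devices instead of per card, which changes training semantics. A minimal sketch of the setting in isolation (not taken from this repo):

```python
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
# Synchronize batch-norm statistics across all GPUs each step; useful
# when the per-card batch size is small, as is common for image
# synthesis models like SPADE. GPU-only.
build_strategy.sync_batch_norm = True
```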
@@ -168,7 +168,6 @@ def train(args):
train_model.load_pretrain_params(exe, pretrain, train_prog, place)
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
if args.model_name in ['CTCN']:
build_strategy.enable_sequential_execution = True
......
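`enable_sequential_execution` stays behind the CTCN guard: it forces operators to run in program order rather than by data-dependency readiness, trading scheduling parallelism for reproducible execution. In isolation:

```python
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
# Run operators in the order they appear in the program; slower, but
# deterministic scheduling also tends to lower peak memory because
# fewer intermediates are alive at once.
build_strategy.enable_sequential_execution = True
```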
@@ -199,7 +199,6 @@ exec_strategy.num_iteration_per_drop_scope = 100
build_strategy = fluid.BuildStrategy()
if args.memory_optimize:
build_strategy.fuse_relu_depthwise_conv = True
build_strategy.enable_inplace = True
place = fluid.CPUPlace()
if args.use_gpu:
......
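Here only the inplace line goes away; `fuse_relu_depthwise_conv` remains behind the `args.memory_optimize` flag. It fuses relu and depthwise_conv2d kernels so the intermediate activation never has to be materialized. A sketch, with the command-line guard assumed as in the hunk:

```python
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
# Fuse relu and depthwise_conv2d into a single kernel, avoiding one
# intermediate activation buffer per fused pair; mainly helps
# depthwise-heavy networks such as MobileNet variants.
build_strategy.fuse_relu_depthwise_conv = True
```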
@@ -284,7 +284,6 @@ def train_parallel(args):
strategy.num_iteration_per_drop_scope = 30
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_sequential_execution = bool(
args.enable_sequential_execution)
......
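The `num_iteration_per_drop_scope` line above is itself a memory knob: the parallel executor keeps intermediate tensors in local scopes and only frees them every N iterations, trading memory held for scope-cleanup cost. In isolation:

```python
import paddle.fluid as fluid

exec_strategy = fluid.ExecutionStrategy()
# Drop (and free) the executor's local scopes every 30 iterations;
# larger values cut cleanup overhead but hold memory longer.
exec_strategy.num_iteration_per_drop_scope = 30
```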
@@ -384,8 +384,6 @@ def best_strategy_compiled(args, program, loss):
return program
else:
build_strategy = fluid.compiler.BuildStrategy()
#Feature will be supported in Fluid v1.6
#build_strategy.enable_inplace = True
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = fluid.core.get_cuda_device_count()
......
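The commented-out line ("Feature will be supported in Fluid v1.6") hints at why this commit exists at all: the inplace pass was becoming a framework default, so per-script overrides turn redundant. The effective defaults can be checked directly; a tiny sketch:

```python
import paddle.fluid as fluid

# Inspect what the framework does when scripts stop overriding it.
bs = fluid.BuildStrategy()
print('enable_inplace default:', bs.enable_inplace)
print('memory_optimize default:', bs.memory_optimize)
```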
@@ -119,8 +119,6 @@ def train():
if cfg.parallel:
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = True
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_iteration_per_drop_scope = 10
......
@@ -209,7 +209,6 @@ def train(args,
if parallel:
loss.persistable = True
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
train_exe = fluid.ParallelExecutor(main_program=train_prog,
use_cuda=use_gpu, loss_name=loss.name, build_strategy=build_strategy)
......
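The `loss.persistable = True` line above is what keeps the loss fetchable once reuse passes run: persistable variables are excluded from in-place buffer reuse and from eager garbage collection. A toy illustration:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
# Keep the loss buffer alive through the step so it can still be
# fetched even with buffer reuse or eager GC enabled.
loss.persistable = True
```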
@@ -111,7 +111,6 @@ def train():
fluid.io.load_vars(exe, cfg.pretrain, predicate=if_exist)
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False #gc and memory optimize may conflict
syncbn = cfg.syncbn
if (syncbn and devices_num <= 1) or num_trainers > 1:
print("Disable syncbn in single device")
......
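The `#gc and memory optimize may conflict` comment refers to Fluid's eager garbage collection, which frees tensors the moment they are dead; the static `memory_optimize` pass rewrites variable lifetimes at build time, so enabling both can double-manage the same buffers. GC is driven by an environment flag; a sketch, assuming it is set before fluid initializes:

```python
import os
# 0.0 frees dead tensors immediately; a negative value disables eager
# garbage collection. Must be set before paddle.fluid is imported.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0.0'

import paddle.fluid as fluid
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False  # leave lifetimes to the GC
```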
@@ -398,8 +398,6 @@ def train(args):
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
ema.update()
fluid.memory_optimize(train_program, skip_opt_set=[loss.name, num_seqs.name])
if args.verbose:
if args.in_tokens:
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
@@ -444,9 +442,6 @@ def train(args):
if args.use_ema and 'ema' not in dir():
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
fluid.memory_optimize(test_prog, skip_opt_set=[unique_ids.name,
start_logits.name, end_logits.name, num_seqs.name])
test_prog = test_prog.clone(for_test=True)
# if args.random_seed is not None:
# test_prog.random_seed = args.random_seed
......
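The BERT/ERNIE-style scripts (this hunk and the three near-identical ones that follow) used the older program-level pass, `fluid.memory_optimize`, instead of `BuildStrategy` flags; `skip_opt_set` lists variables whose buffers must not be reused because they are fetched later. A self-contained sketch of the kind of call being deleted (toy network, illustrative names):

```python
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.layers.data(name='x', shape=[8], dtype='float32')
    loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))
    fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

# Legacy static memory-reuse pass (later deprecated in favor of
# BuildStrategy flags and runtime GC); variables in skip_opt_set keep
# their own buffers so they remain fetchable.
fluid.memory_optimize(main_prog, skip_opt_set=set([loss.name]))
```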
@@ -426,8 +426,6 @@ def train(args):
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
ema.update()
fluid.memory_optimize(train_program, skip_opt_set=[loss.name, num_seqs.name])
if args.verbose:
if args.in_tokens:
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
@@ -475,9 +473,6 @@ def train(args):
if args.use_ema and 'ema' not in dir():
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
fluid.memory_optimize(test_prog, skip_opt_set=[unique_ids.name,
start_logits.name, end_logits.name, num_seqs.name])
test_prog = test_prog.clone(for_test=True)
# if args.random_seed is not None:
# test_prog.random_seed = args.random_seed
......
@@ -398,8 +398,6 @@ def train(args):
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
ema.update()
fluid.memory_optimize(train_program, skip_opt_set=[loss.name, num_seqs.name])
if args.verbose:
if args.in_tokens:
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
@@ -444,9 +442,6 @@ def train(args):
if args.use_ema and 'ema' not in dir():
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
fluid.memory_optimize(test_prog, skip_opt_set=[unique_ids.name,
start_logits.name, end_logits.name, num_seqs.name])
test_prog = test_prog.clone(for_test=True)
# if args.random_seed is not None:
# test_prog.random_seed = args.random_seed
......
@@ -426,8 +426,6 @@ def train(args):
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
ema.update()
fluid.memory_optimize(train_program, skip_opt_set=[loss.name, num_seqs.name])
if args.verbose:
if args.in_tokens:
lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
@@ -475,9 +473,6 @@ def train(args):
if args.use_ema and 'ema' not in dir():
ema = fluid.optimizer.ExponentialMovingAverage(args.ema_decay)
fluid.memory_optimize(test_prog, skip_opt_set=[unique_ids.name,
start_logits.name, end_logits.name, num_seqs.name])
test_prog = test_prog.clone(for_test=True)
# if args.random_seed is not None:
# test_prog.random_seed = args.random_seed
......
@@ -122,7 +122,6 @@ def do_train(args):
print("finish init word embedding ...")
build_strategy = fluid.compiler.BuildStrategy()
build_strategy.enable_inplace = True
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
......
@@ -165,8 +165,6 @@ def do_train(args):
save_load_io.init_from_pretrain_model(args, exe, train_prog)
build_strategy = fluid.compiler.BuildStrategy()
build_strategy.enable_inplace = True
compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
......
@@ -176,8 +176,6 @@ def main():
exec_strategy.num_iteration_per_drop_scope = 100
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
build_strategy.memory_optimize = False
build_strategy.fuse_all_optimizer_ops = True
if args.parallel:
......
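`fuse_all_optimizer_ops` batches the per-parameter optimizer updates into fused kernels to cut launch overhead. Historically it did not combine with every memory pass, which may be why this script pins `memory_optimize = False` alongside it. In isolation:

```python
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
# Fuse all optimizer update ops (one batched kernel instead of one
# launch per parameter); reduces kernel-launch overhead on GPU.
build_strategy.fuse_all_optimizer_ops = True
```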
@@ -88,8 +88,6 @@ def do_train(args):
exec_strategy = fluid.ExecutionStrategy()
# exec_strategy.num_threads = dev_count * 6
build_strategy = fluid.compiler.BuildStrategy()
# build_strategy.enable_inplace = True
compiled_prog = fluid.compiler.CompiledProgram(train_program).with_data_parallel(
loss_name=train_ret['avg_cost'].name,
build_strategy=build_strategy,
......
@@ -233,7 +233,6 @@ def do_train(args):
init_from_pretrain_model(args, exe, train_prog)
build_strategy = fluid.compiler.BuildStrategy()
build_strategy.enable_inplace = True
exec_strategy = fluid.ExecutionStrategy()
if num_trainers > 1:
dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog)
......
@@ -126,8 +126,6 @@ def main():
exec_strategy.num_iteration_per_drop_scope = 100
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
build_strategy.memory_optimize = False
# build_strategy.fuse_all_optimizer_ops = True
if args.parallel:
......
@@ -255,7 +255,6 @@ def train(args):
load_persistable_nodes(exe, checkpoint, main_graph)
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=train_cost.name, build_strategy=build_strategy)
......
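This final hunk is the quantization-aware-training path: the program is compiled from an `IrGraph` that quantization passes have rewritten, and both reuse flags stay off, presumably because the generic reuse passes were not validated against the inserted quantize/dequantize nodes. If a plain `Program` is needed again (e.g. to save an inference model), the graph can be converted back; a fragment assuming `main_graph` as in the hunk:

```python
# Convert the quantization-rewritten IrGraph back to a fluid Program.
train_prog = main_graph.to_program()
```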