Commit 38143e5a authored by Yu Yang

Clean unused changes

test=develop
Parent: d424115f
@@ -168,7 +168,7 @@ def train_parallel(train_args, test_args, args, train_prog, test_prog,
     startup_exe = fluid.Executor(place)
     startup_exe.run(startup_prog)
     strategy = fluid.ExecutionStrategy()
-    strategy.num_threads = 0 #args.cpus
+    strategy.num_threads = args.cpus
     strategy.allow_op_delay = False
     build_strategy = fluid.BuildStrategy()
     if args.reduce_strategy == "reduce":
@@ -188,8 +188,6 @@ def train_parallel(train_args, test_args, args, train_prog, test_prog,
         num_trainers = 1
         trainer_id = 0
-    print('Use parallel_executor')
-    strategy.type = 2
     exe = fluid.ParallelExecutor(
         True,
         avg_loss.name,
...
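Note: the hunks above touch the usual Paddle 1.x ParallelExecutor setup, where an ExecutionStrategy controls per-place threading, a BuildStrategy controls gradient aggregation, and both are handed to fluid.ParallelExecutor together with the loss to optimize. Below is a minimal sketch of that wiring under the Paddle 1.x fluid API; the function name build_parallel_executor and the use_cuda/cpus/avg_loss arguments are illustrative stand-ins, not code from this commit.

    # Sketch of the ParallelExecutor setup modified above (Paddle 1.x fluid API).
    # `use_cuda`, `cpus`, and `avg_loss` are illustrative, not from this commit.
    import paddle.fluid as fluid

    def build_parallel_executor(avg_loss, use_cuda=True, cpus=4):
        # Execution strategy: how many threads each place uses to run ops.
        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.num_threads = cpus      # the commit restores args.cpus here
        exec_strategy.allow_op_delay = False

        # Build strategy: how gradients are aggregated across devices.
        build_strategy = fluid.BuildStrategy()
        build_strategy.reduce_strategy = \
            fluid.BuildStrategy.ReduceStrategy.AllReduce

        # ParallelExecutor replicates the program across devices and
        # aggregates gradients of `avg_loss` per the build strategy.
        return fluid.ParallelExecutor(
            use_cuda,
            loss_name=avg_loss.name,
            exec_strategy=exec_strategy,
            build_strategy=build_strategy,
            num_trainers=1,
            trainer_id=0)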
@@ -172,7 +172,7 @@ def get_model(args, is_train, main_prog, startup_prog):
     reader, dshape, class_dim = _model_reader_dshape_classdim(args, is_train)
     pyreader = None
-    trainer_count = int(os.getenv("PADDLE_TRAINERS", 1))
+    trainer_count = int(os.getenv("PADDLE_TRAINERS"))
     with fluid.program_guard(main_prog, startup_prog):
         with fluid.unique_name.guard():
             if args.use_reader_op:
...
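Note: the second hunk drops the fallback default when reading PADDLE_TRAINERS. The behavioral difference, shown as a plain Python sketch (not code from this commit): with a default, an unset variable falls back to a single trainer; without it, os.getenv returns None and int(None) raises TypeError, so the environment variable becomes mandatory.

    import os

    # With a default: an unset PADDLE_TRAINERS falls back to one trainer.
    trainer_count = int(os.getenv("PADDLE_TRAINERS", 1))   # -> 1 when unset

    # Without a default: os.getenv returns None when the variable is unset,
    # and int(None) raises TypeError, so PADDLE_TRAINERS must be exported.
    # trainer_count = int(os.getenv("PADDLE_TRAINERS"))    # TypeError if unset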