未验证 提交 44ba35e8 编写于 作者: Z Zeng Jinle 提交者: GitHub

disable_old_mem_opt_in_transformer_model, test=develop (#2767)

上级 ce853f67
......@@ -4,6 +4,10 @@ import copy
import logging
import multiprocessing
import os
# Default FLAGS_eager_delete_tensor_gb to '0' (turn on eager tensor garbage
# collection) unless the user already provided a value. Must run before the
# framework that reads this flag is imported, hence its position among the
# top-of-file imports.
os.environ.setdefault('FLAGS_eager_delete_tensor_gb', '0')
import six
import sys
sys.path.append("../../")
......@@ -720,9 +724,6 @@ def train(args):
optimizer = fluid.optimizer.SGD(0.003)
optimizer.minimize(avg_cost)
if args.use_mem_opt:
fluid.memory_optimize(train_prog)
if args.local:
logging.info("local start_up:")
train_loop(exe, train_prog, startup_prog, dev_count, sum_cost, avg_cost,
......@@ -806,4 +807,4 @@ if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
args = parse_args()
train(args)
\ No newline at end of file
train(args)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册