Commit 80e9ab69 authored by zhanghan17

rm memory optimize

Parent 21127772
@@ -118,14 +118,6 @@ def main(args):
             weight_decay=args.weight_decay,
             scheduler=args.lr_scheduler,
             use_fp16=args.use_fp16)
-        """
-        fluid.memory_optimize(
-            input_program=train_program,
-            skip_opt_set=[
-                graph_vars["loss"].name,
-                graph_vars["num_seqs"].name,
-            ])
-        """
         if args.verbose:
             if args.in_tokens:
@@ -109,15 +109,6 @@ def main(args):
             scheduler=args.lr_scheduler,
             use_fp16=args.use_fp16)
-        """
-        fluid.memory_optimize(
-            input_program=train_program,
-            skip_opt_set=[
-                graph_vars["loss"].name, graph_vars["labels"].name,
-                graph_vars["infers"].name, graph_vars["seq_lens"].name
-            ])
-        """
         if args.verbose:
             if args.in_tokens:
                 lower_mem, upper_mem, unit = fluid.contrib.memory_usage(
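Note: the deleted lines were already commented out, and fluid.memory_optimize was deprecated in later PaddlePaddle Fluid releases in favor of enabling memory reuse through a build strategy. The sketch below shows that alternative under the assumption that a train_program and a graph_vars["loss"] variable exist as in the scripts touched by this commit; it is an illustrative replacement, not part of the change itself.

    import paddle.fluid as fluid

    # Sketch: enable memory reuse via BuildStrategy instead of the removed
    # fluid.memory_optimize call. Assumes train_program and graph_vars["loss"]
    # are defined as in the training scripts modified by this commit.
    build_strategy = fluid.BuildStrategy()
    build_strategy.memory_optimize = True  # let the executor reuse variable memory

    compiled_prog = fluid.CompiledProgram(train_program).with_data_parallel(
        loss_name=graph_vars["loss"].name,
        build_strategy=build_strategy)

    # compiled_prog would then be passed to exe.run(...) in place of train_program.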