Commit c622d7ce, authored by Flowingsun007

Remove the --use_boxing_v2 param for all CNNs

Parent 05681a0b
@@ -601,7 +601,6 @@ python3 cnn_benchmark/of_cnn_train_val.py \
 --val_batch_size_per_device=512 \
 --num_epoch=90 \
 --use_fp16=false \
---use_boxing_v2=false \
 --model="alexnet" \
 ```
@@ -630,7 +629,6 @@ python3 cnn_benchmark/of_cnn_train_val.py \
 --val_batch_size_per_device=128 \
 --num_epoch=90 \
 --use_fp16=false \
---use_boxing_v2=false \
 --model="vgg" \
 ```
......
@@ -22,8 +22,6 @@ def get_train_config(args):
     train_config.all_reduce_group_min_mbyte(8)
     train_config.all_reduce_group_num(128)
-    if args.use_boxing_v2:
-        train_config.use_boxing_v2(True)
     train_config.prune_parallel_cast_ops(True)
     train_config.train.model_update_conf(gen_model_update_conf(args))
......
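For readers of this diff, a minimal sketch of the trimmed CNN helper after the hunk above is shown below. It only uses calls that appear in the hunk's context lines; the `flow.function_config()` constructor and the fact that `gen_model_update_conf` is defined elsewhere in the same file are assumptions about the surrounding code, not part of this commit.

```python
import oneflow as flow  # legacy (pre-0.4) OneFlow API, as used by these scripts

def get_train_config(args):
    # Assumed constructor; the hunk only shows methods called on train_config.
    train_config = flow.function_config()

    # Context lines kept by this commit.
    train_config.all_reduce_group_min_mbyte(8)
    train_config.all_reduce_group_num(128)

    # The removed branch used to sit here:
    #   if args.use_boxing_v2:
    #       train_config.use_boxing_v2(True)

    train_config.prune_parallel_cast_ops(True)
    # gen_model_update_conf is defined elsewhere in the original file.
    train_config.train.model_update_conf(gen_model_update_conf(args))
    return train_config
```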
@@ -111,7 +111,6 @@ def _get_train_conf():
         'lazy_adam_conf': {
         }
     })
-    train_conf.use_boxing_v2(True)
     train_conf.default_distribute_strategy(flow.distribute.consistent_strategy())
     train_conf.indexed_slices_optimizer_conf(dict(include_op_names=dict(op_name=['wide_embedding', 'deep_embedding'])))
     return train_conf
......
@@ -118,7 +118,6 @@ def _get_train_conf():
         'lazy_adam_conf': {
         }
     })
-    train_conf.use_boxing_v2(True)
     train_conf.default_distribute_strategy(flow.distribute.consistent_strategy())
     train_conf.indexed_slices_optimizer_conf(dict(include_op_names=dict(op_name=['wide_embedding', 'deep_embedding'])))
     return train_conf
......
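The two wide-and-deep hunks above make the same one-line removal in two copies of `_get_train_conf`. A hedged sketch of the resulting function follows; the `flow.function_config()` constructor and the call that owns the visible `'lazy_adam_conf'` dict tail are assumptions, since only the end of that dict appears in the hunks.

```python
import oneflow as flow

def _get_train_conf():
    train_conf = flow.function_config()  # assumed constructor

    # Only the tail of this dict is visible in the hunks; it is assumed to be
    # the model-update configuration, with its other fields elided here.
    train_conf.train.model_update_conf({
        'lazy_adam_conf': {
        }
    })

    # `train_conf.use_boxing_v2(True)` was removed by this commit.
    train_conf.default_distribute_strategy(flow.distribute.consistent_strategy())
    train_conf.indexed_slices_optimizer_conf(
        dict(include_op_names=dict(op_name=['wide_embedding', 'deep_embedding'])))
    return train_conf
```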
@@ -37,8 +37,6 @@ parser.add_argument("--log_every_n_iter", type=int, default=1, help="print loss
 parser.add_argument("--data_dir", type=str, default=None)
 parser.add_argument("--data_part_num", type=int, default=32, help="data part number in dataset")
 parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True, help='use use fp16 or not')
-parser.add_argument('--use_boxing_v2', type=str2bool, nargs='?', const=True,
-                    help='use boxing v2 or not')
 # log and resore/save
 parser.add_argument("--loss_print_every_n_iter", type=int, default=10, required=False,
@@ -164,8 +162,6 @@ config.train.model_update_conf(_BERT_MODEL_UPDATE_CONF)
 if args.use_fp16:
     config.enable_auto_mixed_precision(True)
-if args.use_boxing_v2:
-    config.use_boxing_v2(True)
 @flow.global_function(config)
@@ -206,9 +202,6 @@ def main():
     flow.config.gpu_device_num(args.gpu_num_per_node)
     flow.env.log_dir(args.log_dir)
-    if args.use_boxing_v2:
-        flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-        flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
     if args.node_num > 1:
......
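In the BERT scripts the change removes both the argparse flag and the config branch it guarded. A rough sketch of the resulting function-config block is below, wrapping the module-level code in a helper purely for illustration and assuming `config` is built from `flow.function_config()`.

```python
import oneflow as flow

def make_bert_config(args, model_update_conf):
    """Hypothetical helper: the original scripts run these lines at module
    scope and then decorate the training job with @flow.global_function(config)."""
    config = flow.function_config()  # assumed constructor
    config.train.model_update_conf(model_update_conf)
    if args.use_fp16:
        config.enable_auto_mixed_precision(True)
    # The removed `if args.use_boxing_v2: config.use_boxing_v2(True)` branch
    # is gone; no boxing-related switch remains on the function config.
    return config
```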
@@ -37,8 +37,6 @@ parser.add_argument("--log_every_n_iter", type=int, default=1, help="print loss
 parser.add_argument("--data_dir", type=str, default=None)
 parser.add_argument("--data_part_num", type=int, default=32, help="data part number in dataset")
 parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True, help='use use fp16 or not')
-parser.add_argument('--use_boxing_v2', type=str2bool, nargs='?', const=True,
-                    help='use boxing v2 or not')
 # log and resore/save
 parser.add_argument("--loss_print_every_n_iter", type=int, default=10, required=False,
@@ -160,8 +158,6 @@ config.train.model_update_conf(_BERT_MODEL_UPDATE_CONF)
 if args.use_fp16:
     config.enable_auto_mixed_precision(True)
-if args.use_boxing_v2:
-    config.use_boxing_v2(True)
 @flow.global_function(config)
@@ -202,9 +198,6 @@ def main():
     flow.config.gpu_device_num(args.gpu_num_per_node)
     flow.env.log_dir(args.log_dir)
-    if args.use_boxing_v2:
-        flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-        flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
     if args.node_num > 1:
......
@@ -38,8 +38,6 @@ parser.add_argument("--log_every_n_iter", type=int, default=1, help="print loss
 parser.add_argument("--data_dir", type=str, default=None)
 parser.add_argument("--data_part_num", type=int, default=32, help="data part number in dataset")
 parser.add_argument('--use_fp16', type=str2bool, nargs='?', const=True, help='use use fp16 or not')
-parser.add_argument('--use_boxing_v2', type=str2bool, nargs='?', const=True,
-                    help='use boxing v2 or not')
 # log and resore/save
 parser.add_argument("--loss_print_every_n_iter", type=int, default=10, required=False,
@@ -156,8 +154,6 @@ config.default_distribute_strategy(flow.distribute.consistent_strategy())
 if args.use_fp16:
     config.enable_auto_mixed_precision(True)
-if args.use_boxing_v2:
-    config.use_boxing_v2(True)
 @flow.global_function(config)
@@ -198,9 +194,6 @@ def main():
     flow.config.gpu_device_num(args.gpu_num_per_node)
     flow.env.log_dir(args.log_dir)
-    if args.use_boxing_v2:
-        flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
-        flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
     if args.node_num > 1:
......
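Finally, the `main()` hunks in the BERT scripts drop the collective-boxing NCCL-fusion tuning along with the flag. A sketch of the trimmed device/environment setup follows; `setup_env` is a hypothetical wrapper, not a function in the scripts, and the multi-node branch body is elided because it lies outside the visible hunks.

```python
import oneflow as flow

def setup_env(args):
    # Hypothetical wrapper around the calls shown in the main() hunks.
    flow.config.gpu_device_num(args.gpu_num_per_node)
    flow.env.log_dir(args.log_dir)
    # Removed by this commit:
    #   if args.use_boxing_v2:
    #       flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
    #       flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
    if args.node_num > 1:
        # Multi-node setup continues here in the original scripts; it is not
        # part of the visible hunks.
        pass
```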