Unverified commit 09892118, authored by 李季, committed by GitHub

Revert pull request 34212 (#34558)

* revert commit id 34212
Parent 420570c9
@@ -183,7 +183,7 @@ message DistributedStrategy {
   optional bool use_hierarchical_allreduce = 15 [ default = false ];
   optional int32 hierarchical_allreduce_inter_nranks = 16 [ default = 1 ];
   optional bool sync_batch_norm = 17 [ default = false ];
-  optional bool fuse_all_reduce_ops = 18 [ default = false ];
+  optional bool fuse_all_reduce_ops = 18 [ default = true ];
   optional int32 fuse_grad_size_in_MB = 19 [ default = 32 ];
   optional float fuse_grad_size_in_TFLOPS = 20 [ default = 50 ];
   optional bool cudnn_exhaustive_search = 21 [ default = false ];
......
@@ -366,6 +366,8 @@ class TestFleetShardingHybridOptimizer(TestFleetMetaOptimizer):
             "gradient_merge_acc_step": 1,
             "mp_degree": 1
         }
+        strategy.fuse_all_reduce_ops = False
         self.optimizer(avg_cost, strategy, train_prog, startup_prog)
         startup_prog_ops = startup_prog.global_block().ops
         main_prog_ops = train_prog.global_block().ops
......
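For context, a minimal sketch (not part of this commit) of how the reverted proto default surfaces in the Python fleet API; it assumes paddle.distributed.fleet.DistributedStrategy exposes fuse_all_reduce_ops and fuse_grad_size_in_MB as properties mirroring the proto fields shown above.

# Minimal sketch, assuming paddle.distributed.fleet is available; property
# names mirror the DistributedStrategy proto fields in the diff above.
import paddle.distributed.fleet as fleet

strategy = fleet.DistributedStrategy()

# After this revert, fuse_all_reduce_ops defaults to True again (proto default),
# so a test that relies on unfused allreduce ops must disable it explicitly,
# as the updated test does.
print(strategy.fuse_all_reduce_ops)    # expected: True
strategy.fuse_all_reduce_ops = False

# The related fusion knob keeps its proto default.
print(strategy.fuse_grad_size_in_MB)   # expected: 32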