From d15e73b01e2e7d032e3208548141dd1956f771f7 Mon Sep 17 00:00:00 2001
From: lilong12
Date: Tue, 2 Mar 2021 11:23:00 +0800
Subject: [PATCH] [CP] align fleet param (#31220)

* update, test=develop (#30692)

* align the default value of some configuration for fleet to that of single cards (#30740)

* update, test=develop
---
 .../fluid/framework/distributed_strategy.proto |  6 +++---
 .../fleet/base/distributed_strategy.py         | 16 ++++++++++++++++
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/framework/distributed_strategy.proto b/paddle/fluid/framework/distributed_strategy.proto
index 55ba9b0a0f..208ab9a93c 100644
--- a/paddle/fluid/framework/distributed_strategy.proto
+++ b/paddle/fluid/framework/distributed_strategy.proto
@@ -141,9 +141,9 @@ message DistributedStrategy {
   optional bool fuse_all_reduce_ops = 18 [ default = true ];
   optional int32 fuse_grad_size_in_MB = 19 [ default = 32 ];
   optional float fuse_grad_size_in_TFLOPS = 20 [ default = 50 ];
-  optional bool cudnn_exhaustive_search = 21 [ default = true ];
-  optional int32 conv_workspace_size_limit = 22 [ default = 4000 ];
-  optional bool cudnn_batchnorm_spatial_persistent = 23 [ default = true ];
+  optional bool cudnn_exhaustive_search = 21 [ default = false ];
+  optional int32 conv_workspace_size_limit = 22 [ default = 512 ];
+  optional bool cudnn_batchnorm_spatial_persistent = 23 [ default = false ];
   optional bool adaptive_localsgd = 24 [ default = false ];
   optional bool fp16_allreduce = 25 [ default = false ];
   optional bool sharding = 26 [ default = false ];
diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py
index 186d9263dc..f79013d734 100755
--- a/python/paddle/distributed/fleet/base/distributed_strategy.py
+++ b/python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -118,6 +118,22 @@ class DistributedStrategy(object):
         """
         self.strategy = distributed_strategy_pb2.DistributedStrategy()
+
+        # Set the default values of the following flags to the ones set by users
+        key = 'FLAGS_cudnn_batchnorm_spatial_persistent'
+        if core.globals().is_public(key):
+            self.strategy.cudnn_batchnorm_spatial_persistent = bool(
+                core.globals()[key])
+        key = 'FLAGS_conv_workspace_size_limit'
+        if core.globals().is_public(key):
+            self.strategy.conv_workspace_size_limit = int(core.globals()[key])
+        key = 'FLAGS_cudnn_exhaustive_search'
+        if core.globals().is_public(key):
+            self.strategy.cudnn_exhaustive_search = bool(core.globals()[key])
+        key = 'FLAGS_sync_nccl_allreduce'
+        if core.globals().is_public(key):
+            self.strategy.sync_nccl_allreduce = bool(core.globals()[key])
+
         self.__lock_attr = True

     def __setattr__(self, key, value):
--
GitLab
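
For readers who want to see the effect of this change, below is a minimal sketch of the new behavior, assuming a PaddlePaddle 2.x build that includes this patch; the flag values are chosen purely for illustration, and the printed results are what the patched constructor is expected to produce:

import paddle
import paddle.distributed.fleet as fleet

# Illustrative values only; any of the public FLAGS_* covered by the
# patch (cudnn_exhaustive_search, conv_workspace_size_limit,
# cudnn_batchnorm_spatial_persistent, sync_nccl_allreduce) behaves the same.
paddle.set_flags({
    'FLAGS_cudnn_exhaustive_search': True,
    'FLAGS_conv_workspace_size_limit': 1024,  # MB
})

# With this patch, DistributedStrategy() reads the process-level flags as
# its defaults, so a fleet run starts from the same cuDNN/conv settings
# as a single-card run.
strategy = fleet.DistributedStrategy()
print(strategy.cudnn_exhaustive_search)    # True  (proto default: false)
print(strategy.conv_workspace_size_limit)  # 1024  (proto default: 512)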