# test_fleet_distributed_strategy.py
# (Web-scrape artifacts — repository banner, file size, UI text, and
# line-number runs — removed; they were not part of the original source.)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import paddle
import os


class TestStrategyConfig(unittest.TestCase):
    """Unit tests for ``paddle.distributed.fleet.DistributedStrategy``.

    Each test exercises one strategy field.  The setters are expected to
    silently ignore values of the wrong type (the previous value is kept),
    with the exception of ``a_sync``, whose setter raises ``ValueError``,
    and unknown attributes, which raise ``TypeError``.
    """

    def _check_bool_field(self, field):
        # Toggle a boolean strategy field True -> False, then verify that
        # assigning a non-bool (string) value is silently ignored.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        setattr(strategy, field, True)
        self.assertEqual(getattr(strategy, field), True)
        setattr(strategy, field, False)
        self.assertEqual(getattr(strategy, field), False)
        setattr(strategy, field, "True")
        self.assertEqual(getattr(strategy, field), False)

    def _check_int_field(self, field, good, bad):
        # Assign a valid integer, then an invalid (string) value, and
        # verify the invalid assignment is silently ignored.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        setattr(strategy, field, good)
        self.assertEqual(getattr(strategy, field), good)
        setattr(strategy, field, bad)
        self.assertEqual(getattr(strategy, field), good)

    def _configured_build_strategy(self):
        # A fully configured BuildStrategy, shared by several tests.
        build_strategy = paddle.fluid.BuildStrategy()
        build_strategy.enable_sequential_execution = True
        build_strategy.nccl_comm_num = 10
        build_strategy.use_hierarchical_allreduce = True
        build_strategy.hierarchical_allreduce_inter_nranks = 1
        build_strategy.fuse_elewise_add_act_ops = True
        build_strategy.fuse_bn_act_ops = True
        build_strategy.enable_auto_fusion = True
        build_strategy.fuse_relu_depthwise_conv = True
        build_strategy.fuse_broadcast_ops = True
        build_strategy.fuse_all_optimizer_ops = True
        build_strategy.sync_batch_norm = True
        build_strategy.enable_inplace = True
        build_strategy.fuse_all_reduce_ops = True
        build_strategy.enable_backward_optimizer_op_deps = True
        build_strategy.trainers_endpoints = ["1", "2"]
        return build_strategy

    def _configured_execution_strategy(self):
        # A fully configured ExecutionStrategy, shared by several tests.
        exe_strategy = paddle.fluid.ExecutionStrategy()
        exe_strategy.num_threads = 10
        exe_strategy.num_iteration_per_drop_scope = 10
        exe_strategy.num_iteration_per_run = 10
        return exe_strategy

    def test_amp(self):
        self._check_bool_field("amp")

    def test_amp_configs(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        configs = {
            "init_loss_scaling": 32768,
            "decr_every_n_nan_or_inf": 2,
            "incr_every_n_steps": 1000,
            "incr_ratio": 2.0,
            "use_dynamic_loss_scaling": True,
            "decr_ratio": 0.5
        }
        strategy.amp_configs = configs
        self.assertEqual(strategy.amp_configs["init_loss_scaling"], 32768)

    def test_recompute(self):
        self._check_bool_field("recompute")

    def test_recompute_configs(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.recompute_configs = {"checkpoints": ["x", "y"]}
        self.assertEqual(len(strategy.recompute_configs["checkpoints"]), 2)

    def test_pipeline(self):
        self._check_bool_field("pipeline")

    def test_pipeline_configs(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.pipeline_configs = {"micro_batch": 4}
        self.assertEqual(strategy.pipeline_configs["micro_batch"], 4)

    def test_localsgd(self):
        self._check_bool_field("localsgd")

    def test_localsgd_configs(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.localsgd_configs = {"k_steps": 4}
        self.assertEqual(strategy.localsgd_configs["k_steps"], 4)

    def test_dgc(self):
        self._check_bool_field("dgc")

    def test_sync_nccl_allreduce(self):
        self._check_bool_field("sync_nccl_allreduce")

    def test_nccl_comm_num(self):
        self._check_int_field("nccl_comm_num", 1, "2")

    def test_use_hierarchical_allreduce(self):
        self._check_bool_field("use_hierarchical_allreduce")

    def test_hierarchical_allreduce_inter_nranks(self):
        self._check_int_field("hierarchical_allreduce_inter_nranks", 8, "4")

    def test_sync_batch_norm(self):
        self._check_bool_field("sync_batch_norm")

    def test_fuse_all_reduce_ops(self):
        self._check_bool_field("fuse_all_reduce_ops")

    def test_fuse_grad_size_in_MB(self):
        self._check_int_field("fuse_grad_size_in_MB", 50, "40")

    def test_fuse_grad_size_in_TFLOPS(self):
        # Private float field; assertGreater is used instead of an exact
        # equality check to avoid float-representation issues.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy._fuse_grad_size_in_TFLOPS = 0.1
        self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)
        # An invalid (string) assignment must keep the previous value.
        strategy._fuse_grad_size_in_TFLOPS = "0.3"
        self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)

    def test_gradient_merge(self):
        self._check_bool_field("gradient_merge")

    def test_gradient_merge_configs(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.gradient_merge_configs = {"k_steps": 4}
        self.assertEqual(strategy.gradient_merge_configs["k_steps"], 4)

    def test_lars(self):
        self._check_bool_field("lars")

    def test_lamb(self):
        self._check_bool_field("lamb")

    def test_a_sync(self):
        # Unlike the other boolean fields, a_sync rejects a wrong-typed
        # value with ValueError instead of ignoring it.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.a_sync = True
        self.assertEqual(strategy.a_sync, True)
        strategy.a_sync = False
        self.assertEqual(strategy.a_sync, False)

        with self.assertRaises(ValueError):
            strategy.a_sync = "True"

    def test_a_sync_configs(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.a_sync_configs = {"k_steps": 1000}
        self.assertEqual(strategy.a_sync_configs["k_steps"], 1000)

    def test_elastic(self):
        self._check_bool_field("elastic")

    def test_auto(self):
        self._check_bool_field("auto")

    def test_strategy_prototxt(self):
        """Round-trip a fully configured strategy through a prototxt file."""
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.a_sync = True
        strategy.localsgd = True
        strategy.dgc = True
        strategy.localsgd_configs = {"k_steps": 5}
        strategy.build_strategy = self._configured_build_strategy()
        strategy.execution_strategy = self._configured_execution_strategy()
        strategy.save_to_prototxt("dist_strategy.prototxt")
        strategy2 = paddle.distributed.fleet.DistributedStrategy()
        strategy2.load_from_prototxt("dist_strategy.prototxt")
        self.assertEqual(strategy.dgc, strategy2.dgc)
        # Remove the temporary file so repeated runs start from a clean
        # working directory (the original test leaked it).
        os.remove("dist_strategy.prototxt")

    def test_build_strategy(self):
        # A configured BuildStrategy must be assignable to the strategy.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.build_strategy = self._configured_build_strategy()

    def test_execution_strategy(self):
        # A configured ExecutionStrategy must be assignable to the strategy.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.execution_strategy = self._configured_execution_strategy()

    def test_unknown_strategy(self):
        # Setting an attribute the strategy does not declare must raise.
        strategy = paddle.distributed.fleet.DistributedStrategy()
        with self.assertRaises(TypeError):
            strategy.unknown_key = 'UNK'

    def test_cudnn_exhaustive_search(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.cudnn_exhaustive_search = False
        self.assertEqual(strategy.cudnn_exhaustive_search, False)
        strategy.cudnn_exhaustive_search = "True"
        self.assertEqual(strategy.cudnn_exhaustive_search, False)

    def test_cudnn_batchnorm_spatial_persistent(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.cudnn_batchnorm_spatial_persistent = False
        self.assertEqual(strategy.cudnn_batchnorm_spatial_persistent, False)
        strategy.cudnn_batchnorm_spatial_persistent = "True"
        self.assertEqual(strategy.cudnn_batchnorm_spatial_persistent, False)

    def test_conv_workspace_size_limit(self):
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.conv_workspace_size_limit = 1000
        self.assertEqual(strategy.conv_workspace_size_limit, 1000)
        strategy.conv_workspace_size_limit = "400"
        self.assertEqual(strategy.conv_workspace_size_limit, 1000)
        # NOTE(review): presumably applies the cudnn/conv settings to the
        # process environment — confirm against DistributedStrategy internals.
        strategy._enable_env()


if __name__ == '__main__':
    unittest.main()