#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import copy
import warnings
import paddle
import os
from types import MethodType
import numpy as np
from paddle.fluid.framework import dygraph_only, _global_flags
from paddle.fluid import compiler
from .role_maker import UserDefinedRoleMaker, PaddleCloudRoleMaker, RoleMakerBase
from .strategy_compiler import StrategyCompiler
from .distributed_strategy import DistributedStrategy
from .meta_optimizer_factory import MetaOptimizerFactory
from .runtime_factory import RuntimeFactory
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.fluid.dygraph import parallel_helper
from paddle.fluid.ir import apply_build_strategy
from . import topology as tp
from .topology import ParallelMode
from ..meta_parallel import TensorParallel, model_parallel_random_seed
from ..meta_parallel import PipelineParallel, ShardingParallel
from ..meta_optimizers import HybridParallelOptimizer, HeterParallelOptimizer
from paddle import _C_ops
from paddle.fluid import core
from paddle.fluid.dygraph import to_variable

__all__ = []


def apply_ir_passes(main_program, startup_program, config):
    build_strategy = config._user_defined_strategy.build_strategy._copy()
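    # When FLAGS_apply_pass_to_program is off, skip applying IR passes here
    # and just return the (copied) build strategy.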
    if not _global_flags()['FLAGS_apply_pass_to_program']:
        return build_strategy

    pipeline_opt = getattr(main_program, "_pipeline_opt", {})
    if pipeline_opt:
        main_program = pipeline_opt["section_program"]
        startup_program = startup_program._pipeline_opt["startup_program"]

    pass_attrs = {"use_cuda": config._is_collective}
    fuse_all_reduce = config._user_defined_strategy.fuse_all_reduce_ops
    if fuse_all_reduce and build_strategy.fuse_all_optimizer_ops:
        # FIXME(zjl): currently, fuse_all_optimizer_ops conflicts with
        # fuse_all_reduce_ops because RawProgramOptimizer also inserts
        # coalesce_tensor into the program, so the two passes may disagree
        # on which vars should be fused.
        warnings.warn(
            'Currently, the fuse_all_optimizer_ops pass has conflict with fuse_all_reduce_ops pass. Disable the fuse_all_optimizer_ops pass temporarily.'
        )
        build_strategy.fuse_all_optimizer_ops = False

    return apply_build_strategy(main_program, startup_program, build_strategy,
                                pass_attrs)


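# Decorator helpers for Fleet methods: the first requires that a runtime
# handle has already been created, and the second turns the call into a
# warning no-op when the job is non-distributed.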
def _inited_runtime_handler_(func):
    def __impl__(*args, **kwargs):
        cls = args[0]

        if cls._runtime_handle is None:
            raise ValueError("Fleet can not find suitable runtime handler")

        return func(*args, **kwargs)

    return __impl__


def _is_non_distributed_check_(func):
    def __impl__(*args, **kwargs):
        cls = args[0]

        if cls._role_maker is not None and cls._role_maker._is_non_distributed(
        ) is True:
            warnings.warn(
                "%s() function does not take effect when running with a "
                "non-distributed fleet." % (func.__name__))
            return

        return func(*args, **kwargs)

    return __impl__


inited_runtime_handler = wrap_decorator(_inited_runtime_handler_)
is_non_distributed_check = wrap_decorator(_is_non_distributed_check_)


class Fleet(object):
    """
    Unified API for distributed training of PaddlePaddle.

    Please refer to https://github.com/PaddlePaddle/FleetX for details.


    Returns:
        Fleet: A Fleet instance

    Example for collective training:

        .. code-block:: python

            import paddle
            paddle.enable_static()
            import paddle.distributed.fleet as fleet

            fleet.init(is_collective=True)

            strategy = fleet.DistributedStrategy()
            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)

            # do distributed training


    Example for parameter server training:

        .. code-block:: python

            import paddle
            paddle.enable_static()
            import paddle.distributed.fleet as fleet
            strategy = fleet.DistributedStrategy()
            fleet.init(strategy=strategy)

            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer = fleet.distributed_optimizer(optimizer)

            if fleet.is_first_worker():
                print("this is first worker")

            print("current node index: {}".format(fleet.worker_index()))
            print("total number of worker num: {}".format(fleet.worker_num()))

            if fleet.is_worker():
                print("this is worker")
            print("worker endpoints: {}".format(fleet.worker_endpoints(to_string=True)))

            print("server num: {}".format(fleet.server_num()))
            print("server endpoints: {}".format(fleet.server_endpoints(to_string=True)))

            if fleet.is_server():
                print("this is server")
            fleet.stop_worker()


    """

    def __init__(self):
        self._role_maker = None
        self.strategy_compiler = None
        self._is_collective = False
        self._runtime_handle = None
        self._util = None
        self._context = {}

    def init(self, role_maker=None, is_collective=False, strategy=None):
        """
        Initialize role_maker in Fleet.

        This function determines the distributed architecture that your
        code will run on.

        Args:
            role_maker (RoleMakerBase, optional): A ``RoleMakerBase`` containing the configuration
                of environment variables related to distributed training. If you do not initialize
                the role maker yourself, it will be automatically initialized to ``PaddleCloudRoleMaker``.
                The default value is None.
            is_collective (Boolean, optional): A ``Boolean`` variable that determines whether the
                program runs distributed training on CPU (False) or GPU (True).
                The default value is False.
            strategy (DistributedStrategy): Extra properties for distributed training. 
                For details, please refer to paddle.distributed.fleet.DistributedStrategy. Default: None.


        Returns:
            None

        Examples1:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

        Examples2:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init(is_collective=True)

        Examples3:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                role = fleet.PaddleCloudRoleMaker()
                fleet.init(role)

        Examples4:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                strategy = fleet.DistributedStrategy()
                fleet.init(strategy=strategy)

        """
        if strategy is None:
            strategy = DistributedStrategy()
        self._user_defined_strategy = copy.deepcopy(strategy)

        if role_maker is None:
            if isinstance(is_collective, bool):
                self._is_collective = is_collective
                self._role_maker = PaddleCloudRoleMaker(
                    is_collective=self._is_collective)
            else:
                raise ValueError(
                    "`is_collective` should be instance of `bool`, but got {}".
                    format(type(is_collective)))
        else:
            if isinstance(role_maker, RoleMakerBase):
                self._role_maker = role_maker
                self._is_collective = role_maker._is_collective
            else:
                raise ValueError(
                    "`role_maker` should be subclass of `RoleMakerBase`, but got {}".
                    format(type(role_maker)))
        self._role_maker._generate_role()

        import paddle.distributed.fleet as fleet
        fleet.util._set_role_maker(self._role_maker)

        self.strategy_compiler = StrategyCompiler()

        if self._role_maker._is_non_distributed() and self._is_collective:
            if paddle.fluid.core.is_compiled_with_cuda():
                gpus_num = paddle.fluid.core.get_cuda_device_count()
                if gpus_num != 1:
                    raise ValueError(
                        "CUDA_VISIBLE_DEVICES shoule be set only 1 card if you use `python` to launch fleet program."
                    )

        if paddle.fluid.framework.in_dygraph_mode():
            if self.worker_num() == 1:
                # if worker_num is 1, should construct default topology & hcg
                self._topology = tp.CommunicateTopology()
                self._hcg = tp.HybridCommunicateGroup(self._topology)
                return
            if parallel_helper._is_parallel_ctx_initialized():
                warnings.warn(
                    "The dygraph parallel environment has been initialized.")
            else:
                # FLAGS_nccl_nrings is used for dynamic graph multi-stream communication
                if "FLAGS_nccl_nrings" in os.environ:
                    warnings.warn(
                        "You have set the environment variable FLAGS_nccl_nrings "
                        "outside the program, so the nccl_comm_num in "
                        "DistributedStrategy will not take effect here.")
                else:
                    os.environ["FLAGS_nccl_nrings"] = str(
                        self._user_defined_strategy.nccl_comm_num)
                paddle.distributed.init_parallel_env()

            # hybrid parallel not support for npu/xpu
            if self._user_defined_strategy.heter_ccl_mode == False:
                # init hybrid parallel environment in dygraph
                if tp._HYBRID_PARALLEL_GROUP is None:
                    self._init_hybrid_parallel_env()
                else:
                    warnings.warn(
                        "The dygraph hybrid parallel environment has been initialized."
                    )
        elif self._is_collective:
            use_sharding = self._user_defined_strategy.sharding

            # global group
            global_rank = self.worker_index()
            global_world_size = self.worker_num()
            # NOTE(wangxi): see sharding_optimizer
            global_ring_id = 3 if use_sharding else 0
            global_ranks = list(range(global_world_size))

            if tp._HYBRID_PARALLEL_GROUP is None: tp._CommunicateGroup()
            cg = tp._HYBRID_PARALLEL_GROUP
            self._hcg = cg
            cg.set_comm_group('global', global_rank, global_world_size,
                              global_ring_id, global_ranks)

            use_tensor_parallel = self._user_defined_strategy.tensor_parallel
            use_mp = use_sharding or use_tensor_parallel

            # hybrid group
            if use_mp is False: return

            mp_degree_sharding = 1
            mp_degree_tensor_parallel = 1
            if use_sharding:
                sharding_configs = self._user_defined_strategy.sharding_configs
                mp_degree_sharding = int(sharding_configs['mp_degree'])

            if use_tensor_parallel:
                tensor_parallel_configs = self._user_defined_strategy.tensor_parallel_configs
                mp_degree_tensor_parallel = int(tensor_parallel_configs[
                    'tensor_parallel_degree'])

            if use_sharding and use_tensor_parallel:
                assert mp_degree_sharding == mp_degree_tensor_parallel

            mp_degree = mp_degree_sharding if use_sharding else mp_degree_tensor_parallel

            if mp_degree > 1:
                assert global_world_size % mp_degree == 0
                # NOTE(wangxi): mp_ring_id sync with sharding_optimizer.py _build_groups
                mp_ring_id = 0
                mp_rank = global_rank % mp_degree
                mp_group_id = global_rank // mp_degree
                mp_group_ranks = [
                    idx for idx in global_ranks
                    if idx // mp_degree == mp_group_id
                ]
                cg.set_comm_group('model', mp_rank, mp_degree, mp_ring_id,
                                  mp_group_ranks)

    def _init_hybrid_parallel_env(self):
        """initialize the hybrid environment
        """
        self.hybrid_configs = self._user_defined_strategy.hybrid_configs
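        # hybrid_configs is a dict of parallel degrees, for example
        # (illustrative values only):
        #   {"dp_degree": 2, "mp_degree": 2, "pp_degree": 1, "sharding_degree": 1}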
        self.dp_degree = self.hybrid_configs["dp_degree"]
        self.mp_degree = self.hybrid_configs["mp_degree"]
        self.pp_degree = self.hybrid_configs["pp_degree"]
        self.sharding_degree = self.hybrid_configs["sharding_degree"]

        assert self.mp_degree >= 0, "mp_degree should be greater than or equal to 0"
        assert self.pp_degree >= 0, "pp_degree should be greater than or equal to 0"
        assert self.sharding_degree >= 0, "sharding_degree should be greater than or equal to 0"

        self.mp_degree = max(self.mp_degree, 1)
        self.pp_degree = max(self.pp_degree, 1)

        if self.dp_degree < 0:
            nranks = paddle.distributed.get_world_size()
            self.dp_degree = nranks // (self.mp_degree * self.pp_degree)

        self.dp_degree = max(self.dp_degree, 1)

        self._topology = tp.CommunicateTopology(
            hybrid_group_names=["data", "pipe", "sharding", "model"],
            dims=[
                self.dp_degree, self.pp_degree, self.sharding_degree,
                self.mp_degree
            ])

        self._hcg = tp.HybridCommunicateGroup(self._topology)

        if self.mp_degree > 1:
            tensor_parallel_configs = self._user_defined_strategy.tensor_parallel_configs
            tensor_init_seed = tensor_parallel_configs["tensor_init_seed"]
            if tensor_init_seed == -1:
                model_parallel_random_seed()
            else:
                model_parallel_random_seed(tensor_init_seed)

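    # The two accessors below expose the hybrid-parallel topology and
    # communicate group built during fleet.init() in dygraph mode.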
    def get_hybrid_communicate_group(self):
        assert self._hcg is not None
        return self._hcg

    def get_hybrid_parallel_topology(self):
        assert self._topology is not None
        return self._topology

    def is_first_worker(self):
        """
        Check whether the node is the first instance of worker.

        Returns:
            bool: True if this is the first node of worker,
                  False if not.

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.is_first_worker()

        """
        return self._role_maker._is_first_worker()

    def worker_index(self):
        """
        Get current worker index.

        Returns:
            int: node id

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.worker_index()

        """
        return self._role_maker._worker_index()

    def worker_num(self):
        """
        Get current total worker number.

        Returns:
            int: worker numbers

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.worker_num()

        """
        return self._role_maker._worker_num()

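    # Thin wrappers around the role maker's topology queries (node count,
    # local rank, and local/world device ids).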
    def node_num(self):
        return self._role_maker._get_node_num()

    def local_rank(self):
        return self._role_maker._get_local_rank()

    def local_device_ids(self):
        return self._role_maker._get_local_device_ids()

    def world_device_ids(self):
        return self._role_maker._get_world_device_ids()

    def is_worker(self):
        """
        Check whether the node is an instance of worker.

        Returns:
            bool: True if this is a node of worker,
                  False if not.

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.is_worker()

        """
        return self._role_maker._is_worker()

    def worker_endpoints(self, to_string=False):
        """
        Get current worker endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: worker endpoints

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.worker_endpoints()

        """
        if to_string:
            return ",".join(self._role_maker._get_trainer_endpoints())
        else:
            return self._role_maker._get_trainer_endpoints()

    def server_num(self):
        """
        Get current total server number.

        Returns:
            int: server number

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.server_num()
        """
        return len(self._role_maker._get_pserver_endpoints())

    def server_index(self):
        """
        Get current server index.

        Returns:
            int: node id

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.server_index()

        """
        return self._role_maker._server_index()

    def server_endpoints(self, to_string=False):
        """
        Get current server endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: server endpoints

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.server_endpoints()

        """

        if to_string:
            return ",".join(self._role_maker._get_pserver_endpoints())
        else:
            return self._role_maker._get_pserver_endpoints()

    def is_server(self):
        """
        Check whether the node is an instance of server.

        Returns:
            bool: True if this is a node of server,
                  False if not.

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.is_server()

        """
        return self._role_maker._is_server()

    def barrier_worker(self):
        """
        Barrier all workers.

        Returns:
            None
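
        Examples:

            A minimal usage sketch (assuming a parameter-server style job
            initialized via ``fleet.init()``):

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                if fleet.is_worker():
                    fleet.barrier_worker()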
        """
        self._role_maker._barrier("worker")

    @is_non_distributed_check
    @inited_runtime_handler
    def init_worker(self, scopes=None):
        """
        Initialize the `Communicator` for parameter server training.


        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.init_worker()

        """
        self._runtime_handle._init_worker(scopes)

    @is_non_distributed_check
    @inited_runtime_handler
    def init_server(self, *args, **kwargs):
        """
        Run the executor to initialize the startup program. If `args` is not
        empty, load_persistables will also be run for incremental training.


        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.init_server()

        """
        self._runtime_handle._init_server(*args, **kwargs)

    @is_non_distributed_check
    @inited_runtime_handler
    def load_model(self, path, mode):
        """
        load fleet model from path


        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.load_model("path", "mode")

        """
        self._runtime_handle.load_model(path, mode)

    @is_non_distributed_check
    @inited_runtime_handler
    def run_server(self):
        """
        run_server runs the pserver main program with the executor.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                if fleet.is_server():
                    fleet.init_server()
                    fleet.run_server()

        """
        self._runtime_handle._run_server()

    @is_non_distributed_check
    @inited_runtime_handler
    def stop_worker(self):
        """
        Stop the `Communicator` and notify the parameter servers that training is complete.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.stop_worker()

        """
        self._runtime_handle._stop_worker()

    @is_non_distributed_check
    @inited_runtime_handler
    def save(self, dirname, feed=[], fetch=[], **configs):
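        """
        Save the model: if ``feed``/``fetch`` are given, an inference model is
        saved; otherwise persistable parameters are saved (``configs["mode"]``
        selects the persistables saving mode).

        A minimal usage sketch (assuming a trained parameter-server job;
        ``"dirname"`` is a placeholder path):

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)
                # ... training ...

                fleet.save("dirname")
        """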
        inference = True

        if not feed and not fetch:
            inference = False

        place = paddle.CPUPlace()
        executor = paddle.static.Executor(place)

        if inference:
            feeded_var_names = []
            fetch_var_names = []

            for var in feed:
                if isinstance(var, str):
                    feeded_var_names.append(var)
                elif isinstance(var, paddle.static.Variable):
                    feeded_var_names.append(var.name)
                else:
                    raise ValueError("feed must be [str|Variable]")

            for var in fetch:
                if isinstance(var, str):
                    fetch_var_names.append(var)
                elif isinstance(var, paddle.static.Variable):
                    fetch_var_names.append(var.name)
                else:
                    raise ValueError("feed must be [str|Variable]")

            fetch_vars = [
                paddle.static.default_main_program().global_block().var(name)
                for name in fetch_var_names
            ]

            self._runtime_handle._save_inference_model(
                executor, dirname, feeded_var_names, fetch_vars, None, True, 0)
        else:
            increment_mode = 0
            if "mode" in configs:
                increment_mode = int(configs["mode"])
            self._runtime_handle._save_persistables(
                executor, dirname, main_program=None, mode=increment_mode)

    @is_non_distributed_check
    @inited_runtime_handler
    def save_inference_model(self,
                             executor,
                             dirname,
                             feeded_var_names,
                             target_vars,
                             main_program=None,
                             export_for_deployment=True,
                             mode=0):
        """
        save inference model for inference.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.init_server()

        """
        # warnings.warn(
        #     "'save_inference_model' is a deprecated, will be deleted after v2.2.0, Please use fleet.save instead."
        # )

        self._runtime_handle._save_inference_model(
            executor, dirname, feeded_var_names, target_vars, main_program,
            export_for_deployment, mode)

    @is_non_distributed_check
    @inited_runtime_handler
    def save_persistables(self, executor, dirname, main_program=None, mode=0):
        """

        Saves all persistable tensors from :code:`main_program` to
        the folder :code:`dirname`.

        The :code:`dirname` is used to specify the folder where persistable tensors
        are going to be saved. If you would like to save tensors in separate
        files, set :code:`filename` to None.

        Args:
            executor(Executor): The executor to run for saving persistable tensors.
                                You can refer to :ref:`api_guide_executor_en` for
                                more details.

            dirname(str, optional): The saving directory path.
                                When you need to save the parameter to the memory, set it to None.
            main_program(Program, optional): The program whose persistable tensors will
                                             be saved. Default: None.


        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle
                paddle.enable_static()
                import paddle.distributed.fleet as fleet

                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                exe = paddle.static.Executor(paddle.CPUPlace())
                fleet.save_persistables(exe, "dirname", paddle.static.default_main_program())

        """
        # warnings.warn(
        #     "'save_persistables' is a deprecated, will be deleted after v2.2.0, Please use fleet.save instead."
        # )

        self._runtime_handle._save_persistables(executor, dirname, main_program,
                                                mode)

    def shrink(self, threshold=None):
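        """
        Forward a shrink request to the runtime handle (typically used to
        shrink sparse parameter-server tables); ``threshold`` is passed
        through unchanged.
        """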
        self._runtime_handle._shrink(threshold)

    def distributed_optimizer(self, optimizer, strategy=None):
        """
        Optimizer for distributed training.

        For the distributed training, this method would rebuild a new instance of DistributedOptimizer.
        Which has basic Optimizer function and special features for distributed training.

        Args:
            optimizer(Optimizer): The user-defined optimizer to be used for distributed training.
            strategy(DistributedStrategy): Extra properties for distributed optimizer. 
                It is recommended to use DistributedStrategy in fleet.init(). The strategy
                here is for compatibility. If the strategy in fleet.distributed_optimizer() 
                is not None, then it will overwrite the DistributedStrategy in fleet.init(), 
                which will take effect in distributed training.

        Returns:
            Fleet: instance of fleet.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.distributed.fleet as fleet
                fleet.init(is_collective=True)
                strategy = fleet.DistributedStrategy()
                optimizer = paddle.optimizer.SGD(learning_rate=0.001)
                optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)

        """
        self.user_defined_optimizer = optimizer

        if strategy is not None:
            if self._is_collective:
                warnings.warn(
                    "It is recommended to use DistributedStrategy "
                    "in fleet.init(). The strategy here is only for compatibility. "
                    "If the strategy in fleet.distributed_optimizer() is "
                    "not None, then it will overwrite the DistributedStrategy in fleet.init(), "
                    "which will take effect in distributed training.")
            self._user_defined_strategy = copy.deepcopy(strategy)

        self._context = {}

        if paddle.fluid.framework.in_dygraph_mode():
            if self.worker_num() > 1:
                if self._user_defined_strategy.heter_ccl_mode == False:
                    return HybridParallelOptimizer(optimizer, self._hcg,
                                                   self._user_defined_strategy)
                else:
                    return HeterParallelOptimizer(optimizer,
                                                  self._user_defined_strategy)
            else:
                return optimizer
        return self

    @dygraph_only
    def distributed_model(self, model):
        """
        Return distributed data parallel model (Only work in dygraph mode)

        Args:
            model (Layer): the user-defined model which inherits Layer.

        Returns:
            distributed data parallel model which inherits Layer.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. initialize fleet environment
                fleet.init(is_collective=True)

                # 2. create layer & optimizer
                layer = LinearNet()
                loss_fn = nn.MSELoss()
                adam = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=layer.parameters())

                # 3. get data_parallel model using fleet
                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                # 4. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                print("loss:", loss.numpy())

                loss.backward()

                adam.step()
                adam.clear_grad()


        """
        assert model is not None, "model should not be None"
        if self.worker_num() <= 1:
            return model

        if self._user_defined_strategy.heter_ccl_mode == True:
            distributed_model = paddle.DataParallel(
                model,
                comm_buffer_size=self._user_defined_strategy.
                fuse_grad_size_in_MB,
                last_comm_buffer_size=self._user_defined_strategy.
                last_comm_group_size_MB,
                find_unused_parameters=self._user_defined_strategy.
                find_unused_parameters)
            return distributed_model

        if self._hcg.get_parallel_mode() == ParallelMode.SHARDING_PARALLEL:
            distributed_model = ShardingParallel(
                model, self._hcg, strategy=self._user_defined_strategy)
        elif self._hcg.get_parallel_mode() == ParallelMode.DATA_PARALLEL:

            # NOTE (JZ-LIANG) init parameters broadcast within sharding group
            # normally it should be done inside DataParallel
            if self.sharding_degree > 1:
                from paddle.distributed.fleet.utils.hybrid_parallel_util import broadcast_mp_parameters, broadcast_sharding_parameters
                assert self.sharding_degree == self._hcg.get_sharding_parallel_world_size(
                )
                broadcast_sharding_parameters(model, self._hcg)
            distributed_model = paddle.DataParallel(
                model,
                comm_buffer_size=self._user_defined_strategy.
                fuse_grad_size_in_MB,
                last_comm_buffer_size=self._user_defined_strategy.
                last_comm_group_size_MB,
                find_unused_parameters=self._user_defined_strategy.
                find_unused_parameters)
        elif self._hcg.get_parallel_mode() == ParallelMode.TENSOR_PARALLEL:
            distributed_model = TensorParallel(
                model, self._hcg, strategy=self._user_defined_strategy)
        elif self._hcg.get_parallel_mode() == ParallelMode.PIPELINE_PARALLEL:
            distributed_model = PipelineParallel(
                model, self._hcg, strategy=self._user_defined_strategy)

        return distributed_model

    @dygraph_only
    def state_dict(self):
        """
        Get state dict information from optimizer.
        (Only work in dygraph mode)

        Returns: 
            state_dict(dict): a dict containing all the Tensors used by the optimizer

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)
                state_dict = adam.state_dict()
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.state_dict()

    @dygraph_only
    def set_state_dict(self, state_dict):
        """
        Load optimizer state dict.
        (Only work in dygraph mode)

        Args: 
            state_dict(dict): a dict containing all the Tensors needed by the optimizer

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)
                state_dict = adam.state_dict()
                paddle.save(state_dict, "paddle_dy")
                para_state_dict = paddle.load("paddle_dy")
                adam.set_state_dict(para_state_dict)
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.set_state_dict(state_dict)

    @dygraph_only
    def set_lr(self, value):
        """
        Set the value of the learning rate manually in the optimizer. 
        (Only work in dygraph mode)

        Args:
            value (float|Tensor): the value of learning rate

        Returns: 
            None 

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.set_lr(value)

    @dygraph_only
    def get_lr(self):
        """
        Get current step learning rate.
        (Only work in dygraph mode)

        Returns:
            float: The learning rate of the current step.

        Examples:

            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.to_tensor(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                lr = adam.get_lr()
                print(lr) # 0.01
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.get_lr()

    @dygraph_only
    def step(self):
        """
        Execute the optimizer once.
        (Only work in dygraph mode)

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. initialize fleet environment
                fleet.init(is_collective=True)

                # 2. create layer & optimizer
                layer = LinearNet()
                loss_fn = nn.MSELoss()
                adam = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=layer.parameters())

                # 3. get data_parallel model using fleet
                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                # 4. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                print("loss:", loss.numpy())

                loss.backward()

                adam.step()
                adam.clear_grad()


        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.step()

    @dygraph_only
    def clear_grad(self):
        """
        Clear the gradients of all optimized parameters for model.
        (Only work in dygraph mode)

        Returns: 
            None

        Examples:

            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. initialize fleet environment
                fleet.init(is_collective=True)

                # 2. create layer & optimizer
                layer = LinearNet()
                loss_fn = nn.MSELoss()
                adam = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=layer.parameters())

                # 3. get data_parallel model using fleet
                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                # 4. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                print("loss:", loss.numpy())

                loss.backward()

                adam.step()
                adam.clear_grad()

        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.clear_grad()

    def _get_amp_optimizer(self):
        # imitate target optimizer retrieval
        amp_optimizer = None
        for optimizer in self.strategy_compiler._get_applied_meta_optimizer():
            if hasattr(optimizer, 'amp_init'):
                amp_optimizer = optimizer
                break

        if amp_optimizer is None:
            if hasattr(self.user_defined_optimizer, 'amp_init'):
                amp_optimizer = self.user_defined_optimizer

        assert amp_optimizer is not None, \
            "amp_init can only be used when the amp(auto mixed precision) strategy is turned on."
        return amp_optimizer

    def get_loss_scaling(self):
        """Return the real-time loss scaling factor.
        """
        amp_optimizer = self._get_amp_optimizer()
        return amp_optimizer.get_loss_scaling()

    def amp_init(self,
                 place,
                 scope=None,
                 test_program=None,
                 use_fp16_test=False):
        """
        Init the amp training, such as casting fp32 parameters to fp16 type.
  
        Args:
            place(CUDAPlace): place is used to initialize 
                fp16 parameters with fp32 values.
            scope(Scope): The scope is used to find fp32 parameters.
            test_program(Program): The program is used for testing.
            use_fp16_test(bool): Whether to use fp16 testing.
            
        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                import paddle.nn.functional as F
                paddle.enable_static()

                def run_example_code():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
                    conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
                    # 1) Use fp16_guard to control the range of fp16 kernels used.
                    with paddle.static.amp.fp16_guard():
                        bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
                        pool = F.max_pool2d(bn, kernel_size=2, stride=2)
                        hidden = paddle.static.nn.fc(pool, size=10)
                        loss = paddle.mean(hidden)
                    # 2) Create the optimizer and set `multi_precision` to True.
                    # Setting `multi_precision` to True can avoid the poor accuracy
                    # or the slow convergence in a way. 
                    optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
                    # 3) These ops in `custom_black_list` will keep in the float32 computation type.
                    amp_list = paddle.static.amp.CustomOpLists(
                        custom_black_list=['pool2d'])
                    # 4) The entry of Paddle AMP.
                    # Enable pure fp16 training by setting `use_pure_fp16` to True.
                    optimizer = paddle.static.amp.decorate(
                        optimizer,
                        amp_list,
                        init_loss_scaling=128.0,
                        use_dynamic_loss_scaling=True,
                        use_pure_fp16=True)
                    # If you don't use the default_startup_program(), you should pass
                    # your defined `startup_program` into `minimize`.
                    optimizer.minimize(loss)
                    exe.run(paddle.static.default_startup_program())
                    # 5) Use `amp_init` after FP32 parameters initialization(such as `exe.run(startup_program)`).
                    # If you want to perform the testing process, you should pass `test_program` into `amp_init`.
                    optimizer.amp_init(place, scope=paddle.static.global_scope())
                    
                if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
                    run_example_code()       
        """
        amp_optimizer = self._get_amp_optimizer()
        return amp_optimizer.amp_init(place, scope, test_program, use_fp16_test)

    def _final_strategy(self):
        if "valid_strategy" not in self._context:
            print(
                "WARNING: You may need to call minimize function before this function is called"
            )
            return {}
        else:
            return self._context["valid_strategy"]

    def _get_applied_meta_list(self):
        if "applied_meta_list" not in self._context:
            print(
                "WARNING: You may need to call minimize function before _get_applied_meta_list called"
            )
            return []
        else:
            return self._context["applied_meta_list"]

    def _get_applied_graph_list(self):
        if "applied_graph_list" not in self._context:
            print(
                "WARNING: You may need to call minimize function before _get_applied_graph_list called"
            )
            return []
        else:
            return self._context["applied_graph_list"]

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add distributed operations to minimize ``loss`` by updating ``parameter_list``.

        Args:
            loss (Tensor): A ``Tensor`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Tensor`` or ``Tensor.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Tensor``  or ``Tensor.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) tensor pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:

            .. code-block:: python

                import paddle
                paddle.enable_static()
                import paddle.distributed.fleet as fleet
                import paddle.nn.functional as F

                hid_dim = 10
                label_dim = 2
                input_x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
                input_y = paddle.static.data(name='y', shape=[None, 1], dtype='int64')
                fc_1 = paddle.static.nn.fc(x=input_x, size=hid_dim, activation='tanh')
                fc_2 = paddle.static.nn.fc(x=fc_1, size=hid_dim, activation='tanh')
                prediction = paddle.static.nn.fc(x=[fc_2], size=label_dim, activation='softmax')
                cost = F.cross_entropy(input=prediction, label=input_y)
                avg_cost = paddle.mean(x=cost)

                fleet.init(is_collective=True)
                strategy = fleet.DistributedStrategy()
                optimizer = paddle.optimizer.SGD(learning_rate=0.001)
                optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
                optimizer.minimize(avg_cost)

                # for more examples, please reference https://github.com/PaddlePaddle/FleetX

        """
        if not isinstance(loss, list):
            return self._minimize_impl(loss, startup_program, parameter_list,
                                       no_grad_set)
        else:
            if paddle.fluid.framework.in_dygraph_mode(
            ) or self._role_maker._is_non_distributed() or self._is_collective:
                raise ValueError("loss can be list only in PS mode")
            return self._minimize_losses_impl(loss, startup_program,
                                              parameter_list, no_grad_set)

    def _minimize_impl(self,
                       loss,
                       startup_program=None,
                       parameter_list=None,
                       no_grad_set=None):
        context = {}
        context["user_defined_strategy"] = copy.deepcopy(
            self._user_defined_strategy)
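        # In dygraph mode the distributed wrapping is presumed to have been done
        # by distributed_optimizer(), so simply delegate to the wrapped optimizer.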
        if paddle.fluid.framework.in_dygraph_mode():
            # imitate target optimizer retrieval
            target_opt = self.user_defined_optimizer
            self._context = context
            return target_opt.minimize(loss)

        # cache original feed forward program
        self.origin_main_program = loss.block.program
        # add distributed attr
        if not hasattr(self.origin_main_program, "distributed_info_"):
            setattr(self.origin_main_program, "distributed_info_", dict())
            self.origin_main_program.distributed_info_[
                "dp_degree"] = self._user_defined_strategy.sharding_configs[
                    "dp_degree"]
            self.origin_main_program.distributed_info_[
                "mp_degree"] = self._user_defined_strategy.sharding_configs[
                    "mp_degree"]
            self.origin_main_program.distributed_info_[
                "pp_degree"] = self._user_defined_strategy.sharding_configs[
                    "pp_degree"]
            self.origin_main_program.distributed_info_[
                "sharding_degree"] = self._user_defined_strategy.sharding_configs[
                    "sharding_degree"]

        context["origin_main_program"] = self.origin_main_program
        context["origin_main_programs"] = [self.origin_main_program]
        context["loss"] = loss
        if startup_program is None:
            self.origin_startup_program = \
                paddle.static.default_startup_program().clone(for_test=False)
            startup_program = paddle.static.default_startup_program()
        else:
            self.origin_startup_program = \
                startup_program.clone(for_test=False)

        context["origin_startup_program"] = startup_program
        context["origin_startup_programs"] = [startup_program]
        context["role_maker"] = self._role_maker

        # Use the auto-parallel's routines instead
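        # (note: this branch returns the distributed startup/main programs as
        #  two extra outputs, unlike the regular two-element return below)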
        if self._user_defined_strategy.semi_auto or self._user_defined_strategy.auto_search:
            from ...auto_parallel.parallelizer import AutoParallelizer
            auto_parallelizer = AutoParallelizer(self)
            optimize_ops, params_grads, dist_startup_prog, dist_main_prog = auto_parallelizer.parallelize(
                loss, startup_program, parameter_list, no_grad_set)

            return optimize_ops, params_grads, dist_startup_prog, dist_main_prog

        # compile time
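        # collect every registered meta optimizer that may wrap the
        # user-defined optimizer; they are filtered and ranked below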
        distributed_optimizer_list = \
            MetaOptimizerFactory()._get_valid_meta_optimizers(
                self.user_defined_optimizer)

        context["user_defined_strategy"] = copy.deepcopy(
            self._user_defined_strategy)
        copy_user_defined_strategy = copy.deepcopy(self._user_defined_strategy)

        # trigger the auto-parallel only under a very strict condition, e.g.
        # strategy = DistributedStrategy()
        # strategy.auto = True
        # optimizer = paddle.optimizer.SGD(learning_rate=0.1)
        # optimizer = fleet.distributed_optimizer(optimizer, strategy)
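        # (strict auto is presumed to mean: strategy.auto is True and no other
        #  strategy option has been configured by the user)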
        if copy_user_defined_strategy._is_strict_auto():
            # turn on the corresponding strategy options for every meta optimizer
            for opt in distributed_optimizer_list:
                opt._enable_strategy(copy_user_defined_strategy, context)

        valid_optimizer_list = []
        valid_graph_optimizer_list = []
        can_not_apply_optimizer_list = []
        # recall meta optimizers for ranking
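        # each candidate either rewrites the program (valid_optimizer_list),
        # works on the compiled graph (valid_graph_optimizer_list), or is
        # recorded as not applicable (can_not_apply_optimizer_list)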
        for opt in distributed_optimizer_list:
            opt._set_basic_info(loss, self._role_maker,
                                self.user_defined_optimizer,
                                copy_user_defined_strategy)
            if opt._can_apply() and not opt._is_graph_out():
                valid_optimizer_list.append(opt)
            elif opt._can_apply() and opt._is_graph_out():
                valid_graph_optimizer_list.append(opt)
            else:
                can_not_apply_optimizer_list.append(opt)
        # combine the recalled meta optimizers into one valid meta optimizer
        meta_optimizer, graph_optimizer = \
            self.strategy_compiler.generate_optimizer(
                loss, self._role_maker, self.user_defined_optimizer,
                copy_user_defined_strategy, valid_optimizer_list,
                valid_graph_optimizer_list)

        valid_strategy = self.strategy_compiler._get_valid_strategy(
            copy_user_defined_strategy, can_not_apply_optimizer_list)

        context["valid_strategy"] = copy.deepcopy(valid_strategy)
        # print("valid_strategy:", context["valid_strategy"])
        # print("user_defined_strategy:", context["user_defined_strategy"])

        applied_meta_list = self.strategy_compiler._get_applied_meta_list()
        applied_graph_list = self.strategy_compiler._get_applied_graph_list()

        context['applied_meta_list'] = applied_meta_list
        context['applied_graph_list'] = applied_graph_list

        self._context = context

        self.valid_strategy = valid_strategy
        self.valid_strategy._enable_env()

        optimize_ops = []
        params_grads = []

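        # single-process, non-collective run: fall back to the user-defined
        # optimizer on a CompiledProgram so local execution still works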
        if self._role_maker._is_non_distributed() and not self._is_collective:
            if self._runtime_handle is None:
                self._runtime_handle = RuntimeFactory()._create_runtime(context)

            compiled_program = compiler.CompiledProgram(
                self.origin_main_program).with_data_parallel(
                    loss_name=loss.name, share_vars_from=None)
            loss.block.program._graph = compiled_program
            return self.user_defined_optimizer.minimize(
                loss, startup_program, parameter_list, no_grad_set=no_grad_set)

        if meta_optimizer:
            # print("before minimize program id:", id(loss.block.program))
            optimize_ops, params_grads = meta_optimizer.minimize(
                loss, startup_program, parameter_list, no_grad_set=no_grad_set)
            # print("after minimize program id:", id(loss.block.program))

            default_program = paddle.static.default_main_program()
            # print("default program id:", id(default_program))

            if id(default_program) != id(loss.block.program):
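                # the meta optimizer may have built a new main program; switch
                # the default main program to the one holding the optimized graph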
                paddle.fluid.framework.switch_main_program(loss.block.program)
            # print("default program id after switch:", id(default_program))

        else:
            optimize_ops, params_grads = self.user_defined_optimizer.minimize(
                loss, startup_program, parameter_list, no_grad_set=no_grad_set)

        context["program_optimize_ops"] = optimize_ops
        context["program_params_grads"] = params_grads

        if graph_optimizer:
            # print("before graph minimize program id:", id(loss.block.program))
            optimize_ops, params_grads = graph_optimizer.minimize(
                loss, startup_program, parameter_list, no_grad_set=no_grad_set)
            # since we do not encourage users to use graph operations,
            # if a graph optimizer takes effect, optimize_ops and params_grads
            # are usually None, i.e. users can not modify the current
            # computation graph anymore
            context["graph_optimize_ops"] = optimize_ops
            context["graph_optimize_grads"] = params_grads
        else:
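            # no graph optimizer took effect, so apply the build-strategy IR
            # passes (see apply_ir_passes) to the program directly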
            apply_ir_passes(loss.block.program, startup_program, self)

        if not self._role_maker._is_heter_parameter_server_mode:
            program = paddle.static.default_main_program()
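            # record world size / rank and any trainer_desc configs on the
            # program so they can be picked up when the program is executed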
            opt_info = {} if program._fleet_opt is None else program._fleet_opt
            opt_info["mpi_size"] = self.worker_num()
            opt_info["mpi_rank"] = self.worker_index()
            for k, v in self._user_defined_strategy.trainer_desc_configs.items(
            ):
                opt_info[k] = v
            program._fleet_opt = opt_info

        if self._runtime_handle is None:
            self._runtime_handle = RuntimeFactory()._create_runtime(context)

        import paddle.distributed.fleet as fleet
        fleet.util._set_strategy(context["valid_strategy"])

        return optimize_ops, params_grads

    def _minimize_losses_impl(self,
                              losses,
                              startup_programs=None,
                              parameter_list=None,
                              no_grad_set=None):
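        # parameter-server only path: each loss owns its own main program,
        # with a matching startup program expected in startup_programs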
        context = {}

        # cache original feed forward program
        self.origin_main_program = losses[0].block.program
        context["origin_main_program"] = self.origin_main_program
        context["origin_main_programs"] = []
        for loss in losses:
            context["origin_main_programs"].append(loss.block.program)
        context["loss"] = losses

        if startup_programs is None:
            if len(losses) == 1:
                startup_programs = [paddle.static.default_startup_program()]
            else:
                raise ValueError(
                    "startup_program can't be None when loss is list.")
        self.origin_startup_program = startup_programs[0].clone(for_test=False)
        context["origin_startup_program"] = startup_programs[0]
        context["origin_startup_programs"] = []
        for program in startup_programs:
            context["origin_startup_programs"].append(program)

        context["role_maker"] = self._role_maker

        context["user_defined_strategy"] = copy.deepcopy(
            self._user_defined_strategy)

        context["valid_strategy"] = copy.deepcopy(self._user_defined_strategy)

        self._context = context

        self.valid_strategy = context["valid_strategy"]
        self.valid_strategy._enable_env()

        optimize_ops = []
        params_grads = []

        from ..meta_optimizers import ParameterServerOptimizer
        ps_optimizer = ParameterServerOptimizer(self.user_defined_optimizer)
        ps_optimizer._set_basic_info(losses, self._role_maker,
                                     self.user_defined_optimizer,
                                     self._user_defined_strategy)
        optimize_ops, params_grads = ps_optimizer.minimize_losses_impl(
            losses, startup_programs, parameter_list, no_grad_set=no_grad_set)

        # default_program = paddle.static.default_main_program()

        # if id(default_program) != id(losses[0].block.program):
        #     paddle.fluid.framework.switch_main_program(losses[0].block.program)

        context["program_optimize_ops"] = optimize_ops
        context["program_params_grads"] = params_grads

        for loss in losses:
            program = loss.block.program
            opt_info = {} if program._fleet_opt is None else program._fleet_opt
            opt_info["mpi_size"] = self.worker_num()
            opt_info["mpi_rank"] = self.worker_index()
            for k, v in self._user_defined_strategy.trainer_desc_configs.items(
            ):
                opt_info[k] = v
            program._fleet_opt = opt_info
            # print("fleet base opt info:", id(program), program._fleet_opt)

        if self._runtime_handle is None:
            self._runtime_handle = RuntimeFactory()._create_runtime(context)

        import paddle.distributed.fleet as fleet
        fleet.util._set_strategy(context["valid_strategy"])

        return optimize_ops, params_grads

    @dygraph_only
    def distributed_scaler(self, scaler):
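        """
        Patch ``scaler`` so that AMP gradient unscaling also checks for
        inf/nan across the ranks of a hybrid-parallel group.

        A minimal usage sketch for a dygraph hybrid-parallel run (the tiny
        ``Linear`` model and SGD optimizer are only placeholders):

        .. code-block:: python

            import paddle
            import paddle.distributed.fleet as fleet

            fleet.init(is_collective=True)

            model = paddle.nn.Linear(10, 10)
            optimizer = paddle.optimizer.SGD(learning_rate=0.001,
                                             parameters=model.parameters())

            model = fleet.distributed_model(model)
            optimizer = fleet.distributed_optimizer(optimizer)
            scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
            scaler = fleet.distributed_scaler(scaler)
        """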
        def unscale_method(self, optimizer):
            if not self._enable:
                return
            if getattr(optimizer, '_param_groups', None) and isinstance(
                    optimizer._param_groups[0], dict):
                param_grads = []
                param_grads_fp16 = []
                param_grads_fp32 = []
                for group in optimizer._param_groups:
                    for param in group['params']:
                        if param._grad_ivar() is not None:
                            param_grads.append(param._grad_ivar())
                            if param._grad_ivar(
                            ).dtype == core.VarDesc.VarType.FP16:
                                param_grads_fp16.append(param._grad_ivar())
                            else:
                                param_grads_fp32.append(param._grad_ivar())
            else:
                param_grads = [
                    param._grad_ivar() for param in optimizer._parameter_list
                    if param._grad_ivar() is not None
                ]
                param_grads_fp16 = [
                    param._grad_ivar() for param in optimizer._parameter_list
                    if (param._grad_ivar() is not None) and (param._grad_ivar(
                    ).dtype == core.VarDesc.VarType.FP16)
                ]
                param_grads_fp32 = [
                    param._grad_ivar() for param in optimizer._parameter_list
                    if (param._grad_ivar() is not None) and (param._grad_ivar(
                    ).dtype == core.VarDesc.VarType.FP32)
                ]
            temp_found_inf_fp16 = to_variable(np.array([0]).astype(np.bool_))
            temp_found_inf_fp32 = to_variable(np.array([0]).astype(np.bool_))
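            # unscale and check fp16 and fp32 gradients in separate calls, then
            # merge the two found_inf flags below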
            if len(param_grads_fp16):
                _C_ops.check_finite_and_unscale(param_grads_fp16, self._scale,
                                                param_grads_fp16,
                                                temp_found_inf_fp16)
            if len(param_grads_fp32):
                _C_ops.check_finite_and_unscale(param_grads_fp32, self._scale,
                                                param_grads_fp32,
                                                temp_found_inf_fp32)

            self._found_inf = 1 if temp_found_inf_fp16 or temp_found_inf_fp32 else 0
            is_found_inf = paddle.to_tensor([self._found_inf], dtype="int32")

            # TODO(shenliang03): since the dp allreduce in the optimizer runs
            # after the grad scaler, check_finite needs to synchronize global
            # information. In the future, we should use check_group to speed this up.
            paddle.distributed.all_reduce(
                is_found_inf, op=paddle.distributed.ReduceOp.MAX, group=None)
            self._found_inf = is_found_inf.numpy()[0]

        # Only tensor_parallel and pipeline_parallel need to modify scaler
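        # (pure data parallel keeps gradients identical across ranks after the
        #  allreduce, so the default unscale logic is presumed sufficient there)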
        if self._hcg.get_parallel_mode() in (ParallelMode.TENSOR_PARALLEL,
                                             ParallelMode.PIPELINE_PARALLEL):
            scaler._unscale = MethodType(unscale_method, scaler)

        return scaler