fleet_base.py
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import copy
import warnings
import paddle
from paddle.fluid.framework import dygraph_only
from paddle.fluid import compiler
from .role_maker import UserDefinedRoleMaker, PaddleCloudRoleMaker, RoleMakerBase
from .strategy_compiler import StrategyCompiler
from .distributed_strategy import DistributedStrategy
from .meta_optimizer_factory import MetaOptimizerFactory
from .runtime_factory import RuntimeFactory
from .util_factory import UtilFactory
from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.fluid.dygraph import parallel_helper


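# Guard decorator: the wrapped Fleet APIs need the runtime handle that is
# created lazily in `minimize`; raise a clear error if it is missing.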
def _inited_runtime_handler_(func):
    def __impl__(*args, **kwargs):
        cls = args[0]

        if cls._runtime_handle is None:
            raise ValueError("Fleet can not find suitable runtime handler")

        return func(*args, **kwargs)

    return __impl__


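# Guard decorator: in a non-distributed run the wrapped API has nothing to
# do, so emit a warning and return early instead.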
def _is_non_distributed_check_(func):
    def __impl__(*args, **kwargs):
        cls = args[0]

        if (cls._role_maker is not None and
                cls._role_maker._is_non_distributed()):
            warnings.warn(
                "%s() function doesn't work when using a non-distributed fleet."
                % (func.__name__))
            return

        return func(*args, **kwargs)

    return __impl__


inited_runtime_handler = wrap_decorator(_inited_runtime_handler_)
is_non_distributed_check = wrap_decorator(_is_non_distributed_check_)


class Fleet(object):
    """
    Unified API for distributed training of PaddlePaddle
66
    Please reference the https://github.com/PaddlePaddle/FleetX for details
67 68 69 70 71


    Returns:
        Fleet: A Fleet instance

72
    Example for collective training:
73 74
        .. code-block:: python

75
            import paddle.distributed.fleet as fleet
76 77 78

            fleet.init(is_collective=True)

79 80 81
            strategy = fleet.DistributedStrategy()
            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97

            # do distributed training


    Example for parameter server training:

        .. code-block:: python

            import paddle.distributed.fleet as fleet

            fleet.init()

            strategy = fleet.DistributedStrategy()
            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)

98 99
            if fleet.is_first_worker():
                print("this is first worker")
100

101 102
            print("current node index: {}".format(fleet.worker_index()))
            print("total number of worker num: {}".format(fleet.worker_num()))
103

104 105 106
            if fleet.is_worker():
                print("this is worker")
            print("worker endpoints: {}".format(fleet.worker_endpoints(to_string=True)))
107

108 109
            print("server num: {}".format(fleet.server_num()))
            print("server endpoints: {}".format(fleet.server_endpoints(to_string=True)))
110

111 112 113
            if fleet.is_server():
                print("this is server")
            fleet.stop_worker()
114 115


116 117 118
    """

    def __init__(self):
        self._role_maker = None
        self.strategy_compiler = None
        self._is_collective = False
        self._runtime_handle = None
        self._util = None

    def init(self, role_maker=None, is_collective=False):
        """
        Initialize role_maker in Fleet.

        This function is responsible for configuring the distributed
        architecture that your code runs on.

        Args:
            role_maker (RoleMakerBase, optional): A ``RoleMakerBase`` containing the configuration
                of environment variables related to distributed training. If you did not initialize
                the role maker by yourself, it will be initialized to ``PaddleCloudRoleMaker``
                automatically. The default value is None.
            is_collective (bool, optional): Determines whether the program runs in
                collective mode (GPU) or parameter server mode (CPU). True means collective
                training on GPU; False means parameter server training on CPU.
                The default value is False.

        Returns:
            None

        Example 1:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

        Example 2:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init(is_collective=True)

        Example 3:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                role = fleet.PaddleCloudRoleMaker()
                fleet.init(role)

        """
        if role_maker is None:
            if isinstance(is_collective, bool):
                self._is_collective = is_collective
                self._role_maker = PaddleCloudRoleMaker(
                    is_collective=self._is_collective)
            else:
                raise ValueError(
                    "`is_collective` should be an instance of `bool`, but got {}".
                    format(type(is_collective)))
        else:
            if isinstance(role_maker, RoleMakerBase):
                self._role_maker = role_maker
            else:
                raise ValueError(
                    "`role_maker` should be a subclass of `RoleMakerBase`, but got {}".
                    format(type(role_maker)))
        self._role_maker._generate_role()

        self.strategy_compiler = StrategyCompiler()
        # for dygraph collective training, make sure the parallel context is
        # initialized exactly once
        if paddle.fluid.framework.in_dygraph_mode():
            if parallel_helper._is_parallel_ctx_initialized():
                warnings.warn(
                    "The dygraph parallel environment has been initialized.")
            else:
                paddle.distributed.init_parallel_env()

    def _role_maker_(self):
        return self._role_maker

    def is_first_worker(self):
        """
        Check whether the node is the first worker node.

        Returns:
            bool: True if this is the first worker node,
                  False if not.

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.is_first_worker()

        """
        return self._role_maker._is_first_worker()

    def worker_index(self):
        """
        Get current worker index.

        Returns:
            int: node id

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.worker_index()

        """
        return self._role_maker._worker_index()

    def worker_num(self):
        """
        Get current total worker number.

        Returns:
            int: number of workers

        Examples:
            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.worker_num()

        """
        return self._role_maker._worker_num()

    def is_worker(self):
        """
        Check whether the node is an instance of worker.

        Returns:
            bool: True if this is a worker node,
                  False if not.

        Examples:
            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.is_worker()

        """
        return self._role_maker._is_worker()

    def worker_endpoints(self, to_string=False):
        """
        Get current worker endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: worker endpoints

        Examples:
            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.worker_endpoints()

        """
        if to_string:
            return ",".join(self._role_maker._get_trainer_endpoints())
        else:
            return self._role_maker._get_trainer_endpoints()

    def server_num(self):
        """
        Get current total server number.

        Returns:
            int: server number

        Examples:
            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.server_num()

        """
        return len(self._role_maker._get_pserver_endpoints())

    def server_index(self):
        """
        Get current server index.

        Returns:
            int: node id

        Examples:
            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.server_index()

        """
        return self._role_maker._server_index()

    def server_endpoints(self, to_string=False):
        """
        Get current server endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].

        Returns:
            list/string: server endpoints

        Examples:
            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.server_endpoints()

        """
        if to_string:
            return ",".join(self._role_maker._get_pserver_endpoints())
        else:
            return self._role_maker._get_pserver_endpoints()

    def is_server(self):
        """
        Check whether the node is an instance of server.

        Returns:
            bool: True if this is a server node,
                  False if not.

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                fleet.is_server()

        """
        return (self._role_maker._is_server() or
                self._role_maker._is_heter_worker())
    def set_util(self, util):
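        """Set the ``UtilBase`` instance exposed by ``util()`` (internal use)."""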
        self._util = util


    def util(self):
        """
        Utility functions that can be used under certain runtimes.

        Returns:
            UtilBase: instance of UtilBase, can use distributed ops/tools easily.

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                util = fleet.util()
                files = ["1.log", "2.log", "3.log", "4.log"]
                files = util.get_file_shard(files)

        """
        return self._util

    def barrier_worker(self):
        """
        Barrier all workers.

        Returns:
            None
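
        Example (a minimal sketch of a worker-side barrier):

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()
                # every worker blocks here until all workers have arrived
                fleet.barrier_worker()
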
        """
        self._role_maker._barrier("worker")

    @is_non_distributed_check
    @inited_runtime_handler
    def init_worker(self):
        """
        Initialize `Communicator` for parameter server training.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.init_worker()

        """
        self._runtime_handle._init_worker()

    @is_non_distributed_check
    @inited_runtime_handler
    def init_server(self, *args, **kwargs):
        """
        Run the executor to initialize the startup program. If `args` is not
        empty, it also runs `load_persistables` for incremental training.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.init_server()

        """
        self._runtime_handle._init_server(*args, **kwargs)

    @is_non_distributed_check
    @inited_runtime_handler
    def run_server(self):
        """
        Run the parameter server main program with the executor.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                if fleet.is_server():
                    fleet.init_server()
                    fleet.run_server()

        """
        self._runtime_handle._run_server()

    @is_non_distributed_check
    @inited_runtime_handler
    def stop_worker(self):
        """
        Stop `Communicator` and notify the parameter servers that training
        is complete.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                fleet.stop_worker()

        """
        self._runtime_handle._stop_worker()

    def save_inference_model(self,
                             executor,
                             dirname,
                             feeded_var_names,
                             target_vars,
                             main_program=None,
                             export_for_deployment=True):
        """
        Save an inference model for later inference.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                import paddle.fluid as fluid
                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                exe = fluid.Executor(fluid.CPUPlace())
                # feed names and fetch vars come from the network you built
                fleet.save_inference_model(exe, "dirname",
                                           feeded_var_names, target_vars)

        """

        self._runtime_handle._save_inference_model(
            executor, dirname, feeded_var_names, target_vars, main_program,
            export_for_deployment)

    def save_persistables(self, executor, dirname, main_program=None):
        """
        Saves all persistable variables from :code:`main_program` to
        the folder :code:`dirname`.

        The :code:`dirname` is used to specify the folder where persistable
        variables are going to be saved.

        Args:
            executor(Executor): The executor to run for saving persistable variables.
                                You can refer to :ref:`api_guide_executor_en` for
                                more details.

            dirname(str, optional): The saving directory path.
                                When you need to save the parameter to the memory, set it to None.
            main_program(Program, optional): The program whose persistable variables will
                                             be saved. Default: None.

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle.distributed.fleet as fleet
                import paddle.fluid as fluid

                fleet.init()

                # build net
                # fleet.distributed_optimizer(...)

                exe = fluid.Executor(fluid.CPUPlace())
                fleet.save_persistables(exe, "dirname", fluid.default_main_program())

        """

        self._runtime_handle._save_persistables(executor, dirname, main_program)

    def distributed_optimizer(self, optimizer, strategy=None):
        """
        Optimizer for distributed training.

        For distributed training, this method rebuilds a new instance of
        DistributedOptimizer, which has the basic Optimizer functionality as
        well as special features for distributed training.

        Args:
            optimizer(Optimizer): The optimizer to be used in distributed training.
            strategy(DistributedStrategy): Extra properties for distributed optimizer.

        Returns:
            Fleet: instance of fleet.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.distributed.fleet as fleet
                role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
                fleet.init(role)
                strategy = fleet.DistributedStrategy()
                optimizer = paddle.optimizer.SGD(learning_rate=0.001)
                optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)

        """
        self.user_defined_optimizer = optimizer
        if paddle.fluid.framework.in_dygraph_mode():
            return self

        if strategy is None:
            strategy = DistributedStrategy()
        self.user_defined_strategy = strategy
        self.valid_strategy = None
        return self

    @dygraph_only
    def distributed_model(self, model):
        """
        Return distributed data parallel model (Only work in dygraph mode).

        Args:
            model (Layer): the user-defined model which inherits Layer.

        Returns:
            distributed data parallel model which inherits Layer.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)
                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. enable dynamic mode
                paddle.disable_static()

                # 2. initialize fleet environment
                fleet.init(is_collective=True)

                # 3. create layer & optimizer
                layer = LinearNet()
                loss_fn = nn.MSELoss()
                adam = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=layer.parameters())

                # 4. get data_parallel model using fleet
                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                # 5. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                print("loss:", loss.numpy())

                loss = dp_layer.scale_loss(loss)
                loss.backward()
                dp_layer.apply_collective_grads()

                adam.step()
                adam.clear_grad()

        """
        assert model is not None
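        # wrap the user layer with DataParallel so gradients can be aggregated
        # across trainers (see `scale_loss`/`apply_collective_grads` above)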
        self.model = paddle.DataParallel(model)
        return self.model

    @dygraph_only
    def state_dict(self):
        """
        Get state dict information from optimizer.
        (Only work in dygraph mode)

        Returns:
            state_dict(dict): dict contains all the Tensors used by optimizer

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)
                state_dict = adam.state_dict()
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.state_dict()

    @dygraph_only
    def set_state_dict(self, state_dict):
        """
        Load optimizer state dict.
        (Only work in dygraph mode)

        Args:
            state_dict(dict): Dict contains all the Tensors needed by optimizer

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)
                state_dict = adam.state_dict()
                paddle.framework.save(state_dict, "paddle_dy")
                para_state_dict, opti_state_dict = paddle.framework.load("paddle_dy")
                adam.set_state_dict(opti_state_dict)
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.set_state_dict(state_dict)

    @dygraph_only
    def set_lr(self, value):
        """
        Set the value of the learning rate manually in the optimizer. 
        (Only work in dygraph mode)

        Args:
            value (float|Tensor): the value of learning rate

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                lr_list = [0.2, 0.3, 0.4, 0.5, 0.6]
                for i in range(5):
                    adam.set_lr(lr_list[i])
                    lr = adam.get_lr()
                    print("current lr is {}".format(lr))
                # Print:
                #    current lr is 0.2
                #    current lr is 0.3
                #    current lr is 0.4
                #    current lr is 0.5
                #    current lr is 0.6
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.set_lr(value)

    @dygraph_only
    def get_lr(self):
        """
        Get current step learning rate.
        (Only work in dygraph mode)

        Returns:
            float: The learning rate of the current step.

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle
                from paddle.distributed import fleet

                paddle.disable_static()
                fleet.init(is_collective=True)

                value = np.arange(26).reshape(2, 13).astype("float32")
                a = paddle.fluid.dygraph.to_variable(value)

                layer = paddle.nn.Linear(13, 5)
                adam = paddle.optimizer.Adam(learning_rate=0.01, parameters=layer.parameters())

                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                lr = adam.get_lr()
                print(lr) # 0.01
        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.get_lr()

    @dygraph_only
    def step(self):
        """
        Execute the optimizer once.
        (Only work in dygraph mode)

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. enable dynamic mode
                paddle.disable_static()

                # 2. initialize fleet environment
                fleet.init(is_collective=True)

                # 3. create layer & optimizer
                layer = LinearNet()
                loss_fn = nn.MSELoss()
                adam = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=layer.parameters())

                # 4. get data_parallel model using fleet
                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                # 5. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                print("loss:", loss.numpy())

                loss = dp_layer.scale_loss(loss)
                loss.backward()
                dp_layer.apply_collective_grads()

                adam.step()
                adam.clear_grad()


        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.step()

    @dygraph_only
    def clear_grad(self):
        """
        Clear the gradients of all optimized parameters for model.
        (Only work in dygraph mode)

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.nn as nn
                from paddle.distributed import fleet

                class LinearNet(nn.Layer):
                    def __init__(self):
                        super(LinearNet, self).__init__()
                        self._linear1 = nn.Linear(10, 10)
                        self._linear2 = nn.Linear(10, 1)

                    def forward(self, x):
                        return self._linear2(self._linear1(x))

                # 1. enable dynamic mode
                paddle.disable_static()

                # 2. initialize fleet environment
                fleet.init(is_collective=True)

                # 3. create layer & optimizer
                layer = LinearNet()
                loss_fn = nn.MSELoss()
                adam = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=layer.parameters())

                # 4. get data_parallel model using fleet
                adam = fleet.distributed_optimizer(adam)
                dp_layer = fleet.distributed_model(layer)

                # 5. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)

                print("loss:", loss.numpy())

                loss = dp_layer.scale_loss(loss)
                loss.backward()
                dp_layer.apply_collective_grads()

                adam.step()
                adam.clear_grad()

        """
        # imitate target optimizer retrieval
        return self.user_defined_optimizer.clear_grad()

    def minimize(self,
                 loss,
                 startup_program=None,
                 parameter_list=None,
                 no_grad_set=None):
        """
        Add distributed operations to minimize ``loss`` by updating ``parameter_list``.

        Args:
            loss (Variable): A ``Variable`` containing the value to minimize.
            startup_program (Program, optional): :ref:`api_fluid_Program` for
                initializing parameters in ``parameter_list``. The default value
                is None, at this time :ref:`api_fluid_default_startup_program` will be used.
            parameter_list (Iterable, optional): Iterable of ``Variable`` or ``Variable.name`` to update
                to minimize ``loss``. The default value is None, at this time all parameters
                will be updated.
            no_grad_set (set, optional): Set of ``Variable``  or ``Variable.name`` that don't need
                to be updated. The default value is None.

        Returns:
            tuple: tuple (optimize_ops, params_grads), A list of operators appended
            by minimize and a list of (param, grad) variable pairs, param is
            ``Parameter``, grad is the gradient value corresponding to the parameter.
            The returned tuple can be passed to ``fetch_list`` in ``Executor.run()`` to
            indicate program pruning. If so, the program will be pruned by ``feed`` and
            ``fetch_list`` before run, see details in ``Executor``.

        Examples:

            .. code-block:: python

                import paddle
                import paddle.distributed.fleet as fleet

                paddle.enable_static()

                # toy network; the sizes below are placeholders
                hid_dim = 10
                label_dim = 2
                input_x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
                input_y = paddle.static.data(name='y', shape=[None, 1], dtype='int64')
                fc_1 = paddle.fluid.layers.fc(input=input_x, size=hid_dim, act='tanh')
                fc_2 = paddle.fluid.layers.fc(input=fc_1, size=hid_dim, act='tanh')
                prediction = paddle.fluid.layers.fc(input=[fc_2], size=label_dim, act='softmax')
                cost = paddle.fluid.layers.cross_entropy(input=prediction, label=input_y)
                avg_cost = paddle.fluid.layers.mean(x=cost)

                role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
                fleet.init(role)
                strategy = fleet.DistributedStrategy()
                optimizer = paddle.optimizer.SGD(learning_rate=0.001)
                optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
                optimizer.minimize(avg_cost)

                # for more examples, please reference https://github.com/PaddlePaddle/FleetX

        """
        if paddle.fluid.framework.in_dygraph_mode():
            # imitate target optimizer retrieval
            target_opt = self.user_defined_optimizer
            return target_opt.minimize(loss)

        context = {}
        # cache original feed forward program
        self.origin_main_program = loss.block.program
        context["origin_main_program"] = self.origin_main_program
        context["loss"] = loss
        if startup_program is None:
            self.origin_startup_program = \
                paddle.static.default_startup_program().clone(for_test=False)
            startup_program = paddle.static.default_startup_program()
        else:
            self.origin_startup_program = \
                startup_program.clone(for_test=False)
        context["origin_startup_program"] = startup_program
        context["role_maker"] = self._role_maker

        # compile time
        distributed_optimizer_list = \
            MetaOptimizerFactory()._get_valid_meta_optimizers(
                self.user_defined_optimizer)
        context["user_defined_strategy"] = copy.copy(self.user_defined_strategy)

        # trigger the auto-parallel in a very strict condition
        # strategy = DistributedStrategy()
        # strategy.auto = True
        # optimizer = paddle.optimizer.SGD(learning_rate=0.1)
        # optimizer = fleet.distributed_optimizer(optimizer, strategy)
        if self.user_defined_strategy._is_strict_auto():
            # turn on all the strategy for each optimizer
            for opt in distributed_optimizer_list:
                opt._enable_strategy(self.user_defined_strategy, context)

        valid_optimizer_list = []
        valid_graph_optimizer_list = []
        can_not_apply_optimizer_list = []
        # recall meta optimizers for ranking
        for opt in distributed_optimizer_list:
            opt._set_basic_info(loss, self._role_maker,
                                self.user_defined_optimizer,
                                self.user_defined_strategy)
            if opt._can_apply() and not opt._is_graph_out():
                valid_optimizer_list.append(opt)
            elif opt._can_apply() and opt._is_graph_out():
                valid_graph_optimizer_list.append(opt)
            else:
                can_not_apply_optimizer_list.append(opt)
        # combine recalled meta optimizers to be a valid meta optimizer
        meta_optimizer, graph_optimizer = \
            self.strategy_compiler.generate_optimizer(
                loss, self._role_maker, self.user_defined_optimizer,
                self.user_defined_strategy, valid_optimizer_list,
                valid_graph_optimizer_list)
        valid_strategy = self.strategy_compiler._get_valid_strategy(
            self.user_defined_strategy, can_not_apply_optimizer_list)

        context["valid_strategy"] = valid_strategy

        self.valid_strategy = valid_strategy
        self.valid_strategy._enable_env()

        optimize_ops = []
        params_grads = []
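        # non-distributed fallback: compile the program for local execution
        # and defer directly to the original user optimizer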
        if self._role_maker._is_non_distributed() and not self._is_collective:
            if self._runtime_handle is None:
                self._runtime_handle = RuntimeFactory()._create_runtime(context)

            compiled_program = compiler.CompiledProgram(
                self.origin_main_program).with_data_parallel(
                    loss_name=loss.name, share_vars_from=None)
            loss.block.program._graph = compiled_program
            return self.user_defined_optimizer.minimize(
                loss,
                startup_program=startup_program,
                parameter_list=parameter_list,
                no_grad_set=no_grad_set)

        if meta_optimizer:
            optimize_ops, params_grads = meta_optimizer.minimize(
                loss,
                startup_program=startup_program,
                parameter_list=parameter_list,
                no_grad_set=no_grad_set)

            default_program = paddle.static.default_main_program()

            # the meta optimizer may have built a new main program; keep the
            # global default pointing at the program that owns `loss`
            if id(default_program) != id(loss.block.program):
                paddle.fluid.framework.switch_main_program(loss.block.program)

        else:
            optimize_ops, params_grads = self.user_defined_optimizer.minimize(
                loss,
                startup_program=startup_program,
                parameter_list=parameter_list,
                no_grad_set=no_grad_set)
        context["program_optimize_ops"] = optimize_ops
        context["program_params_grads"] = params_grads
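        # `zero` appears to be a ZeRO-style sharding switch in this branch;
        # when it is enabled, skip any graph-level optimizer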
        if self.user_defined_strategy.zero:
            graph_optimizer = None

        if graph_optimizer:
            optimize_ops, params_grads = graph_optimizer.minimize(
                loss,
                startup_program=startup_program,
                parameter_list=parameter_list,
                no_grad_set=no_grad_set)
            # since we do not encourage users to use graph operations,
            # optimize_ops and params_grads are mostly None when a graph
            # optimizer takes effect, i.e. users can no longer modify the
            # current computation graph
            context["graph_optimize_ops"] = optimize_ops
            context["graph_optimize_grads"] = params_grads

        if self._runtime_handle is None:
            self._runtime_handle = RuntimeFactory()._create_runtime(context)

        if self._util is None:
            self._util = UtilFactory()._create_util(context)

        return optimize_ops, params_grads