#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import paddle
from .meta_optimizers import HybridParallelOptimizer, HeterParallelOptimizer
from paddle.distributed import fleet
from .utils.log_util import logger


def _dygraph_distributed_optimizer(optimizer, strategy=None):
    """
24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
    Optimizer for distributed training.
    For the distributed training, this method would rebuild a new instance of DistributedOptimizer.
    Which has basic Optimizer function and special features for distributed training.
    Args:
        optimizer(Optimizer): The executor to run for init server.
        strategy(DistributedStrategy): Extra properties for distributed optimizer.
            It is recommended to use DistributedStrategy in fleet.init(). The strategy
            here is for compatibility. If the strategy in fleet.distributed_optimizer()
            is not None, then it will overwrite the DistributedStrategy in fleet.init(),
            which will take effect in distributed training.
    Returns:
        Fleet: instance of fleet.
    Examples:
        .. code-block:: python
            import paddle
            import paddle.distributed.fleet as fleet
            fleet.init(is_collective=True)
            strategy = fleet.DistributedStrategy()
            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
    """
    fleet_env = fleet.fleet
    fleet_env.user_defined_optimizer = optimizer

    if strategy is not None:
        if fleet_env._is_collective:
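            # In collective mode a strategy passed here overrides the one
            # given to fleet.init(), so warn the user about the override.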
            logger.warning(
                "It is recommended to use DistributedStrategy "
                "in fleet_env.init(). The strategy here is only for compatibility. "
                "If the strategy in fleet_env.distributed_optimizer() is "
                "not None, then it will overwrite the DistributedStrategy in fleet_env.init(), "
                "which will take effect in distributed training."
            )
        fleet_env._user_defined_strategy = copy.deepcopy(strategy)

    fleet_env._context = {}

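    # With more than one worker, wrap the user-defined optimizer so that it
    # applies the configured distributed strategy.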
    if fleet_env.worker_num() > 1:
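        # heter_ccl_mode is off: use the hybrid-parallel optimizer built on
        # the hybrid communication group (fleet_env._hcg).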
        if not fleet_env._user_defined_strategy.heter_ccl_mode:
            return HybridParallelOptimizer(
                optimizer, fleet_env._hcg, fleet_env._user_defined_strategy
            )
        else:
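            # heter_ccl_mode is on: use the optimizer for heterogeneous
            # parallel training.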
            return HeterParallelOptimizer(
                optimizer, fleet_env._user_defined_strategy
            )
    else:
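        # Single worker: no distributed wrapping is needed, so return the
        # user-defined optimizer unchanged.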
        return optimizer


def distributed_optimizer(*args, **kwargs):
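    """
    Dispatch to the dygraph implementation (_dygraph_distributed_optimizer)
    when running in dynamic-graph mode, otherwise forward to
    fleet.fleet.distributed_optimizer for static-graph training.
    """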
    if paddle.framework._non_static_mode():
        return _dygraph_distributed_optimizer(*args, **kwargs)
    else:
        return fleet.fleet.distributed_optimizer(*args, **kwargs)