#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import json
import shlex
import copy
import pathlib
import subprocess
import logging
import pickle
import time
import paddle
from paddle.fluid.backward import append_backward
from paddle.distributed.utils import get_logger
from paddle.distributed.fleet import cloud_utils
import paddle.fluid.core as core
from paddle.fluid import program_guard
from paddle.distributed.passes import new_pass, PassContext
from .dist_context import DistributedContext
from .dist_context import get_default_distributed_context
from .dist_context import set_default_distributed_context
from .completion import Completer
from .partitioner import Partitioner
from .process_group import get_all_process_groups
from .process_group import get_process_group
from .process_group import get_world_process_group
from .process_group import _g_process_group_map, ProcessGroup
from .utils import make_data_unshard
from .utils import set_grad_var_shape
from .utils import print_program_with_dist_attr
from .utils import SerialProgramInfo
from .reshard import Resharder
from .cluster import Cluster
from .mapper import mapping
from .dist_op import DistributedOperator
from .dist_tensor import DistributedTensor
from .planner import Planner

_logger = get_logger(logging.INFO)


class AutoParallelizer:
    """
    AutoParallelizer is the main controller class for the auto parallel process,
    and that process is triggered inside the wrapped parallelize function.
    To facilitate auto parallelization, it holds information about the program,
    the cluster and the related context. In this basic version, the program
    information is retrieved from the Fleet object, the cluster information from
    the newly created Cluster object, and the context information from the newly
    created DistributedContext.
    """

    def __init__(self, fleet):
        self._fleet = fleet
        self._optimizer = self._fleet.user_defined_optimizer
        self._dist_strategy = self._fleet._user_defined_strategy
        self._dist_context = DistributedContext()
        self._cluster = None
        self._cluster_topo_path = os.getenv("PADDLE_CLUSTER_TOPO_PATH", None)
        if self._cluster_topo_path is not None:
            self._cluster = Cluster()
            self._cluster.build_from_file(self._cluster_topo_path)
        # Prepare information for auto mapping
        self._rank_mapping_path = os.getenv("PADDLE_RANK_MAPPING_PATH", None)
        enable_auto_mapping_env = os.getenv("PADDLE_ENABLE_AUTO_MAPPING", None)
        self._enable_auto_mapping = enable_auto_mapping_env is not None
        self._pass_context = PassContext()

        need_rank_mapping = os.getenv("PADDLE_NEED_RANK_MAPPING")
        self._need_rank_mapping = (need_rank_mapping is not None
                                   and need_rank_mapping.lower() == 'true')

    def _remove_distributed_attrs(self, main_program):
        suffix = core.kAutoParallelSuffix()
        # Distributed attributes for variables have already been removed
        # in a previous step, so only op attributes are handled here.
        for block in main_program.blocks:
            for op in block.ops:
                for attr_name in op.attr_names:
                    if suffix in attr_name:
                        op._remove_attr(attr_name)

    def _apply_pre_optimization_passes(self, main_program, startup_program,
                                       loss, params_grads, no_grad_set):
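        # Passes applied to the serial program before partitioning: mixed
        # precision (amp / pure fp16) and recompute rewrite it in place.
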
        # apply amp pass
        if self._dist_strategy.amp:
            config = copy.deepcopy(self._dist_strategy.amp_configs)
            config["dist_context"] = self._dist_context
            config["params_grads"] = params_grads
            config["loss"] = loss
            if config["use_pure_fp16"]:
                config["base_opt"] = self._optimizer
                auto_parallel_fp16_pass = new_pass("auto_parallel_fp16", config)
                auto_parallel_fp16_pass.apply([main_program], [startup_program],
                                              self._pass_context)
            else:
                auto_parallel_amp_pass = new_pass("auto_parallel_amp", config)
                auto_parallel_amp_pass.apply([main_program], [startup_program],
                                             self._pass_context)

        # apply recompute pass
        if self._dist_strategy.recompute:
            config = copy.deepcopy(self._dist_strategy.recompute_configs)
            config["dist_context"] = self._dist_context
            config["no_grad_set"] = copy.deepcopy(no_grad_set)
            config["loss"] = loss
            auto_parallel_recompute_pass = new_pass("auto_parallel_recompute",
                                                    config)
            auto_parallel_recompute_pass.apply([main_program],
                                               [startup_program],
                                               self._pass_context)

    def _generate_backward(self, main_program, startup_program, loss,
                           parameter_list, no_grad_set, callbacks):
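        # append_backward records the mapping between forward ops and their
        # generated backward ops in dist_op_context; the completer uses that
        # mapping below to annotate the backward ops with dist attrs.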

        with program_guard(main_program, startup_program):
            params_grads = append_backward(
                loss,
                parameter_list,
                no_grad_set,
                callbacks,
                distop_context=self._dist_context.dist_op_context)
        self._completer = Completer(self._dist_context)
        self._completer.complete_backward_annotation(main_program)
        self._dist_context.block_state.parse_backward_blocks(main_program)
        return params_grads

    def _apply_optimize(self, main_program, startup_program, params_grads):

        with program_guard(main_program, startup_program):
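            # Apply gradients on a deepcopy of the user-defined optimizer so
            # that repeated calls (one per rank) leave the original untouched.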
            optimize_ops = copy.deepcopy(
                self._optimizer).apply_gradients(params_grads)

        # complete the dist attrs of the newly added optimizer (update) ops
        self._completer = Completer(self._dist_context)
        self._completer.complete_update_annotation(main_program)

        return optimize_ops

    def _apply_post_optimization_passes(self, main_program, startup_program,
                                        rank, params_grads):
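        # Passes applied to the partitioned distributed program:
        # sharding and gradient merge.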

        if self._dist_strategy.sharding:
            config = copy.deepcopy(self._dist_strategy.sharding_configs)
            config["dist_context"] = self._dist_context
            config["params_grads"] = params_grads
            config["global_rank"] = rank
            auto_parallel_sharding_pass = new_pass("auto_parallel_sharding",
                                                   config)
            auto_parallel_sharding_pass.apply([main_program], [startup_program],
                                              self._pass_context)

        if self._dist_strategy.gradient_merge:
            config = copy.deepcopy(self._dist_strategy.gradient_merge_configs)
            config["dist_context"] = self._dist_context
            config["params_grads"] = params_grads
            auto_parallel_gradient_merge_pass = new_pass(
                "auto_parallel_gradient_merge_pass", config)
            auto_parallel_gradient_merge_pass.apply([main_program],
                                                    [startup_program],
                                                    self._pass_context)

    def _get_dist_program(self, rank, dist_context=None, relaunch_phase=False):
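        # Pipeline for one rank: complete dist attrs on the serial program,
        # generate the serial backward, apply pre-optimization passes,
        # partition the program across ranks, add optimizer ops, reshard,
        # and finally apply post-optimization passes.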
        completed_main_program = None
        serial_main_program = self._main_program.clone()
        serial_startup_program = self._startup_program.clone()
        serial_loss = serial_main_program.global_block().var(self._loss.name)

        # obtain a fully annotated serial program
        if dist_context is None:
            # Annotation completion
            self._dist_context = DistributedContext()
            _logger.info("Start annotation dist attr.")
            self._completer = Completer(self._dist_context)
            completed_main_program = self._completer.complete_forward_annotation(
                serial_main_program)
        else:
            completed_main_program = serial_main_program
            self._dist_context = copy.deepcopy(dist_context)

        # parse forward sub block
        self._dist_context.block_state.parse_forward_blocks(serial_main_program)

        # serial backward pass
        params_grads = self._generate_backward(
            completed_main_program, serial_startup_program, serial_loss,
            self._parameter_list, self._no_grad_set, self._callbacks)

        # apply pre-optimization passes (amp / recompute) on the serial program
        self._apply_pre_optimization_passes(completed_main_program,
                                            serial_startup_program, serial_loss,
                                            params_grads, self._no_grad_set)
        # Logical partition
        partitioner = Partitioner(self._dist_context, rank)
        dist_main_prog, dist_startup_prog, dist_params_grads = partitioner.partition(
            completed_main_program, serial_startup_program, params_grads)

        # TODO refactor the placement of optimizer
        # generate optimize program
        dist_optimize_ops = self._apply_optimize(dist_main_prog,
                                                 dist_startup_prog,
                                                 dist_params_grads)

        set_grad_var_shape(dist_main_prog, self._dist_context)

        make_data_unshard(dist_main_prog, dist_startup_prog, self._dist_context)

        resharder = Resharder(dist_main_prog, dist_startup_prog, rank,
                              self._dist_context, dist_params_grads)
        resharder.reshard()

        self._apply_post_optimization_passes(dist_main_prog, dist_startup_prog,
                                             rank, dist_params_grads)
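        # In the auto-mapping phase, snapshot the global process-group map for
        # this rank's program and reset it so the next rank starts clean.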
        g_process_group_map = None
        if not relaunch_phase:
            g_process_group_map = copy.deepcopy(_g_process_group_map)
            _g_process_group_map.clear()
            _g_process_group_map[0] = ProcessGroup(0, [])
            for process_mesh in self._dist_context._process_meshes:
                _g_process_group_map[0].add_ranks(process_mesh.processes)
        return dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, g_process_group_map

    def parallelize(self,
                    loss,
                    startup_program,
                    parameter_list=None,
                    no_grad_set=None,
                    callbacks=None):
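        # Two launch phases: when auto mapping is enabled and a rank mapping is
        # still needed, build a program per rank, map the programs onto the
        # cluster topology and relaunch; otherwise parallelize the program for
        # the current rank directly.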
        assert startup_program is not None
        self._loss = loss
        self._startup_program = startup_program
        self._main_program = loss.block.program
        self._parameter_list = parameter_list
        self._no_grad_set = no_grad_set
        self._callbacks = callbacks

        if self._enable_auto_mapping and self._need_rank_mapping:
            # Do the mapping pass before parallelization
            assert self._cluster is not None, \
                "The cluster must not be None when using auto mapping."
            dist_programs = {}
            world_process_group = get_world_process_group()
            dist_context = None
            # auto search
            if self._dist_strategy.auto_search:
                logging.info("Start searching dist attr.")
                serial_program_info = SerialProgramInfo(self._main_program,
                                                        self._startup_program,
                                                        self._loss,
                                                        self._optimizer,
                                                        self._cluster)
                planner = Planner(serial_program_info,
                                  self,
                                  algorithm_config={
                                      "name": "mcmc",
                                      "max_search_times": 5
                                  })
                dist_context, _ = planner.search()
                logging.info("End searching dist attr.")

            # serialize the dist context searched by the planner
            if dist_context is not None:
                logging.info("Start serialize searched dist attr")
                cwd = pathlib.Path().resolve()
                searched_dist_context_path = os.path.join(
                    cwd, f"searched_dist_context_{time.time()}.pkl")
                saved_dist_context = {}
                ops_dist_attr = {}
                tensors_dist_attr = {}
                for key, dist_op in dist_context._dist_ops_for_program.items():
                    ops_dist_attr[key] = dist_op.dist_attr
                for key, dist_tensor in dist_context._dist_tensors_for_program.items(
                ):
                    tensors_dist_attr[key] = dist_tensor.dist_attr
                saved_dist_context["ops_dist_attr"] = ops_dist_attr
                saved_dist_context["tensors_dist_attr"] = tensors_dist_attr
                saved_dist_context[
                    "process_meshes"] = dist_context._process_meshes
                with open(searched_dist_context_path,
                          "wb") as dist_context_file:
                    pickle.dump(saved_dist_context, dist_context_file)
                    os.environ[
                        'PADDLE_SEARCHED_DIST_CONTEXT_PATH'] = searched_dist_context_path
                    logging.info(
                        f"End serializing the searched dist attr to {searched_dist_context_path}"
                    )

            for rank in world_process_group.ranks:
                dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, g_process_group_map = self._get_dist_program(
                    rank, dist_context)
                dist_programs[rank] = [dist_main_prog, g_process_group_map]

            # Do the mapping between the distributed program graph and the cluster graph
            rank_mapping_dict = mapping(dist_programs, self._cluster)
            rank_mapping = list(rank_mapping_dict.values())

            # Relaunch the training by using the rank mapping file
            with open(self._rank_mapping_path, "w") as rank_mapping_file:
                json.dump(rank_mapping, rank_mapping_file)

            enable_elastic = os.getenv("PADDLE_ENABLE_ELASTIC")
            enable_elastic = (enable_elastic is not None
                              and enable_elastic.lower() == 'true')
            if enable_elastic:
                print("Auto mapping finished, now do elastic re-launch")
                sys.exit(paddle.distributed.fleet.elastic.manager.
                         ELASTIC_AUTO_PARALLEL_EXIT_CODE)

            original_cmd_args = os.getenv("PADDLE_ORIGINAL_CMD_ARGS")
            rank_mapping_args = " ".join(
                ["--rank_mapping_path", self._rank_mapping_path])
            if os.environ.get("WITH_COVERAGE", "OFF") == "ON":
                coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
            else:
                coverage_args = []
            new_cmd_args = "-m paddle.distributed.fleet.launch" + " " + rank_mapping_args + " " + original_cmd_args
            new_cmd = [sys.executable, "-u"
                       ] + coverage_args + shlex.split(new_cmd_args)
            new_process = subprocess.Popen(new_cmd)
            new_process.wait()
            assert new_process.returncode == 0, \
                "Launch failed with rank mapping"
            print("Successfully do the second launch for auto mapping!")
            sys.exit(0)
        else:
            # Parallelization after the mapping pass
            rank = paddle.distributed.get_rank()
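            # Reuse the dist context searched and serialized during the first
            # launch, if one is available.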
            dist_context = None
            searched_dist_context_path = os.getenv(
                "PADDLE_SEARCHED_DIST_CONTEXT_PATH", None)
            if searched_dist_context_path is not None:
                with open(searched_dist_context_path,
                          "rb") as dist_context_file:
                    saved_dist_context = pickle.load(dist_context_file)
                    dist_context = DistributedContext()
                    for op in self._main_program.global_block().ops:
                        dist_attr = saved_dist_context["ops_dist_attr"][
                            op.desc.id()]
                        dist_op = DistributedOperator(op, dist_attr)
                        dist_context.add_dist_op_for_program(dist_op)

                    vars = self._main_program.global_block().vars
                    for var in vars.values():
                        dist_attr = saved_dist_context["tensors_dist_attr"][
                            var.desc.id()]
                        dist_tensor = DistributedTensor(var, dist_attr)
                        dist_context.add_dist_tensor_for_program(dist_tensor)

                    dist_context._process_meshes = saved_dist_context[
                        "process_meshes"]

            else:
                if self._dist_strategy.auto_search:
                    serial_program_info = SerialProgramInfo(
                        self._main_program,
                        self._startup_program,
                        self._loss,
                        self._optimizer,
                        cluster=self._cluster)
                    planner = Planner(serial_program_info,
                                      self,
                                      algorithm_config={
                                          "name": "mcmc",
                                          "max_search_times": 5
                                      })
                    dist_context, _ = planner.search()

            # rebuild g_process_group
            if dist_context is not None:
                pg0 = get_process_group(0)
                for process_mesh in dist_context._process_meshes:
                    pg0.add_ranks(process_mesh.processes)
            dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, _ = self._get_dist_program(
                rank, dist_context, relaunch_phase=True)

            # NOTE: This is a trick to fix a hang in pipeline mode when the
            # dist context is searched by the planner
            if self._dist_strategy.auto_search:
                is_pipeline = False
                for op in dist_main_prog.global_block().ops:
                    if op.type == "send_v2" or op.type == "recv_v2":
                        is_pipeline = True
                        break
                if is_pipeline:
                    with paddle.static.program_guard(dist_main_prog):
                        paddle.distributed.barrier()

            # Traverse all process groups and instantiate communicators for
            # the groups that contain the current rank.
            all_process_groups = get_all_process_groups()
            for process_group in all_process_groups:
                if rank not in process_group.ranks:
                    continue
                process_group.instantiate()

            # Copy distributed info to the default context
            set_default_distributed_context(self._dist_context)

            # The last step: remove all distributed attributes to be compatible
            # with inference.
            self._remove_distributed_attrs(dist_main_prog)

            return dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k == "_main_program" or k == "_startup_program" or k == "_dist_context" or k == "_fleet" or k == "_loss":
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        return result