#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import json
import shlex
import copy
import pathlib
import subprocess
import logging
import pickle
import time

import paddle
import paddle.fluid.core as core
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
from paddle.distributed.utils.log_utils import get_logger
from paddle.distributed.passes import new_pass, PassContext
from .dist_context import DistributedContext
from .dist_context import set_default_distributed_context
from .completion import Completer
from .partitioner import Partitioner
from .process_group import get_all_process_groups
from .process_group import get_process_group
from .process_group import get_world_process_group
from .process_group import _g_process_group_map, ProcessGroup
from .utils import make_data_unshard
from .utils import set_grad_var_shape
from .utils import SerialProgramInfo
from .reshard import Resharder
from .cluster import Cluster
from .mapper import mapping
from .dist_op import DistributedOperator
from .dist_tensor import DistributedTensor
from .planner import Planner

_logger = get_logger(logging.INFO)


class AutoParallelizer:
    """
    AutoParallelizer is the main controller class to do the auto parallel process.
    And the auto parallel process will be triggered in the wrapped parallelize function.
    To facilitate the auto parallelization, it will contain information about program, cluster and the
    related context. In this basic version, the program information will be retrevied from 
    Fleet object, and the cluster information can be retrevied in the new created Cluster object,
    and the context information can be retrevied in the new created DistributedContext. 
    """

    def __init__(self, fleet):
        self._fleet = fleet
        self._optimizer = self._fleet.user_defined_optimizer
        self._dist_strategy = self._fleet._user_defined_strategy
        self._dist_context = DistributedContext()
        self._cluster = None
        self._cluster_topo_path = os.getenv("PADDLE_CLUSTER_TOPO_PATH", None)
        if self._cluster_topo_path is not None:
            self._cluster = Cluster()
            self._cluster.build_from_file(self._cluster_topo_path)
        # Prepare information for auto mapping
        self._rank_mapping_path = os.getenv("PADDLE_RANK_MAPPING_PATH", None)
        enable_auto_mapping_env = os.getenv("PADDLE_ENABLE_AUTO_MAPPING", None)
        self._enable_auto_mapping = enable_auto_mapping_env is not None
        self._pass_context = PassContext()

        need_rank_mapping = os.getenv("PADDLE_NEED_RANK_MAPPING")
        self._need_rank_mapping = bool(need_rank_mapping and
                                       need_rank_mapping.lower() == 'true')

    def _remove_distributed_attrs(self, main_program):
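        """Strip every op attribute carrying the auto-parallel suffix
        (core.kAutoParallelSuffix()) from the given main_program."""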
        suffix = core.kAutoParallelSuffix()
        # Distributed attributes of variables have already been removed
        # in a previous step, so only op attributes are cleaned up here.
        for block in main_program.blocks:
            for op in block.ops:
                for attr_name in op.attr_names:
                    if suffix in attr_name:
                        op._remove_attr(attr_name)

    def _apply_pre_optimization_passes(self, main_program, startup_program,
                                       loss, params_grads, no_grad_set):
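        """Apply the passes that must run before the optimizer is applied:
        AMP (or pure fp16) and recompute, as enabled by the dist strategy."""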
        # apply amp pass
        if self._dist_strategy.amp:
            config = copy.deepcopy(self._dist_strategy.amp_configs)
            config["dist_context"] = self._dist_context
            config["params_grads"] = params_grads
            config["loss"] = loss
            if config["use_pure_fp16"]:
                config["base_opt"] = self._optimizer
                auto_parallel_fp16_pass = new_pass("auto_parallel_fp16", config)
                auto_parallel_fp16_pass.apply([main_program], [startup_program],
                                              self._pass_context)
            else:
                auto_parallel_amp_pass = new_pass("auto_parallel_amp", config)
                auto_parallel_amp_pass.apply([main_program], [startup_program],
                                             self._pass_context)

        # apply recompute pass
        if self._dist_strategy.recompute:
            config = copy.deepcopy(self._dist_strategy.recompute_configs)
            config["dist_context"] = self._dist_context
            config["no_grad_set"] = copy.deepcopy(no_grad_set)
            config["loss"] = loss
            auto_parallel_recompute_pass = new_pass("auto_parallel_recompute",
                                                    config)
            auto_parallel_recompute_pass.apply([main_program],
                                               [startup_program],
                                               self._pass_context)

    def _generate_backward(self, main_program, startup_program, loss,
                           parameter_list, no_grad_set, callbacks):
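        """Append backward ops for the serial program, complete their
        distributed annotations and parse the backward blocks."""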

        with program_guard(main_program, startup_program):
            params_grads = append_backward(
                loss,
                parameter_list,
                no_grad_set,
                callbacks,
                distop_context=self._dist_context.dist_op_context)
        self._completer = Completer(self._dist_context)
        self._completer.complete_backward_annotation(main_program)
        self._dist_context.block_state.parse_backward_blocks(main_program)
        return params_grads

    def _apply_optimize(self, main_program, startup_program, params_grads):
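        """Apply a copy of the user optimizer to params_grads and complete
        the annotations of the newly added update ops."""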

        optimizer = copy.deepcopy(self._optimizer)
        with program_guard(main_program, startup_program):
            optimize_ops = optimizer.apply_gradients(params_grads)

        self._dist_context._serial_optimizer = optimizer
        # Update the completion annotations for the optimizer ops
        self._completer = Completer(self._dist_context)
        self._completer.complete_update_annotation(main_program)

        return optimize_ops

    def _apply_post_optimization_passes(self, main_program, startup_program,
                                        rank, params_grads):
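        """Apply the passes that run after the optimizer: sharding,
        gradient clipping and gradient merge, as enabled by the strategy."""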

        if self._dist_strategy.sharding:
            config = copy.deepcopy(self._dist_strategy.sharding_configs)
            config["dist_context"] = self._dist_context
            config["params_grads"] = params_grads
            config["global_rank"] = rank
            auto_parallel_sharding_pass = new_pass("auto_parallel_sharding",
                                                   config)
            auto_parallel_sharding_pass.apply([main_program], [startup_program],
                                              self._pass_context)
            params_grads = self._pass_context.get_attr("params_grads")

        config = copy.deepcopy(self._dist_strategy.sharding_configs)
        config["dist_context"] = self._dist_context
        config["params_grads"] = params_grads
        config["rank_id"] = rank
        auto_parallel_clip_pass = new_pass("auto_parallel_grad_clip", config)
        auto_parallel_clip_pass.apply([main_program], [startup_program],
                                      self._pass_context)

        if self._dist_strategy.gradient_merge:
            config = copy.deepcopy(self._dist_strategy.gradient_merge_configs)
            config["dist_context"] = self._dist_context
            config["params_grads"] = params_grads
            auto_parallel_gradient_merge_pass = new_pass(
                "auto_parallel_gradient_merge_pass", config)
            auto_parallel_gradient_merge_pass.apply([main_program],
                                                    [startup_program],
                                                    self._pass_context)

    def _get_dist_program(self, rank, dist_context=None, relaunch_phase=False):
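        """Produce the distributed main/startup programs for one rank:
        complete the annotations (or reuse dist_context), generate the
        backward, apply pre-optimization passes, partition the program,
        apply the optimizer, reshard, and run post-optimization passes."""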
        completed_main_program = None
        serial_main_program = self._main_program.clone()
        serial_startup_program = self._startup_program.clone()
        serial_loss = serial_main_program.global_block().var(self._loss.name)

        # Complete the distributed annotations for the serial program,
        # or reuse the annotations from the given dist_context
        if dist_context is None:
            # Annotation completion
            self._dist_context = DistributedContext()
            _logger.info("Start annotation dist attr.")
            self._completer = Completer(self._dist_context)
            completed_main_program = self._completer.complete_forward_annotation(
                serial_main_program)
        else:
            completed_main_program = serial_main_program
            self._dist_context = copy.deepcopy(dist_context)

        # parse forward sub block
        self._dist_context.block_state.parse_forward_blocks(serial_main_program)

        # serial backward pass
        params_grads = self._generate_backward(
            completed_main_program, serial_startup_program, serial_loss,
            self._parameter_list, self._no_grad_set, self._callbacks)

        # Apply pre-optimization passes (AMP, recompute) on the serial program
        self._apply_pre_optimization_passes(completed_main_program,
                                            serial_startup_program, serial_loss,
                                            params_grads, self._no_grad_set)
        # Logical partition
        partitioner = Partitioner(self._dist_context, rank)
        dist_main_prog, dist_startup_prog, dist_params_grads = partitioner.partition(
            completed_main_program, serial_startup_program, params_grads)

        # TODO refactor the placement of optimizer
        # generate optimize program
        dist_optimize_ops = self._apply_optimize(dist_main_prog,
                                                 dist_startup_prog,
                                                 dist_params_grads)

        set_grad_var_shape(dist_main_prog, self._dist_context)

        make_data_unshard(dist_main_prog, dist_startup_prog, self._dist_context)

        resharder = Resharder(dist_main_prog, dist_startup_prog, rank,
                              self._dist_context, dist_params_grads)
        resharder.reshard()

        self._apply_post_optimization_passes(dist_main_prog, dist_startup_prog,
                                             rank, dist_params_grads)
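        # In the mapping phase, snapshot the global process-group map built
        # for this rank and reset it, so that the next rank's program is
        # built against a clean map.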
        g_process_group_map = None
        if not relaunch_phase:
            g_process_group_map = copy.deepcopy(_g_process_group_map)
            _g_process_group_map.clear()
            _g_process_group_map[0] = ProcessGroup(0, [])
            for process_mesh in self._dist_context._process_meshes:
                _g_process_group_map[0].add_ranks(process_mesh.processes)
        return dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, g_process_group_map

    def parallelize(self,
                    loss,
                    startup_program,
                    parameter_list=None,
                    no_grad_set=None,
                    callbacks=None):
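        """Parallelize the serial program that owns the given loss.

        With auto mapping enabled and a rank mapping still pending, build
        the distributed programs of all ranks, map them onto the cluster
        and relaunch training; otherwise build and return the distributed
        program for the current rank.
        """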
        assert startup_program is not None
        self._loss = loss
        self._startup_program = startup_program
        self._main_program = loss.block.program
        self._parameter_list = parameter_list
        self._no_grad_set = no_grad_set
        self._callbacks = callbacks

        if self._enable_auto_mapping and self._need_rank_mapping:
            # Do the mapping pass before parallelization
            assert self._cluster is not None, \
                "The cluster must not be none when using auto mapping."
            dist_programs = {}
            world_process_group = get_world_process_group()
            dist_context = None
            # auto search
            if self._dist_strategy.auto_search:
                logging.info("Start searching dist attr.")
                serial_program_info = SerialProgramInfo(self._main_program,
                                                        self._startup_program,
                                                        self._loss,
                                                        self._optimizer,
                                                        self._cluster)
                planner = Planner(serial_program_info,
                                  self,
                                  algorithm_config={
                                      "name": "mcmc",
                                      "max_search_times": 5
                                  })
                dist_context, _ = planner.search()
                logging.info("End searching dist attr.")

            # serialize the dist context by planner
            if dist_context is not None:
                logging.info("Start serialize searched dist attr")
                cwd = pathlib.Path().resolve()
                searched_dist_context_path = os.path.join(
                    cwd, f"searched_dist_context_{time.time()}.pkl")
                saved_dist_context = {}
                ops_dist_attr = {}
                tensors_dist_attr = {}
                for key, dist_op in dist_context._dist_ops_for_program.items():
                    ops_dist_attr[key] = dist_op.dist_attr
                for key, dist_tensor in dist_context._dist_tensors_for_program.items(
                ):
                    tensors_dist_attr[key] = dist_tensor.dist_attr
                saved_dist_context["ops_dist_attr"] = ops_dist_attr
                saved_dist_context["tensors_dist_attr"] = tensors_dist_attr
                saved_dist_context[
                    "process_meshes"] = dist_context._process_meshes
                with open(searched_dist_context_path,
                          "wb") as dist_context_file:
                    pickle.dump(saved_dist_context, dist_context_file)
                    os.environ[
                        'PADDLE_SEARCHED_DIST_CONTEXT_PATH'] = searched_dist_context_path
                    logging.info(
                        f"End serialize searched dist attr to {searched_dist_context_path}"
                    )

            for rank in world_process_group.ranks:
                dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, g_process_group_map = self._get_dist_program(
                    rank, dist_context)
                dist_programs[rank] = [dist_main_prog, g_process_group_map]

            # Do the mapping between the distributed program graph and the cluster graph
            rank_mapping_dict = mapping(dist_programs, self._cluster)
            rank_mapping = list(rank_mapping_dict.values())

            # Relaunch the training by using the rank mapping file
            with open(self._rank_mapping_path, "w") as rank_mapping_file:
                json.dump(rank_mapping, rank_mapping_file)

            enable_elastic = os.getenv("PADDLE_ENABLE_ELASTIC")
            enable_elastic = bool(enable_elastic
                                  and enable_elastic.lower() == 'true')
            if enable_elastic:
                print("Auto mapping finished, now do elastic re-launch")
                sys.exit(paddle.distributed.fleet.elastic.manager.
                         ELASTIC_AUTO_PARALLEL_EXIT_CODE)

            original_cmd_args = os.getenv("PADDLE_ORIGINAL_CMD_ARGS")
            rank_mapping_args = " ".join(
                ["--rank_mapping_path", self._rank_mapping_path])
            if os.environ.get("WITH_COVERAGE", "OFF") == "ON":
                coverage_args = ["-m", "coverage", "run", "--branch", "-p"]
            else:
                coverage_args = []
            new_cmd_args = "-m paddle.distributed.fleet.launch" + " " + rank_mapping_args + " " + original_cmd_args
            new_cmd = [sys.executable, "-u"
                       ] + coverage_args + shlex.split(new_cmd_args)
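            # The relaunched command has the form:
            #   python -u [coverage args] -m paddle.distributed.fleet.launch \
            #       --rank_mapping_path <path> <original cmd args>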
            new_process = subprocess.Popen(new_cmd)
            new_process.wait()
            assert new_process.returncode == 0, \
                "Launch failed with rank mapping"
            print("Successfully do the second launch for auto mapping!")
            sys.exit(0)
        else:
            # Parallelization after the mapping pass
            rank = paddle.distributed.get_rank()
            dist_context = None
            searched_dist_context_path = os.getenv(
                "PADDLE_SEARCHED_DIST_CONTEXT_PATH", None)
            if searched_dist_context_path is not None:
                with open(searched_dist_context_path,
                          "rb") as dist_context_file:
                    saved_dist_context = pickle.load(dist_context_file)
                    dist_context = DistributedContext()
                    for op in self._main_program.global_block().ops:
                        dist_attr = saved_dist_context["ops_dist_attr"][
                            op.desc.id()]
                        dist_op = DistributedOperator(op, dist_attr)
                        dist_context.add_dist_op_for_program(dist_op)

                    vars = self._main_program.global_block().vars
                    for var in vars.values():
                        dist_attr = saved_dist_context["tensors_dist_attr"][
                            var.desc.id()]
                        dist_tensor = DistributedTensor(var, dist_attr)
                        dist_context.add_dist_tensor_for_program(dist_tensor)

                    dist_context._process_meshes = saved_dist_context[
                        "process_meshes"]

            else:
                if self._dist_strategy.auto_search:
                    serial_program_info = SerialProgramInfo(
                        self._main_program,
                        self._startup_program,
                        self._loss,
                        self._optimizer,
                        cluster=self._cluster)
                    planner = Planner(serial_program_info,
                                      self,
                                      algorithm_config={
                                          "name": "mcmc",
                                          "max_search_times": 5
                                      })
                    dist_context, _ = planner.search()

            # rebuild g_process_group
            if dist_context is not None:
                pg0 = get_process_group(0)
                for process_mesh in dist_context._process_meshes:
                    pg0.add_ranks(process_mesh.processes)
            dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog, _ = self._get_dist_program(
                rank, dist_context, relaunch_phase=True)

            # NOTE: This is a trick to fix hang in pipeline mode when dist context is searched by planner
            if self._dist_strategy.auto_search:
                is_pipeline = False
                for op in dist_main_prog.global_block().ops:
                    if op.type == "send_v2" or op.type == "recv_v2":
                        is_pipeline = True
                        break
                if is_pipeline:
                    with paddle.static.program_guard(dist_main_prog):
                        paddle.distributed.barrier()

            # Instantiate only the communication groups that this rank
            # participates in.
            all_process_groups = get_all_process_groups()
            for process_group in all_process_groups:
                if rank not in process_group.ranks:
                    continue
                process_group.instantiate()

            # Copy distributed info to the default context
            set_default_distributed_context(self._dist_context)

            # The last step: remove all distributed attributes to be compatible
            # with inference.
            self._remove_distributed_attrs(dist_main_prog)

            return dist_optimize_ops, dist_params_grads, dist_startup_prog, dist_main_prog

    def __deepcopy__(self, memo):
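        """Deep-copy the parallelizer while sharing the program, context,
        fleet and loss members instead of copying them."""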
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k in ("_main_program", "_startup_program", "_dist_context",
                     "_fleet", "_loss"):
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        return result