#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import copy
from collections import defaultdict
import paddle.fluid
from paddle.fluid import framework
from paddle.fluid.framework import get_flags, set_flags
from paddle.fluid import core
from paddle.distributed.passes import PassContext
from .dist_attribute import TensorDistributedAttribute
from .dist_attribute import OperatorDistributedAttribute
from .dist_tensor import DistributedTensor
from .dist_op import DistributedOperator
from .process_mesh import ProcessMesh
from .utils import is_loss_grad_op, is_loss_op

# There always exists a default context for the user, and the user can replace it with another one.
_g_default_distributed_context = None


def get_default_distributed_context():
    global _g_default_distributed_context
    if _g_default_distributed_context is None:
        dist_context = DistributedContext()
        set_default_distributed_context(dist_context)
    return _g_default_distributed_context


def set_default_distributed_context(dist_context):
    global _g_default_distributed_context
    _g_default_distributed_context = dist_context
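
# A minimal usage sketch (illustrative only; `ProcessMesh([0, 1])` assumes two
# ranks): annotations made through the default context are picked up when an
# auto-parallel run initializes its own DistributedContext.
#
#     ctx = get_default_distributed_context()
#     ctx.add_process_mesh(ProcessMesh([0, 1]))
#     # a run may also swap in a fresh context:
#     set_default_distributed_context(DistributedContext())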


def _node_id(node):
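    # Identify a node by its (graph_id, node_id) pair, so that nodes coming from
    # different subgraphs cannot collide.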
    return (node.node.graph_id(), node.node.id())


class DistributedContext:
    """
    DistributedContext is used to collect related distributed information for program and graph.
    One auto-parallel run should use its own DistributedContext to avoid interfering with other runs.
    """

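    # A minimal construction sketch (illustrative; `main_prog`, `startup_prog`,
    # `opt` and `loss` are assumed to come from a serial Paddle program):
    #
    #     dist_context = DistributedContext(serial_main_prog=main_prog,
    #                                       serial_startup_prog=startup_prog,
    #                                       serial_optimizer=opt,
    #                                       serial_loss=loss)
    #     dist_context.initialize()
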
    def __init__(self,
                 serial_main_prog=None,
                 serial_startup_prog=None,
                 serial_optimizer=None,
                 serial_loss=None,
                 feed_vars=None,
                 fetch_vars=None,
                 cluster=None,
                 strategy=None):
        # Data members related to original programs (unchanged)
        self._original_serial_main_program = serial_main_prog
        self._original_serial_startup_program = serial_startup_prog
        self._original_serial_optimizer = serial_optimizer
        self._original_serial_loss = serial_loss
        # Use fresh dicts to avoid the mutable-default-argument pitfall
        self._original_serial_feed_vars = feed_vars if feed_vars is not None else {}
        self._original_serial_fetch_vars = fetch_vars if fetch_vars is not None else {}

        # Data members related to programs (changed)
        self._serial_main_program = None
        self._serial_startup_program = None
        self._serial_loss = None
        self._serial_optimizer = None
        self._serial_feed_vars = {}
        self._serial_fetch_vars = {}
        self._lr_optimizer = None  # record the optimizer holding the lr_scheduler

        # Data members related to the program
        self._dist_tensors_for_program = {}
        self._dist_ops_for_program = {}

        # Data members related to the graph
        self._serial_graph = None
        self._dist_tensors_for_graph = {}
        self._dist_ops_for_graph = {}
        self._node_id_to_tensor_id = {}
        self._node_id_to_op_id = {}

        # Data members related to the distributed programs
        self._dist_main_programs = {}
        self._dist_startup_programs = {}
        self._dist_op_context = DistributedOperatorContext()
        self._process_meshes = []

        self._cluster = cluster
        self._strategy = strategy

        # Pass Context
        self._pass_context = PassContext()
        self._block_state = BlockState()

        # Other data members
        self._serial_ordered_tensor_nodes = []
        self._serial_ordered_op_nodes = []
        self._serial_ordered_nodes = []
        # self._tensor_id_to_tensor_node_ids = {}

        self._is_initialized = False
        # TODO: need a better way to remove the following flag
        self._need_copy_dist_attr_to_graph = False
        self._backup_pass_context_stack = []
        self._backup_block_state_stack = []
        self._backup_dist_tensors_for_program_stack = []
        self._backup_dist_ops_for_program_stack = []
        self._backup_serial_main_program_stack = []
        self._backup_serial_startup_program_stack = []

        # Flag of whether to scale the gradient by the data-parallel (dp) size
        self._gradient_scale = True

        # A flag indicating whether the parallelism in use is data parallel
        self._data_parallel = False

        # Flag of whether `to_static` mode is in use
        self._dygraph_mode = False

    @property
    def serial_main_program(self):
        return self._serial_main_program

    @property
    def serial_startup_program(self):
        return self._serial_startup_program

    @property
    def serial_loss(self):
        return self._serial_loss

    @property
    def serial_optimizer(self):
        return self._serial_optimizer

    @property
    def serial_feed_vars(self):
        return self._serial_feed_vars

    @property
    def serial_fetch_vars(self):
        return self._serial_fetch_vars

    @property
    def dist_main_programs(self):
        return self._dist_main_programs

    @property
    def dist_startup_programs(self):
        return self._dist_startup_programs

    @property
    def cluster(self):
        return self._cluster

    @property
    def strategy(self):
        return self._strategy

    @property
    def serial_graph(self):
        return self._serial_graph

    @property
    def serial_ordered_nodes(self):
        return self._serial_ordered_nodes

    @property
    def process_meshes(self):
        return self._process_meshes

    @property
    def pass_context(self):
        return self._pass_context

    @property
    def dist_op_context(self):
        return self._dist_op_context

    @property
    def block_state(self):
        return self._block_state

    @property
    def has_annotation(self):
        return len(self._dist_tensors_for_program) > 0 or len(
            self._dist_ops_for_program) > 0

    @property
    def gradient_scale(self):
        return self._gradient_scale

    @gradient_scale.setter
    def gradient_scale(self, gs):
        self._gradient_scale = gs

    @property
    def data_parallel(self):
        return self._data_parallel

    @data_parallel.setter
    def data_parallel(self, dp):
        self._data_parallel = dp

    def _backup_serial_info(self, mode):
        self._backup_serial_main_program_stack.append(
            self._serial_main_program.clone())
        self._backup_serial_startup_program_stack.append(
            self._serial_startup_program.clone())
        self._backup_pass_context_stack.append(copy.deepcopy(
            self._pass_context))
        self._backup_block_state_stack.append(copy.deepcopy(self._block_state))

    def _backup_dist_info(self, mode):
        self._backup_dist_tensors_for_program_stack.append(
            copy.deepcopy(self._dist_tensors_for_program))
        self._backup_dist_ops_for_program_stack.append(
            copy.deepcopy(self._dist_ops_for_program))

    def _backup(self, serial=True, serial_mode=None, dist=True, dist_mode=None):
        # Use this function carefully
        if serial:
            self._backup_serial_info(serial_mode)
        if dist:
            self._backup_dist_info(dist_mode)

    def _restore_serial_loss(self):
        if self._original_serial_loss:
            if isinstance(self._original_serial_loss, list):
                if len(self._original_serial_loss) == 1:
                    loss = self._original_serial_loss[0]
                    block_idx = loss.block.idx
                    var_name = loss.name
                    var = self._serial_main_program.blocks[
                        block_idx]._var_recursive(var_name)
                    self._serial_loss = var
                elif len(self._original_serial_loss) == 0:
                    self._serial_loss = []
                else:
                    raise ValueError("Multiple loss vars are not supported.")
            else:
                block_idx = self._original_serial_loss.block.idx
                var_name = self._original_serial_loss.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                self._serial_loss = var

    def _restore_serial_feed_vars(self):
        for key, var_list in self._original_serial_feed_vars.items():
            new_var_list = []
            for var in var_list:
                block_idx = var.block.idx
                var_name = var.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                new_var_list.append(var)
            self._serial_feed_vars[key] = new_var_list

    def _restore_serial_fetch_vars(self):
        for key, var_list in self._original_serial_fetch_vars.items():
            new_var_list = []
            for var in var_list:
                block_idx = var.block.idx
                var_name = var.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                new_var_list.append(var)
            self._serial_fetch_vars[key] = new_var_list

    def _restore_serial_info(self, mode="to_backup"):
        if mode == "to_backup":
            self._serial_main_program = self._backup_serial_main_program_stack.pop(
            )
            self._serial_startup_program = self._backup_serial_startup_program_stack.pop(
            )
        elif mode == "to_original":
            assert self._original_serial_main_program is not None
            assert self._original_serial_startup_program is not None
            self._serial_main_program = self._original_serial_main_program.clone(
            )
            self._serial_startup_program = self._original_serial_startup_program.clone(
            )

        self._restore_serial_loss()
        self._restore_serial_feed_vars()
        self._restore_serial_fetch_vars()
        self._serial_optimizer = self._original_serial_optimizer
        self._pass_context = self._backup_pass_context_stack.pop()
        self._block_state = self._backup_block_state_stack.pop()

    def _restore_dist_info(self, mode="to_backup"):
        if mode == "to_backup":
            self._dist_tensors_for_program = self._backup_dist_tensors_for_program_stack.pop(
            )
            self._dist_ops_for_program = self._backup_dist_ops_for_program_stack.pop(
            )
        elif mode == "to_original":
            assert self._original_dist_tensors_for_program
            assert self._original_dist_ops_for_program
            self._dist_tensors_for_program = copy.deepcopy(
                self._original_dist_tensors_for_program)
            self._dist_ops_for_program = copy.deepcopy(
                self._original_dist_ops_for_program)
        elif mode == "to_default":
            new_tensors_ids = []
            for tensor_id, dist_tensor in self._dist_tensors_for_program.items(
            ):
                if tensor_id in self._tensors_ids:
                    dist_tensor.dist_attr.reset()
                else:
                    new_tensors_ids.append(tensor_id)
            for tensor_id in new_tensors_ids:
                self._dist_tensors_for_program.pop(tensor_id)
            new_ops_ids = []
            for op_id, dist_op in self._dist_ops_for_program.items():
                if op_id in self._ops_ids:
                    dist_op.dist_attr.reset()
                else:
                    new_ops_ids.append(op_id)
            for op_id in new_ops_ids:
                self._dist_ops_for_program.pop(op_id)
        else:
            new_tensors_ids = []
            for tensor_id, dist_tensor in self._dist_tensors_for_program.items(
            ):
                new_tensors_ids.append(tensor_id)
            for tensor_id in new_tensors_ids:
                self._dist_tensors_for_program.pop(tensor_id)
            new_ops_ids = []
            for op_id, dist_op in self._dist_ops_for_program.items():
                new_ops_ids.append(op_id)
            for op_id in new_ops_ids:
                self._dist_ops_for_program.pop(op_id)
        self._dist_main_programs = {}
        self._dist_startup_programs = {}
        self._dist_op_context = DistributedOperatorContext()
        self._need_copy_dist_attr_to_graph = True
        self._process_meshes = []

    def _restore(self,
                 serial=True,
                 serial_mode="to_backup",
                 dist=True,
                 dist_mode="to_backup"):
        # Use this function carefully
        if serial:
            self._restore_serial_info(serial_mode)
        if dist:
            self._restore_dist_info(dist_mode)
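
    # A typical pairing (illustrative sketch; `run_some_pass` is hypothetical):
    #
    #     dist_context._backup(serial=True, dist=True)
    #     try:
    #         run_some_pass(dist_context)
    #     except Exception:
    #         dist_context._restore(serial_mode="to_backup", dist_mode="to_backup")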

    def initialize(self, with_graph=True):
        if not self._is_initialized:
            if not self._serial_main_program:
                if self._original_serial_main_program:
                    self._serial_main_program = self._original_serial_main_program.clone(
                    )
            if not self._serial_startup_program:
                if self._original_serial_startup_program:
                    self._serial_startup_program = self._original_serial_startup_program.clone(
                    )
            if not self._serial_loss:
                self._restore_serial_loss()
            if not self._serial_optimizer:
                self._serial_optimizer = self._original_serial_optimizer
            if not self._serial_feed_vars:
                self._restore_serial_feed_vars()
            if not self._serial_fetch_vars:
                self._restore_serial_fetch_vars()

            self._init_dist_attr_for_program()
            # Backup the original distributed information for later restore
            self._original_dist_tensors_for_program = copy.deepcopy(
                self._dist_tensors_for_program)
            self._original_dist_ops_for_program = copy.deepcopy(
                self._dist_ops_for_program)
            self._tensors_ids = list(self._dist_tensors_for_program.keys())
            self._ops_ids = list(self._dist_ops_for_program.keys())
            self._is_initialized = True

            if with_graph:
                set_flags({"FLAGS_convert_all_blocks": True})
                self._serial_graph = framework.IrGraph(
                    core.Graph(self._serial_main_program.desc))
                self._init_dist_attr_for_graph()
                self._need_copy_dist_attr_to_graph = False

        if self._need_copy_dist_attr_to_graph and with_graph:
            self.copy_dist_attr_from_program_to_graph()
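
    # Note: initialize(with_graph=False) skips building the IrGraph and the
    # graph-side distributed attributes, which is sufficient when only the
    # program-side annotations are needed.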

    def add_process_mesh(self, process_mesh):
        assert isinstance(process_mesh, ProcessMesh), \
            'The type of process_mesh must be ProcessMesh.'
        if process_mesh not in self.process_meshes:
            self._process_meshes.append(process_mesh)

    def add_dist_tensor_for_program(self, dist_tensor):
        inner_serial_tensor = dist_tensor.serial_tensor
        inner_serial_tensor_id = inner_serial_tensor.desc.original_id()
        self._dist_tensors_for_program[inner_serial_tensor_id] = dist_tensor

    def add_dist_op_for_program(self, dist_op):
        inner_serial_op = dist_op.serial_op
        inner_serial_op_id = inner_serial_op.desc.original_id()
        self._dist_ops_for_program[inner_serial_op_id] = dist_op

    def get_dist_tensor_for_program(self, serial_tensor):
        serial_tensor_id = serial_tensor.desc.id()
        dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id, None)
        if dist_tensor:
            return dist_tensor
        else:
            serial_tensor_id = serial_tensor.desc.original_id()
            dist_tensor = self._dist_tensors_for_program.get(
                serial_tensor_id, None)
            if dist_tensor:
                return dist_tensor
            else:
                return None

    def get_dist_tensor_for_graph(self, serial_tensor_node):
        serial_tensor_node_id = _node_id(serial_tensor_node)
        return self._dist_tensors_for_graph.get(serial_tensor_node_id, None)

    def get_dist_op_for_program(self, serial_op):
        serial_op_id = serial_op.desc.id()
        dist_op = self._dist_ops_for_program.get(serial_op_id, None)
        if dist_op:
            return dist_op
        else:
            serial_op_id = serial_op.desc.original_id()
            dist_op = self._dist_ops_for_program.get(serial_op_id, None)
            if dist_op:
                return dist_op
            else:
                return None

    def del_dist_op_for_program(self, serial_op):
        serial_op_id = serial_op.desc.id()
        if self._dist_ops_for_program.get(serial_op_id, None):
            del self._dist_ops_for_program[serial_op_id]

    def get_dist_op_for_graph(self, serial_op_node):
        serial_op_node_id = _node_id(serial_op_node)
        return self._dist_ops_for_graph.get(serial_op_node_id, None)

    def get_tensor_dist_attr_for_program(self, serial_tensor):
        serial_tensor_id = serial_tensor.desc.id()
        dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id, None)
        if dist_tensor:
            return dist_tensor.dist_attr
        else:
            serial_tensor_id = serial_tensor.desc.original_id()
            dist_tensor = self._dist_tensors_for_program.get(
                serial_tensor_id, None)
            if dist_tensor:
                return dist_tensor.dist_attr
            else:
                return None

    def get_tensor_dist_attr_for_program_with_id(self, tensor_id):
        dist_tensor = self._dist_tensors_for_program.get(tensor_id, None)
        if dist_tensor:
            return dist_tensor.dist_attr
        else:
            return None

    def set_tensor_dist_attr_for_program(self, serial_tensor, dist_attr):
        dist_tensor = DistributedTensor(serial_tensor, dist_attr)
        self.add_dist_tensor_for_program(dist_tensor)
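
    # Annotation sketch (illustrative; assumes a 1-D mesh of two processes and a
    # 2-D tensor sharded along its first dimension):
    #
    #     attr = TensorDistributedAttribute()
    #     attr.process_mesh = ProcessMesh([0, 1])
    #     attr.dims_mapping = [0, -1]   # dim 0 sharded over mesh axis 0, dim 1 replicated
    #     dist_context.set_tensor_dist_attr_for_program(tensor, attr)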

    def get_tensor_dist_attr_for_graph(self, serial_tensor_node):
        serial_tensor_node_id = _node_id(serial_tensor_node)
        dist_tensor = self._dist_tensors_for_graph.get(serial_tensor_node_id,
                                                       None)
        if dist_tensor:
            return dist_tensor.dist_attr
        else:
            return None

    def get_op_dist_attr_for_program(self, serial_op):
        serial_op_id = serial_op.desc.id()
        dist_op = self._dist_ops_for_program.get(serial_op_id, None)
        if dist_op:
            return dist_op.dist_attr
        else:
            serial_op_id = serial_op.desc.original_id()
            dist_op = self._dist_ops_for_program.get(serial_op_id, None)
            if dist_op:
                return dist_op.dist_attr
            else:
                return None

    def get_op_dist_attr_for_program_with_id(self, op_id):
        dist_op = self._dist_ops_for_program.get(op_id, None)
        if dist_op:
            return dist_op.dist_attr
        else:
            return None

    def set_op_dist_attr_for_program(self, serial_op, dist_attr):
        dist_op = DistributedOperator(serial_op, dist_attr)
        self.add_dist_op_for_program(dist_op)

    def get_op_dist_attr_for_graph(self, serial_op_node):
        serial_op_node_id = _node_id(serial_op_node)
        dist_op = self._dist_ops_for_graph.get(serial_op_node_id, None)
        if dist_op:
            return dist_op.dist_attr
        else:
            return None

    def get_dist_attr_for_graph(self, serial_node):
        if serial_node.is_var() and serial_node.var() is not None:
            serial_tensor_node_id = _node_id(serial_node)
            dist_tensor = self._dist_tensors_for_graph.get(
                serial_tensor_node_id, None)
            if dist_tensor:
                return dist_tensor.dist_attr
            else:
                return None
        if serial_node.is_op() and serial_node.op() is not None:
            serial_op_node_id = _node_id(serial_node)
            dist_op = self._dist_ops_for_graph.get(serial_op_node_id, None)
            if dist_op:
                return dist_op.dist_attr
            else:
                return None
        return None

    def _init_dist_attr_for_program(self, no_default=False):
        # Copy the dist tensors and dist ops annotated by users from the default context
        if not no_default:
            default_ctx = get_default_distributed_context()
            self._process_meshes = copy.deepcopy(default_ctx.process_meshes)
        else:
            default_ctx = self
        # Copy the data parallel flag from the default context
        self._data_parallel = default_ctx.data_parallel
        for block in self._serial_main_program.blocks:
            for tensor in block.vars.values():
                # Copy the distributed tensors in the default context
                default_dist_tensor = default_ctx.get_dist_tensor_for_program(
                    tensor)
                if default_dist_tensor and default_ctx is not self:
                    self.add_dist_tensor_for_program(default_dist_tensor)
                current_dist_tensor = self.get_dist_tensor_for_program(tensor)
                if current_dist_tensor is None:
                    dist_tensor = DistributedTensor(tensor)
                    self.add_dist_tensor_for_program(dist_tensor)
            for op in block.ops:
                # Copy the distributed operators in the default context
                default_dist_op = default_ctx.get_dist_op_for_program(op)
                if default_dist_op and default_ctx is not self:
                    self.add_dist_op_for_program(default_dist_op)
                current_dist_op = self.get_dist_op_for_program(op)
                if current_dist_op is None:
                    dist_op = DistributedOperator(op)
                    self.add_dist_op_for_program(dist_op)
        self._original_dist_tensors_for_program = copy.deepcopy(
            self._dist_tensors_for_program)
        self._original_dist_ops_for_program = copy.deepcopy(
            self._dist_ops_for_program)

    def _order_nodes_by_program_order(self):

        def _contains(nodes, target_node):
            for node in nodes:
                if _node_id(node) == _node_id(target_node):
                    return True
            return False

        serial_ordered_tensor_nodes = []
        serial_ordered_op_nodes = []
        all_nodes = []
        for graph in self._serial_graph.all_sub_graphs():
            for node in graph.all_nodes():
                all_nodes.append(node)
        for node in all_nodes:
            if node.is_var() and node.var() is not None:
                serial_ordered_tensor_nodes.append(node)
            if node.is_op() and node.op() is not None:
                serial_ordered_op_nodes.append(node)
        serial_ordered_tensor_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        serial_ordered_op_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        num_nodes_before = len(serial_ordered_tensor_nodes) + len(
            serial_ordered_op_nodes)

        new_serial_ordered_tensor_nodes = []
        new_serial_ordered_op_nodes = []
        new_serial_ordered_nodes = []
        for op_node in serial_ordered_op_nodes:
            tensor_nodes = []
            for tensor_node in op_node.inputs:
                if tensor_node.is_var() \
                    and tensor_node.var() is not None \
                    and not _contains(new_serial_ordered_nodes, tensor_node):
                    tensor_nodes.append(tensor_node)
                    new_serial_ordered_tensor_nodes.append(tensor_node)
            tensor_nodes.sort(key=lambda node: node.node.original_desc_id())
            new_serial_ordered_nodes.extend(tensor_nodes)
            new_serial_ordered_nodes.append(op_node)
            new_serial_ordered_op_nodes.append(op_node)
            tensor_nodes = []
            for tensor_node in op_node.outputs:
                if tensor_node.is_var() \
                    and tensor_node.var() is not None \
                    and not _contains(new_serial_ordered_nodes, tensor_node):
                    tensor_nodes.append(tensor_node)
                    new_serial_ordered_tensor_nodes.append(tensor_node)
            tensor_nodes.sort(key=lambda node: node.node.original_desc_id())
            new_serial_ordered_nodes.extend(tensor_nodes)
        new_serial_ordered_tensor_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        new_serial_ordered_op_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        self._serial_ordered_tensor_nodes = new_serial_ordered_tensor_nodes
        self._serial_ordered_op_nodes = new_serial_ordered_op_nodes
        self._serial_ordered_nodes = new_serial_ordered_nodes
        assert len(self._serial_ordered_nodes) == len(
            self._serial_ordered_tensor_nodes) + len(
                self._serial_ordered_op_nodes)
        self._serial_orphan_tensor_nodes = []
        for tensor_node in serial_ordered_tensor_nodes:
            if not _contains(self._serial_ordered_tensor_nodes, tensor_node):
                self._serial_orphan_tensor_nodes.append(tensor_node)
        if len(self._serial_ordered_nodes) != num_nodes_before:
            print(
                "WARNING: there are some orphan tensors or ops which are not used in the execution."
            )

    def _init_dist_attr_for_graph(self):
        # Convert program to graph and initialize the distributed attributes
        self._order_nodes_by_program_order()
        for node in self.serial_ordered_nodes:
            if node.is_var() and node.var() is not None:
                dist_tensor = None
                tensor_id = node.node.original_desc_id()
                for cur_tensor_id, cur_dist_tensor in self._dist_tensors_for_program.items(
                ):
                    if tensor_id == cur_tensor_id \
                        or tensor_id == cur_dist_tensor.serial_tensor.desc.original_id():
                        dist_tensor = cur_dist_tensor
                        self._node_id_to_tensor_id[_node_id(
                            node)] = cur_tensor_id
                assert dist_tensor is not None, \
                    "Each tensor must have a distributed tensor after the program initialization."
                serial_tensor_node_id = _node_id(node)
                new_dist_tensor = DistributedTensor(dist_tensor.serial_tensor,
                                                    dist_tensor.dist_attr)
                self._dist_tensors_for_graph[
                    serial_tensor_node_id] = new_dist_tensor
            if node.is_op() and node.op() is not None:
                dist_op = None
                op_id = node.node.original_desc_id()
                for cur_op_id, cur_dist_op in self._dist_ops_for_program.items(
                ):
                    if op_id == cur_op_id \
                        or op_id == cur_dist_op.serial_op.desc.original_id():
                        dist_op = cur_dist_op
                        self._node_id_to_op_id[_node_id(node)] = cur_op_id
                assert dist_op is not None, \
                    "Each operator must have a distributed operator after the program initialization."
                serial_op_node_id = _node_id(node)
                new_dist_op = DistributedOperator(dist_op.serial_op,
                                                  dist_op.dist_attr)
                self._dist_ops_for_graph[serial_op_node_id] = new_dist_op

    def clear_dist_info_for_program(self):
        self._dist_tensors_for_program.clear()
        self._dist_ops_for_program.clear()

    def clear_dist_info_for_graph(self):
        self._dist_tensors_for_graph.clear()
        self._dist_ops_for_graph.clear()

    def copy_dist_attr_from_program_to_graph(self):
        for node in self.serial_ordered_nodes:
            if node.is_var() and node.var() is not None:
                dist_tensor = None
                tensor_id = node.node.original_desc_id()
                for cur_tensor_id, cur_dist_tensor in self._dist_tensors_for_program.items(
                ):
                    if tensor_id == cur_tensor_id \
                        or tensor_id == cur_dist_tensor.serial_tensor.desc.original_id():
                        dist_tensor = cur_dist_tensor
                assert dist_tensor is not None, \
                    "Each tensor must have a distributed tensor after the program initialization."
                serial_tensor_node_id = _node_id(node)
                new_dist_tensor = DistributedTensor(dist_tensor.serial_tensor,
                                                    dist_tensor.dist_attr)
                self._dist_tensors_for_graph[
                    serial_tensor_node_id] = new_dist_tensor
            if node.is_op() and node.op() is not None:
                dist_op = None
                op_id = node.node.original_desc_id()
                for cur_op_id, cur_dist_op in self._dist_ops_for_program.items(
                ):
                    if op_id == cur_op_id \
                        or op_id == cur_dist_op.serial_op.desc.original_id():
                        dist_op = cur_dist_op
                assert dist_op is not None, \
                    "Each operator must have a distributed operator after the program initialization."
                serial_op_node_id = _node_id(node)
                new_dist_op = DistributedOperator(dist_op.serial_op,
                                                  dist_op.dist_attr)
                self._dist_ops_for_graph[serial_op_node_id] = new_dist_op

    def copy_dist_attr_from_graph_to_program(self):
        assert self._is_initialized, \
            "Both program and graph must be initialized."
        updated_tensors = {}
        # all_nodes = self._serial_graph.all_nodes()
        all_nodes = self._serial_ordered_nodes
        for node in all_nodes:
            if node.is_var() and node.var() is not None:
                tensor_id = self._node_id_to_tensor_id[_node_id(node)]
                updated = updated_tensors.get(tensor_id, False)
                # If a var has multiple var nodes in the graph, only use the first one for now
                if not updated:
                    tensor_dist_attr_for_graph = self.get_tensor_dist_attr_for_graph(
                        node)
                    dist_tensor_for_program = self._dist_tensors_for_program[
                        tensor_id]
                    dist_tensor_for_program.dist_attr = tensor_dist_attr_for_graph
                    updated_tensors[tensor_id] = True
            if node.is_op() and node.op() is not None:
                op_id = self._node_id_to_op_id[_node_id(node)]
                op_dist_attr_for_graph = self.get_op_dist_attr_for_graph(node)
                dist_op_for_program = self._dist_ops_for_program[op_id]
                dist_op_for_program.dist_attr = op_dist_attr_for_graph
        # TODO: the completion algorithm will skip orphan tensors,
        # so here we just set their process_mesh to the first one.
        for orphan_node in self._serial_orphan_tensor_nodes:
            serial_tensor_id = orphan_node.var().id()
            dist_tensor = self._dist_tensors_for_program.get(
                serial_tensor_id, None)
            if dist_tensor:
                dist_tensor.dist_attr.process_mesh = self._process_meshes[0]
            else:
                serial_tensor_id = orphan_node.var().original_id()
                dist_tensor = self._dist_tensors_for_program.get(
                    serial_tensor_id, None)
                dist_tensor.dist_attr.process_mesh = self._process_meshes[0]

    def amend_dist_attr_for_program(self):
        for dist_tensor in self._dist_tensors_for_program.values():
            serial_tensor = dist_tensor.serial_tensor
            dist_attr = dist_tensor.dist_attr
            if serial_tensor.type == core.VarDesc.VarType.READER \
                or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \
                or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES:
                tensor_shape = []
            else:
                tensor_shape = serial_tensor.shape
            dims_mapping = dist_attr.dims_mapping
            process_mesh_shape = dist_attr.process_mesh.topology
            process_mesh_processes = dist_attr.process_mesh.processes
            # If a tensor dimension is smaller than the process-mesh dimension it is
            # mapped to, amend that entry of dims_mapping to -1, i.e. replicate the
            # dimension. (Is this really OK?)
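            # e.g. tensor_shape = [2], process_mesh_shape = [4], dims_mapping = [0]:
            # since 4 > 2, dims_mapping is amended to [-1].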
            for i in range(len(tensor_shape)):
                if dims_mapping[i] != -1 and tensor_shape[i] > 0 \
                    and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]:
                    dims_mapping[i] = -1
                if dims_mapping[i] != -1 and len(process_mesh_processes) == 1:
                    dims_mapping[i] = -1

        for dist_op in self._dist_ops_for_program.values():
            serial_op = dist_op.serial_op
            dist_attr = dist_op.dist_attr
            process_mesh_shape = dist_attr.process_mesh.topology
            process_mesh_processes = dist_attr.process_mesh.processes
            for arg_name in serial_op.input_arg_names:
                if dist_op.get_serial_input(arg_name) is None:
                    tensor_shape = []
                else:
                    if dist_op.get_serial_input(arg_name).type == core.VarDesc.VarType.READER \
                        or dist_op.get_serial_input(arg_name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \
                        or dist_op.serial_op.type == "create_py_reader":
                        tensor_shape = []
                    else:
                        tensor_shape = dist_op.get_serial_input(arg_name).shape
                dims_mapping = dist_attr.get_input_dims_mapping(arg_name)
                # If a tensor dimension is smaller than the process-mesh dimension it is
                # mapped to, amend that entry of dims_mapping to -1, i.e. replicate the
                # dimension. (Is this really OK?)
                for i in range(len(tensor_shape)):
                    if dims_mapping[i] != -1 and tensor_shape[i] > 0 \
                        and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]:
                        dims_mapping[i] = -1
                    if dims_mapping[i] != -1 and len(
                            process_mesh_processes) == 1:
                        dims_mapping[i] = -1
            for arg_name in serial_op.output_arg_names:
                if dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.READER \
                    or dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \
                    or dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.STEP_SCOPES:
                    tensor_shape = []
                else:
                    tensor_shape = dist_op.get_serial_output(arg_name).shape
                dims_mapping = dist_attr.get_output_dims_mapping(arg_name)
                # If a tensor dimension is smaller than the process-mesh dimension it is
                # mapped to, amend that entry of dims_mapping to -1, i.e. replicate the
                # dimension. (Is this really OK?)
                for i in range(len(tensor_shape)):
                    if dims_mapping[i] != -1 and tensor_shape[i] > 0 \
                        and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]:
                        dims_mapping[i] = -1
                    if dims_mapping[i] != -1 and len(
                            process_mesh_processes) == 1:
                        dims_mapping[i] = -1
            if len(process_mesh_processes) == 1:
                dist_op.dist_attr.impl_type = "default"
                dist_op.dist_attr.impl_idx = 0

    def validate_dist_attr_for_program(self):
        if not self._is_initialized:
            assert False, \
                "Program must be initialized before validating its distributed attributes"
        for block in self.serial_main_program.blocks:
            for tensor in block.vars.values():
                dist_tensor = self.get_dist_tensor_for_program(tensor)
                assert dist_tensor is not None, \
                    "Tensor {} does not have a distributed attribute.".format(
                        tensor.name)
                if (dist_tensor
                        is not None) and (not dist_tensor.validate_dist_attr()):
                    assert False, "Tensor {} (id: {}, original_id: {}) has wrong distributed attributes: {}.".format(
                        dist_tensor.serial_tensor.name,
                        dist_tensor.serial_tensor.desc.id(),
                        dist_tensor.serial_tensor.desc.original_id(),
                        dist_tensor.dist_attr)
            for op in block.ops:
                dist_op = self.get_dist_op_for_program(op)
                assert dist_op is not None, \
                    "Operator {} does not have a distributed attribute.".format(
                        op.type)
                if (dist_op is not None) and (not dist_op.validate_dist_attr()):
                    assert False, "Operator {} (id: {}, original_id: {}) has wrong distributed attributes: {}.".format(
                        dist_op.serial_op.type, dist_op.serial_op.desc.id(),
                        dist_op.serial_op.desc.original_id(), dist_op.dist_attr)
        return True

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k in [
                "_original_serial_main_program", "_original_serial_startup_program", \
                "_serial_main_program", "_serial_startup_program", "_serial_graph", \
                "_dist_main_programs", "_dist_startup_programs", \
                "_serial_ordered_nodes", "_serial_ordered_tensor_nodes", \
                "_serial_ordered_op_nodes", "_original_serial_loss", \
                "_original_serial_feed_vars", "_original_serial_fetch_vars", \
                "_serial_loss", "_serial_feed_vars", "_serial_fetch_vars", "_lr_optimizer", \
                "_backup_serial_main_program_stack", "_backup_serial_startup_program_stack", \
                "_pass_context"]:
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))

        # update dist tensor's dist_context
        for key in result._dist_tensors_for_program.keys():
            result._dist_tensors_for_program[key]._dist_context = result
        return result


class DistributedOperatorContext:
    """
    DistributedOperatorContext is used to create a dist op desc in Program.
    Each time a new dist op is created, the context should be updated for it accordingly.
    """

    def __init__(self):
        self._dst_main_program = None
        self._main_block = None
        self._dst_startup_program = None
        self._startup_block = None
        self._cur_src_op = None
        self._cur_dist_attr = None
        self.grad_op_id_to_op_id = {}
        self.grad_var_to_var = defaultdict(dict)
        self._work_block = None
        self.already_init_sync_vars = set()
        self.varname_mapping = None
        self.rank_id = None
        # NOTE: Support correct parallelism for high-order differential models.
        # By default, _exceed_backward_init_op is False, which means we are in the
        # forward phase; once it becomes True, we are in the backward phase.
        # The final solution should be to revise the high-order differential logic
        # for these two phases in the future.
        self._exceed_backward_init_op = False

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k in [
                    "_dst_main_program", "_dst_startup_program", "_cur_src_op",
                    "_work_block", "_main_block", "_startup_block"
            ]:
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        return result

    @property
    def dst_main_program(self):
        return self._dst_main_program

    @dst_main_program.setter
    def dst_main_program(self, prog):
        self._dst_main_program = prog
        self._main_block = prog.blocks[0]

    @property
    def main_block(self):
        return self._main_block

    @property
    def dst_startup_program(self):
        return self._dst_startup_program

    @dst_startup_program.setter
    def dst_startup_program(self, prog):
        self._dst_startup_program = prog
        self._startup_block = prog.blocks[0]

    @property
    def startup_block(self):
        return self._startup_block

    @property
    def work_block(self):
        assert self._work_block is not None
        return self._work_block

    @work_block.setter
    def work_block(self, block):
        assert block is not None
        self._work_block = block

    @property
    def cur_src_op(self):
        assert self._cur_src_op is not None
        return self._cur_src_op

    def in_backward_phase(self):
        return self._exceed_backward_init_op

    def prepare_context(self, src_op):
        self._cur_src_op = src_op
        if is_loss_grad_op(src_op):
            self._exceed_backward_init_op = True

        # build input varname mapping
        kinputs = {}
        for input_name in src_op.desc.input_names():
            varnames = []
            for varname in src_op.desc.input(input_name):
                assert varname in self.varname_mapping
                varnames.append(self.varname_mapping[varname])
            kinputs[input_name] = varnames

        # build output varname mapping
        koutputs = {}
        for output_name in src_op.desc.output_names():
            varnames = []
            for varname in src_op.desc.output(output_name):
                assert varname in self.varname_mapping
                varnames.append(self.varname_mapping[varname])
            koutputs[output_name] = varnames

        return kinputs, koutputs
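
    # Sketch of the returned mapping (illustrative): for an op with input names
    # {"X": ["x"], "Y": ["w"]} and varname_mapping {"x": "x", "w": "w@RENAME"},
    # prepare_context returns kinputs == {"X": ["x"], "Y": ["w@RENAME"]};
    # koutputs is built the same way from the output names.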


class BlockState(object):

    def __init__(self):
        self.nblock = 0
        self.forward_indices = []
        self.backward_indices = []
        self.backward_to_forward_index_map = {}

    def parse_forward_blocks(self, program):

        while program.current_block_idx != 0:
            program._rollback()

        assert program.current_block_idx == 0

        for idx, block in enumerate(program.blocks):

            assert idx == block.idx, "index doesn't match"
            assert block.forward_block_idx == -1, "forward_block_idx of forward block [{}] should be -1, but got [{}]".format(
                idx, block.forward_block_idx)
            self.forward_indices.append(idx)
            self.nblock += 1

        assert self.nblock >= 1

    def parse_backward_blocks(self, program):

        assert 0 in self.forward_indices, "forward block indices are {}".format(
            self.forward_indices)
        self.backward_to_forward_index_map[0] = 0

        for idx, block in enumerate(program.blocks):

            if idx < len(self.forward_indices):
                continue

            assert idx == block.idx, "index doesn't match"
            assert block.forward_block_idx in self.forward_indices
            self.backward_indices.append(idx)
            self.backward_to_forward_index_map[idx] = block.forward_block_idx
            self.nblock += 1

        assert self.nblock == len(program.blocks)
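
# Usage sketch for BlockState (illustrative; assumes backward blocks are added
# to `main_program` by something like append_backward between the two calls):
#
#     block_state = BlockState()
#     block_state.parse_forward_blocks(main_program)
#     # ... backward blocks are appended to main_program here ...
#     block_state.parse_backward_blocks(main_program)
#     fwd_idx = block_state.backward_to_forward_index_map[bwd_idx]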