#   Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import copy
from collections import defaultdict

import paddle.fluid
from paddle.fluid import framework
from paddle.fluid.framework import get_flags, set_flags
from paddle.fluid import core
from paddle.distributed.passes import PassContext

from .dist_attribute import TensorDistributedAttribute
from .dist_attribute import OperatorDistributedAttribute
from .dist_tensor import DistributedTensor
from .dist_op import DistributedOperator
from .process_mesh import ProcessMesh

# There is always a default distributed context for the user, and the user can replace it with another one.
_g_default_distributed_context = None


def get_default_distributed_context():
    global _g_default_distributed_context
    if _g_default_distributed_context is None:
        dist_context = DistributedContext()
        set_default_distributed_context(dist_context)
    return _g_default_distributed_context


def set_default_distributed_context(dist_context):
    global _g_default_distributed_context
    _g_default_distributed_context = dist_context
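
# A minimal usage sketch (illustrative only): annotations made through the
# auto-parallel APIs are recorded in the default context, e.g.
#
#     dist_context = get_default_distributed_context()
#     dist_context.add_process_mesh(ProcessMesh([0, 1]))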


def _node_id(node):
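    # A node is identified by the pair (graph_id, node_id), since raw node
    # ids are only unique within a single subgraph.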
    return (node.node.graph_id(), node.node.id())


class DistributedContext:
    """
    DistributedContext is used to collect related distributed information for program and graph.
    Each auto-parallel run should use its own DistributedContext to avoid interfering with other runs.
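
    A minimal construction sketch (illustrative; main_prog and startup_prog
    stand for user-built Programs):

        dist_context = DistributedContext(serial_main_prog=main_prog,
                                          serial_startup_prog=startup_prog)
        dist_context.initialize()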
    """

    def __init__(self,
                 serial_main_prog=None,
                 serial_startup_prog=None,
                 serial_optimizer=None,
                 serial_loss=None,
                 feed_vars=None,
                 fetch_vars=None,
                 cluster=None,
                 strategy=None):
        # Data members related to the original programs (unchanged)
        self._original_serial_main_program = serial_main_prog
        self._original_serial_startup_program = serial_startup_prog
        self._original_serial_optimizer = serial_optimizer
        self._original_serial_loss = serial_loss
        # Use fresh dicts to avoid sharing mutable default arguments
        self._original_serial_feed_vars = (feed_vars
                                           if feed_vars is not None else {})
        self._original_serial_fetch_vars = (fetch_vars
                                            if fetch_vars is not None else {})

        # Data members related to programs (changed)
        self._serial_main_program = None
        self._serial_startup_program = None
        self._serial_loss = None
        self._serial_optimizer = None
        self._serial_feed_vars = {}
        self._serial_fetch_vars = {}

        # Data members related to the program
        self._dist_tensors_for_program = {}
        self._dist_ops_for_program = {}

        # Data members related to the graph
        self._serial_graph = None
        self._dist_tensors_for_graph = {}
        self._dist_ops_for_graph = {}
        self._node_id_to_tensor_id = {}
        self._node_id_to_op_id = {}

        # Data members related to the distributed programs
        self._dist_main_programs = {}
        self._dist_startup_programs = {}
        self._dist_op_context = DistributedOperatorContext()
        self._process_meshes = []

        self._cluster = cluster
        self._strategy = strategy

        # Pass Context
        self._pass_context = PassContext()
        self._block_state = BlockState()

        # Other data members
        self._serial_ordered_tensor_nodes = []
        self._serial_ordered_op_nodes = []
        self._serial_ordered_nodes = []
        # self._tensor_id_to_tensor_node_ids = {}

        self._is_initialized = False
        self._need_copy_dist_attr_to_graph = False
        self._backup_pass_context_stack = []
        self._backup_block_state_stack = []
        self._backup_dist_tensors_for_program_stack = []
        self._backup_dist_ops_for_program_stack = []
        self._backup_serial_main_program_stack = []
        self._backup_serial_startup_program_stack = []

        # Flag indicating whether to scale the gradient by the data-parallel world size
        self._gradient_scale = True

    @property
    def serial_main_program(self):
        return self._serial_main_program

    @property
    def serial_startup_program(self):
        return self._serial_startup_program

    @property
    def serial_loss(self):
        return self._serial_loss

    @property
    def serial_optimizer(self):
        return self._serial_optimizer

    @property
    def serial_feed_vars(self):
        return self._serial_feed_vars

    @property
    def serial_fetch_vars(self):
        return self._serial_fetch_vars
    @property
    def dist_main_programs(self):
        return self._dist_main_programs

    @property
    def dist_startup_programs(self):
        return self._dist_startup_programs

    @property
    def cluster(self):
        return self._cluster

    @property
    def strategy(self):
        return self._strategy

    @property
    def serial_graph(self):
        return self._serial_graph

    @property
    def serial_ordered_nodes(self):
        return self._serial_ordered_nodes

    @property
    def process_meshes(self):
        return self._process_meshes

    @property
    def pass_context(self):
        return self._pass_context

    @property
    def dist_op_context(self):
        return self._dist_op_context

    @property
    def block_state(self):
        return self._block_state

    @property
    def has_annotation(self):
        return len(self._dist_tensors_for_program) > 0 or len(
            self._dist_ops_for_program) > 0

    @property
    def gradient_scale(self):
        return self._gradient_scale

    @gradient_scale.setter
    def gradient_scale(self, gs):
        self._gradient_scale = gs

    def _backup_serial_info(self, mode):
        self._backup_serial_main_program_stack.append(
            self._serial_main_program.clone())
        self._backup_serial_startup_program_stack.append(
            self._serial_startup_program.clone())
        self._backup_pass_context_stack.append(
            copy.deepcopy(self._pass_context))
        self._backup_block_state_stack.append(copy.deepcopy(self._block_state))

    def _backup_dist_info(self, mode):
        self._backup_dist_tensors_for_program_stack.append(
            copy.deepcopy(self._dist_tensors_for_program))
        self._backup_dist_ops_for_program_stack.append(
            copy.deepcopy(self._dist_ops_for_program))

    def _backup(self, serial=True, serial_mode=None, dist=True, dist_mode=None):
        # Use this function carefully
        if serial:
            self._backup_serial_info(serial_mode)
        if dist:
            self._backup_dist_info(dist_mode)

    def _restore_serial_info(self, mode="to_backup"):
        if mode == "to_backup":
            self._serial_main_program = self._backup_serial_main_program_stack.pop(
            )
            self._serial_startup_program = self._backup_serial_startup_program_stack.pop(
            )
        elif mode == "to_original":
            assert self._original_serial_main_program is not None
            assert self._original_serial_startup_program is not None
            self._serial_main_program = self._original_serial_main_program.clone(
            )
            self._serial_startup_program = self._original_serial_startup_program.clone(
            )

        self._serial_optimizer = self._original_serial_optimizer

        if self._original_serial_loss:
            if isinstance(self._original_serial_loss, list):
                assert len(self._original_serial_loss) == 1
                loss = self._original_serial_loss[0]
                block_idx = loss.block.idx
                var_name = loss.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                self._serial_loss = var
            else:
                block_idx = self._original_serial_loss.block.idx
                var_name = self._original_serial_loss.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                self._serial_loss = var

        for key, var_list in self._original_serial_feed_vars.items():
            new_var_list = []
            for var in var_list:
                block_idx = var.block.idx
                var_name = var.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                new_var_list.append(var)
            self._serial_feed_vars[key] = new_var_list

        for key, var_list in self._original_serial_fetch_vars.items():
            new_var_list = []
            for var in var_list:
                block_idx = var.block.idx
                var_name = var.name
                var = self._serial_main_program.blocks[
                    block_idx]._var_recursive(var_name)
                new_var_list.append(var)
            self._serial_fetch_vars[key] = new_var_list

        self._pass_context = self._backup_pass_context_stack.pop()
        self._block_state = self._backup_block_state_stack.pop()

    def _restore_dist_info(self, mode="to_backup"):
        if mode == "to_backup":
            self._dist_tensors_for_program = self._backup_dist_tensors_for_program_stack.pop(
            )
            self._dist_ops_for_program = self._backup_dist_ops_for_program_stack.pop(
            )
        elif mode == "to_original":
            assert self._original_dist_tensors_for_program
            assert self._original_dist_ops_for_program
            self._dist_tensors_for_program = copy.deepcopy(
                self._original_dist_tensors_for_program)
            self._dist_ops_for_program = copy.deepcopy(
                self._original_dist_ops_for_program)
        elif mode == "to_default":
            new_tensors_ids = []
            for tensor_id, dist_tensor in self._dist_tensors_for_program.items(
            ):
                if tensor_id in self._tensors_ids:
                    dist_tensor.dist_attr.reset()
                else:
                    new_tensors_ids.append(tensor_id)
            for tensor_id in new_tensors_ids:
                self._dist_tensors_for_program.pop(tensor_id)
            new_ops_ids = []
            for op_id, dist_op in self._dist_ops_for_program.items():
                if op_id in self._ops_ids:
                    dist_op.dist_attr.reset()
                else:
                    new_ops_ids.append(op_id)
            for op_id in new_ops_ids:
                self._dist_ops_for_program.pop(op_id)
        else:
            new_tensors_ids = []
            for tensor_id, dist_tensor in self._dist_tensors_for_program.items(
            ):
                new_tensors_ids.append(tensor_id)
            for tensor_id in new_tensors_ids:
                self._dist_tensors_for_program.pop(tensor_id)
            new_ops_ids = []
            for op_id, dist_op in self._dist_ops_for_program.items():
                new_ops_ids.append(op_id)
            for op_id in new_ops_ids:
                self._dist_ops_for_program.pop(op_id)
        self._dist_main_programs = {}
        self._dist_startup_programs = {}
        self._dist_op_context = DistributedOperatorContext()
        self._need_copy_dist_attr_to_graph = True
        self._process_meshes = []

    def _restore(self,
                 serial=True,
                 serial_mode="to_backup",
                 dist=True,
                 dist_mode="to_backup"):
        # Use this function carefully
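        # A typical pairing (illustrative):
        #     dist_context._backup(serial=True, dist=True)
        #     ...  # experiment on the programs and dist attrs
        #     dist_context._restore(serial_mode="to_backup", dist_mode="to_backup")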
        if serial:
            self._restore_serial_info(serial_mode)
        if dist:
            self._restore_dist_info(dist_mode)

    def initialize(self):
        if not self._is_initialized:
            if not self._serial_main_program:
                self._serial_main_program = self._original_serial_main_program
            if not self._serial_startup_program:
                self._serial_startup_program = self._original_serial_startup_program
            if not self._serial_loss:
                if isinstance(self._original_serial_loss, list):
                    assert len(self._original_serial_loss) == 1
                    self._serial_loss = self._original_serial_loss[0]
                else:
                    self._serial_loss = self._original_serial_loss
            if not self._serial_optimizer:
                self._serial_optimizer = self._original_serial_optimizer
            if not self._serial_feed_vars:
                self._serial_feed_vars = self._original_serial_feed_vars
            if not self._serial_fetch_vars:
                self._serial_fetch_vars = self._original_serial_fetch_vars

            self._init_dist_attr_for_program()
            # Backup the original distributed information for later restore
            self._original_dist_tensors_for_program = copy.deepcopy(
                self._dist_tensors_for_program)
            self._original_dist_ops_for_program = copy.deepcopy(
                self._dist_ops_for_program)
            self._tensors_ids = list(self._dist_tensors_for_program.keys())
            self._ops_ids = list(self._dist_ops_for_program.keys())
            set_flags({"FLAGS_convert_all_blocks": True})
            self._serial_graph = framework.IrGraph(
                core.Graph(self._serial_main_program.desc))
            self._init_dist_attr_for_graph()
            self._is_initialized = True
            self._need_copy_dist_attr_to_graph = False
        if self._need_copy_dist_attr_to_graph:
            self.copy_dist_attr_from_program_to_graph()
    def add_process_mesh(self, process_mesh):
        assert isinstance(process_mesh, ProcessMesh), \
            'The type of process_mesh must be ProcessMesh.'
        if process_mesh not in self.process_meshes:
            self._process_meshes.append(process_mesh)

    def add_dist_tensor_for_program(self, dist_tensor):
        inner_serial_tensor = dist_tensor.serial_tensor
        inner_serial_tensor_id = inner_serial_tensor.desc.original_id()
        self._dist_tensors_for_program[inner_serial_tensor_id] = dist_tensor

    def add_dist_op_for_program(self, dist_op):
        inner_serial_op = dist_op.serial_op
        inner_serial_op_id = inner_serial_op.desc.original_id()
        self._dist_ops_for_program[inner_serial_op_id] = dist_op

    def get_dist_tensor_for_program(self, serial_tensor):
        serial_tensor_id = serial_tensor.desc.id()
        dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id, None)
        if dist_tensor:
            return dist_tensor
        else:
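            # A cloned program gives each desc a new id() but preserves its
            # original_id(), so fall back to the original id for the lookup.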
            serial_tensor_id = serial_tensor.desc.original_id()
            dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id,
                                                             None)
            if dist_tensor:
                return dist_tensor
            else:
                return None

    def get_dist_tensor_for_graph(self, serial_tensor_node):
        serial_tensor_node_id = _node_id(serial_tensor_node)
        return self._dist_tensors_for_graph.get(serial_tensor_node_id, None)

    def get_dist_op_for_program(self, serial_op):
        serial_op_id = serial_op.desc.id()
        dist_op = self._dist_ops_for_program.get(serial_op_id, None)
        if dist_op:
            return dist_op
        else:
            serial_op_id = serial_op.desc.original_id()
            dist_op = self._dist_ops_for_program.get(serial_op_id, None)
            if dist_op:
                return dist_op
            else:
                return None

    def del_dist_op_for_program(self, serial_op):
        serial_op_id = serial_op.desc.id()
        if self._dist_ops_for_program.get(serial_op_id, None):
            del self._dist_ops_for_program[serial_op_id]

    def get_dist_op_for_graph(self, serial_op_node):
        serial_op_node_id = _node_id(serial_op_node)
        return self._dist_ops_for_graph.get(serial_op_node_id, None)

    def get_tensor_dist_attr_for_program(self, serial_tensor):
        serial_tensor_id = serial_tensor.desc.id()
        dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id, None)
        if dist_tensor:
            return dist_tensor.dist_attr
        else:
            serial_tensor_id = serial_tensor.desc.original_id()
            dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id,
                                                             None)
            if dist_tensor:
                return dist_tensor.dist_attr
            else:
                return None

    def get_tensor_dist_attr_for_program_with_id(self, tensor_id):
        dist_tensor = self._dist_tensors_for_program.get(tensor_id, None)
        if dist_tensor:
            return dist_tensor.dist_attr
        else:
            return None

    def set_tensor_dist_attr_for_program(self, serial_tensor, dist_attr):
        dist_tensor = DistributedTensor(serial_tensor, dist_attr)
        self.add_dist_tensor_for_program(dist_tensor)

    def get_tensor_dist_attr_for_graph(self, serial_tensor_node):
        serial_tensor_node_id = _node_id(serial_tensor_node)
        dist_tensor = self._dist_tensors_for_graph.get(serial_tensor_node_id,
                                                       None)
        if dist_tensor:
            return dist_tensor.dist_attr
        else:
            return None

    def get_op_dist_attr_for_program(self, serial_op):
        serial_op_id = serial_op.desc.id()
        dist_op = self._dist_ops_for_program.get(serial_op_id, None)
        if dist_op:
            return dist_op.dist_attr
        else:
            serial_op_id = serial_op.desc.original_id()
            dist_op = self._dist_ops_for_program.get(serial_op_id, None)
            if dist_op:
                return dist_op.dist_attr
            else:
                return None

    def get_op_dist_attr_for_program_with_id(self, op_id):
        dist_op = self._dist_ops_for_program.get(op_id, None)
        if dist_op:
            return dist_op.dist_attr
        else:
            return None

    def set_op_dist_attr_for_program(self, serial_op, dist_attr):
        dist_op = DistributedOperator(serial_op, dist_attr)
        self.add_dist_op_for_program(dist_op)

    def get_op_dist_attr_for_graph(self, serial_op_node):
        serial_op_node_id = _node_id(serial_op_node)
        dist_op = self._dist_ops_for_graph.get(serial_op_node_id, None)
        if dist_op:
            return dist_op.dist_attr
        else:
            return None

    def get_dist_attr_for_graph(self, serial_node):
        if serial_node.is_var() and serial_node.var() is not None:
            serial_tensor_node_id = _node_id(serial_node)
            dist_tensor = self._dist_tensors_for_graph.get(
                serial_tensor_node_id, None)
            if dist_tensor:
                return dist_tensor.dist_attr
            else:
                return None
        if serial_node.is_op() and serial_node.op() is not None:
            serial_op_node_id = _node_id(serial_node)
            dist_op = self._dist_ops_for_graph.get(serial_op_node_id, None)
            if dist_op:
                return dist_op.dist_attr
            else:
                return None
        return None

    def _init_dist_attr_for_program(self, no_default=False):
        # Copy the dist tensors and dist ops annotated by users from the default context
        if not no_default:
            default_ctx = get_default_distributed_context()
            self._process_meshes = copy.deepcopy(default_ctx.process_meshes)
        else:
            default_ctx = self
        for block in self._serial_main_program.blocks:
            for tensor in block.vars.values():
                # Copy the distributed tensors in the default context
                default_dist_tensor = default_ctx.get_dist_tensor_for_program(
                    tensor)
                if default_dist_tensor and default_ctx is not self:
                    self.add_dist_tensor_for_program(default_dist_tensor)
                current_dist_tensor = self.get_dist_tensor_for_program(tensor)
                if current_dist_tensor is None:
                    dist_tensor = DistributedTensor(tensor)
                    self.add_dist_tensor_for_program(dist_tensor)
            for op in block.ops:
                # Copy the distributed operators in the default context
                default_dist_op = default_ctx.get_dist_op_for_program(op)
                if default_dist_op and default_ctx is not self:
                    self.add_dist_op_for_program(default_dist_op)
                current_dist_op = self.get_dist_op_for_program(op)
                if current_dist_op is None:
                    dist_op = DistributedOperator(op)
                    self.add_dist_op_for_program(dist_op)
        self._original_dist_tensors_for_program = copy.deepcopy(
            self._dist_tensors_for_program)
        self._original_dist_ops_for_program = copy.deepcopy(
            self._dist_ops_for_program)
    def _order_nodes_by_program_order(self):
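        # Order the graph nodes to match the serial program: op nodes are
        # sorted by their original desc ids (program order), and each op's
        # not-yet-seen input/output tensor nodes are interleaved around it.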
        def _contains(nodes, target_node):
            for node in nodes:
                if _node_id(node) == _node_id(target_node):
                    return True
            return False

        serial_ordered_tensor_nodes = []
        serial_ordered_op_nodes = []
        all_nodes = []
        for idx, graph in enumerate(self._serial_graph.all_sub_graphs()):
            for node in graph.all_nodes():
                all_nodes.append(node)
        for node in all_nodes:
            if node.is_var() and node.var() is not None:
                serial_ordered_tensor_nodes.append(node)
            if node.is_op() and node.op() is not None:
                serial_ordered_op_nodes.append(node)
        serial_ordered_tensor_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        serial_ordered_op_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        num_nodes_before = len(serial_ordered_tensor_nodes) + len(
            serial_ordered_op_nodes)

        new_serial_ordered_tensor_nodes = []
        new_serial_ordered_op_nodes = []
        new_serial_ordered_nodes = []
        for op_node in serial_ordered_op_nodes:
            tensor_nodes = []
            for tensor_node in op_node.inputs:
                if tensor_node.is_var() \
                    and tensor_node.var() is not None \
                    and not _contains(new_serial_ordered_nodes, tensor_node):
                    tensor_nodes.append(tensor_node)
                    new_serial_ordered_tensor_nodes.append(tensor_node)
            tensor_nodes.sort(key=lambda node: node.node.original_desc_id())
            new_serial_ordered_nodes.extend(tensor_nodes)
            new_serial_ordered_nodes.append(op_node)
            new_serial_ordered_op_nodes.append(op_node)
            tensor_nodes = []
            for tensor_node in op_node.outputs:
                if tensor_node.is_var() \
                    and tensor_node.var() is not None \
                    and not _contains(new_serial_ordered_nodes, tensor_node):
                    tensor_nodes.append(tensor_node)
                    new_serial_ordered_tensor_nodes.append(tensor_node)
            tensor_nodes.sort(key=lambda node: node.node.original_desc_id())
            new_serial_ordered_nodes.extend(tensor_nodes)
        new_serial_ordered_tensor_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        new_serial_ordered_op_nodes.sort(
            key=lambda node: node.node.original_desc_id())
        self._serial_ordered_tensor_nodes = new_serial_ordered_tensor_nodes
        self._serial_ordered_op_nodes = new_serial_ordered_op_nodes
        self._serial_ordered_nodes = new_serial_ordered_nodes
        assert len(self._serial_ordered_nodes) == len(
            self._serial_ordered_tensor_nodes) + len(
                self._serial_ordered_op_nodes)
        self._serial_orphan_tensor_nodes = []
        for tensor_node in serial_ordered_tensor_nodes:
            if not _contains(self._serial_ordered_tensor_nodes, tensor_node):
                self._serial_orphan_tensor_nodes.append(tensor_node)
        if len(self._serial_ordered_nodes) != num_nodes_before:
            print(
                "WARNING: there are some orphan tensors or ops which are not used in the execution."
            )

    def _init_dist_attr_for_graph(self):
        # Convert program to graph and initialize the distributed attributes
        self._order_nodes_by_program_order()
        for node in self.serial_ordered_nodes:
            if node.is_var() and node.var() is not None:
                dist_tensor = None
                tensor_id = node.node.original_desc_id()
                for cur_tensor_id, cur_dist_tensor in self._dist_tensors_for_program.items(
                ):
                    if tensor_id == cur_tensor_id \
                        or tensor_id == cur_dist_tensor.serial_tensor.desc.original_id():
                        dist_tensor = cur_dist_tensor
                        self._node_id_to_tensor_id[_node_id(
                            node)] = cur_tensor_id
                assert dist_tensor is not None, \
                    "Tensor must have a distributed tensor after the initialization for program."
                serial_tensor_node_id = _node_id(node)
                new_dist_tensor = DistributedTensor(dist_tensor.serial_tensor,
                                                    dist_tensor.dist_attr)
                self._dist_tensors_for_graph[
                    serial_tensor_node_id] = new_dist_tensor
            if node.is_op() and node.op() is not None:
                dist_op = None
                op_id = node.node.original_desc_id()
                for cur_op_id, cur_dist_op in self._dist_ops_for_program.items(
                ):
                    if op_id == cur_op_id \
                        or op_id == cur_dist_op.serial_op.desc.original_id():
                        dist_op = cur_dist_op
                        self._node_id_to_op_id[_node_id(node)] = cur_op_id
                assert dist_op is not None, \
                    "Operator must have a distributed operator after the initialization for program."
                serial_op_node_id = _node_id(node)
                new_dist_op = DistributedOperator(dist_op.serial_op,
                                                  dist_op.dist_attr)
                self._dist_ops_for_graph[serial_op_node_id] = new_dist_op

    def clear_dist_info_for_program(self):
        self._dist_tensors_for_program.clear()
        self._dist_ops_for_program.clear()

    def clear_dist_info_for_graph(self):
        self._dist_tensors_for_graph.clear()
        self._dist_ops_for_graph.clear()

    def copy_dist_attr_from_program_to_graph(self):
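        # Mirrors _init_dist_attr_for_graph: re-syncs the graph-side dist
        # attributes from the program side, e.g. after a _restore has reset
        # the program-side annotations.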
        for node in self.serial_ordered_nodes:
            if node.is_var() and node.var() is not None:
                dist_tensor = None
                tensor_id = node.node.original_desc_id()
                for cur_tensor_id, cur_dist_tensor in self._dist_tensors_for_program.items(
                ):
                    if tensor_id == cur_tensor_id \
                        or tensor_id == cur_dist_tensor.serial_tensor.desc.original_id():
                        dist_tensor = cur_dist_tensor
                assert dist_tensor is not None, \
                    "Tensor must have a distributed tensor after the initialization for program."
                serial_tensor_node_id = _node_id(node)
                new_dist_tensor = DistributedTensor(dist_tensor.serial_tensor,
                                                    dist_tensor.dist_attr)
                self._dist_tensors_for_graph[
                    serial_tensor_node_id] = new_dist_tensor
            if node.is_op() and node.op() is not None:
                dist_op = None
                op_id = node.node.original_desc_id()
                for cur_op_id, cur_dist_op in self._dist_ops_for_program.items(
                ):
                    if op_id == cur_op_id \
                        or op_id == cur_dist_op.serial_op.desc.original_id():
                        dist_op = cur_dist_op
                assert dist_op is not None, \
                    "Operator must have a distributed operator after the initialization for program."
                serial_op_node_id = _node_id(node)
                new_dist_op = DistributedOperator(dist_op.serial_op,
                                                  dist_op.dist_attr)
                self._dist_ops_for_graph[serial_op_node_id] = new_dist_op

    def copy_dist_attr_from_graph_to_program(self):
        assert self._is_initialized, \
            "Both program and graph must be initialized."
        updated_tensors = {}
        # all_nodes = self._serial_graph.all_nodes()
        all_nodes = self._serial_ordered_nodes
        for node in all_nodes:
            if node.is_var() and node.var() is not None:
                tensor_id = self._node_id_to_tensor_id[_node_id(node)]
                updated = updated_tensors.get(tensor_id, False)
                # If a var has multiple var nodes in the graph, only use the first one for now
                if not updated:
                    tensor_dist_attr_for_graph = self.get_tensor_dist_attr_for_graph(
                        node)
                    dist_tensor_for_program = self._dist_tensors_for_program[
                        tensor_id]
                    dist_tensor_for_program.dist_attr = tensor_dist_attr_for_graph
                    updated_tensors[tensor_id] = True
            if node.is_op() and node.op() is not None:
                op_id = self._node_id_to_op_id[_node_id(node)]
                op_dist_attr_for_graph = self.get_op_dist_attr_for_graph(node)
                dist_op_for_program = self._dist_ops_for_program[op_id]
                dist_op_for_program.dist_attr = op_dist_attr_for_graph
        # TODO: the completion algorithm skips orphan tensors for now,
        # so we just set their process_mesh to the first one.
        for orphan_node in self._serial_orphan_tensor_nodes:
            serial_tensor_id = orphan_node.var().id()
            dist_tensor = self._dist_tensors_for_program.get(serial_tensor_id,
                                                             None)
            if dist_tensor:
                dist_tensor.dist_attr.process_mesh = self._process_meshes[0]
            else:
                serial_tensor_id = orphan_node.var().original_id()
                dist_tensor = self._dist_tensors_for_program.get(
                    serial_tensor_id, None)
                dist_tensor.dist_attr.process_mesh = self._process_meshes[0]

    def amend_dist_attr_for_program(self):
        for dist_tensor in self._dist_tensors_for_program.values():
            serial_tensor = dist_tensor.serial_tensor
            dist_attr = dist_tensor.dist_attr
            if serial_tensor.type == core.VarDesc.VarType.READER \
                or serial_tensor.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \
                or serial_tensor.type == core.VarDesc.VarType.STEP_SCOPES:
                tensor_shape = []
            else:
                tensor_shape = serial_tensor.shape
            dims_mapping = dist_attr.dims_mapping
            process_mesh_shape = dist_attr.process_mesh.topology
            process_mesh_processes = dist_attr.process_mesh.processes
            # If a tensor dimension is smaller than the process-mesh dimension
            # it is mapped to, we just amend the dims_mapping to -1 (i.e. do
            # not shard it). (Is this really OK?)
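            # Illustrative example: a tensor of shape [2, 8] with dims_mapping
            # [0, -1] on a mesh of topology [4, 2] would split a dimension of
            # size 2 across 4 processes, so dims_mapping[0] is reset to -1.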
            for i in range(len(tensor_shape)):
                if dims_mapping[i] != -1 and tensor_shape[i] > 0 \
                    and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]:
                    dims_mapping[i] = -1
                if dims_mapping[i] != -1 and len(process_mesh_processes) == 1:
                    dims_mapping[i] = -1

        for dist_op in self._dist_ops_for_program.values():
            serial_op = dist_op.serial_op
            dist_attr = dist_op.dist_attr
            process_mesh_shape = dist_attr.process_mesh.topology
            process_mesh_processes = dist_attr.process_mesh.processes
            for arg_name in serial_op.input_arg_names:
                if dist_op.get_serial_input(arg_name) is None:
                    tensor_shape = []
                else:
                    if dist_op.get_serial_input(arg_name).type == core.VarDesc.VarType.READER \
                        or dist_op.get_serial_input(arg_name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \
                        or dist_op.serial_op.type == "create_py_reader":
                        tensor_shape = []
                    else:
                        tensor_shape = dist_op.get_serial_input(arg_name).shape
                dims_mapping = dist_attr.get_input_dims_mapping(arg_name)
                # If a tensor dimension is smaller than the process-mesh dimension
                # it is mapped to, we just amend the dims_mapping to -1 (i.e. do
                # not shard it). (Is this really OK?)
                for i in range(len(tensor_shape)):
                    if dims_mapping[i] != -1 and tensor_shape[i] > 0 \
                        and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]:
                        dims_mapping[i] = -1
                    if dims_mapping[i] != -1 and len(
                            process_mesh_processes) == 1:
                        dims_mapping[i] = -1
            for arg_name in serial_op.output_arg_names:
                if dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.READER \
                    or dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.LOD_TENSOR_ARRAY \
                    or dist_op.get_serial_output(arg_name).type == core.VarDesc.VarType.STEP_SCOPES:
                    tensor_shape = []
                else:
                    tensor_shape = dist_op.get_serial_output(arg_name).shape
                dims_mapping = dist_attr.get_output_dims_mapping(arg_name)
                # If a tensor dimension is smaller than the process-mesh dimension
                # it is mapped to, we just amend the dims_mapping to -1 (i.e. do
                # not shard it). (Is this really OK?)
                for i in range(len(tensor_shape)):
                    if dims_mapping[i] != -1 and tensor_shape[i] > 0 \
                        and process_mesh_shape[dims_mapping[i]] > tensor_shape[i]:
                        dims_mapping[i] = -1
                    if dims_mapping[i] != -1 and len(
                            process_mesh_processes) == 1:
                        dims_mapping[i] = -1
            if len(process_mesh_processes) == 1:
                dist_op.dist_attr.impl_type = "default"
                dist_op.dist_attr.impl_idx = 0

    def validate_dist_attr_for_program(self):
        if not self._is_initialized:
            assert False, \
                "Program must be initialized before validating its distributed attributes"
        for block in self.serial_main_program.blocks:
            for tensor in block.vars.values():
                dist_tensor = self.get_dist_tensor_for_program(tensor)
                assert dist_tensor is not None, \
                    "Tensor {} does not have a distributed attribute.".format(
                        tensor.name)
                if (dist_tensor is not None) and (
                        not dist_tensor.validate_dist_attr()):
                    assert False, "Tensor {} (id: {}, original_id: {}) has wrong distributed attributes {}.".format(
                        dist_tensor.serial_tensor.name,
                        dist_tensor.serial_tensor.desc.id(),
                        dist_tensor.serial_tensor.desc.original_id(),
                        dist_tensor.dist_attr)
            for op in block.ops:
                dist_op = self.get_dist_op_for_program(op)
                assert dist_op is not None, \
                    "Operator {} does not have a distributed attribute.".format(
                        op.type)
                if (dist_op is not None) and (not dist_op.validate_dist_attr()):
                    assert False, "Operator {} (id: {}, original_id: {}) has wrong distributed attributes {}.".format(
                        dist_op.serial_op.type,
                        dist_op.serial_op.desc.id(),
                        dist_op.serial_op.desc.original_id(), dist_op.dist_attr)
        return True

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
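            # Program, graph, and ordered-node members are shared rather than
            # deep-copied; everything else gets a real copy.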
            if k in [
                "_original_serial_main_program", "_original_serial_startup_program", \
                "_serial_main_program", "_serial_startup_program", "_serial_graph", \
                "_dist_main_programs", "_dist_startup_programs", \
                "_serial_ordered_nodes", "_serial_ordered_tensor_nodes", \
                "_serial_ordered_op_nodes"]:
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))

        # update dist tensor's dist_context
        for key in result._dist_tensors_for_program.keys():
            result._dist_tensors_for_program[key]._dist_context = result
        return result


class DistributedOperatorContext:
    """
    DistributedOperatorContext is used to create a dist op desc in Program.
    Each time a new dist op is created, the context should be updated accordingly.
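
    A rough usage sketch (illustrative; assumes varname_mapping has been
    populated and that block and src_op come from the caller):

        dist_op_context = dist_context.dist_op_context
        dist_op_context.work_block = block
        kinputs, koutputs = dist_op_context.prepare_context(src_op)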
    """

    def __init__(self):
        self._dst_main_program = None
        self._main_block = None
        self._dst_startup_program = None
        self._startup_block = None
        self._cur_src_op = None
        self._cur_dist_attr = None
        self.grad_op_id_to_op_id = {}
        self.grad_var_to_var = defaultdict(dict)
        self._work_block = None
        self.already_init_sync_vars = set()
        self.varname_mapping = None
        self.rank_id = None

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k in [
                    "_dst_main_program", "_dst_startup_program", "_cur_src_op",
                    "_work_block", "_main_block", "_startup_block"
            ]:
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        return result

    @property
    def dst_main_program(self):
        return self._dst_main_program

    @dst_main_program.setter
    def dst_main_program(self, prog):
        self._dst_main_program = prog
        self._main_block = prog.blocks[0]

    @property
    def main_block(self):
        return self._main_block

    @property
    def dst_startup_program(self):
        return self._dst_startup_program

    @dst_startup_program.setter
    def dst_startup_program(self, prog):
        self._dst_startup_program = prog
        self._startup_block = prog.blocks[0]

    @property
    def startup_block(self):
        return self._startup_block

    @property
    def work_block(self):
        assert self._work_block is not None
        return self._work_block

    @work_block.setter
    def work_block(self, block):
        assert block is not None
        self._work_block = block

    @property
    def cur_src_op(self):
        assert self._cur_src_op is not None
        return self._cur_src_op

    def prepare_context(self, src_op):
        self._cur_src_op = src_op

        # build input varname mapping
        kinputs = {}
        for input_name in src_op.desc.input_names():
            varnames = []
            for varname in src_op.desc.input(input_name):
                assert varname in self.varname_mapping
                varnames.append(self.varname_mapping[varname])
            kinputs[input_name] = varnames

        # build output varname mapping
        koutputs = {}
        for output_name in src_op.desc.output_names():
            varnames = []
            for varname in src_op.desc.output(output_name):
                assert varname in self.varname_mapping
                varnames.append(self.varname_mapping[varname])
            koutputs[output_name] = varnames

        return kinputs, koutputs


class BlockState(object):
    def __init__(self):
        self.nblock = 0
        self.forward_indices = []
        self.backward_indices = []
        self.backward_to_forward_index_map = {}

    def parse_forward_blocks(self, program):

        while program.current_block_idx != 0:
            program._rollback()

        assert program.current_block_idx == 0

        for idx, block in enumerate(program.blocks):

            assert idx == block.idx, "index doesn't match"
            assert block.forward_block_idx == -1, "forward_block_idx of forward block [{}] should be -1, but got [{}]".format(
                idx, block.forward_block_idx)
            self.forward_indices.append(idx)
            self.nblock += 1

        assert self.nblock >= 1

    def parse_backward_blocks(self, program):

        assert 0 in self.forward_indices, "forward block indices are {}".format(
            self.forward_indices)
        self.backward_to_forward_index_map[0] = 0

        for idx, block in enumerate(program.blocks):

            if idx < len(self.forward_indices):
                continue

            assert idx == block.idx, "index doesn't match"
            assert block.forward_block_idx in self.forward_indices
            self.backward_indices.append(idx)
            self.backward_to_forward_index_map[idx] = block.forward_block_idx
            self.nblock += 1

        assert self.nblock == len(program.blocks)
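
# A rough usage sketch (illustrative): BlockState is filled in two phases,
# first on the forward-only program and again after the backward pass has
# appended its blocks:
#
#     block_state = BlockState()
#     block_state.parse_forward_blocks(main_program)
#     # ... append backward blocks, e.g. via auto differentiation ...
#     block_state.parse_backward_blocks(main_program)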