# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import six
import numpy as np
import warnings
from collections import OrderedDict

from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.dygraph import layers
from paddle.fluid.dygraph import parallel_helper
from paddle.fluid.dygraph import to_variable, no_grad
from paddle.utils import deprecated
from ..layers import collective
import paddle
import itertools

__all__ = ["prepare_context", "ParallelEnv", "DataParallel"]

ParallelStrategy = core.ParallelStrategy


@deprecated(since="2.0.0", update_to="paddle.distributed.init_parallel_env")
def prepare_context(strategy=None):
    '''
    :api_attr: imperative
    '''
    if strategy is None:
        strategy = ParallelStrategy()
        strategy.nranks = Env().nranks
        strategy.local_rank = Env().local_rank
        strategy.trainer_endpoints = Env().trainer_endpoints
        strategy.current_endpoint = Env().current_endpoint
    if strategy.nranks < 2:
        return
    assert framework.in_dygraph_mode() is True, \
        "dygraph.prepare_context should be used with dygraph mode."
    place = framework._current_expected_place()
    assert place is not None, \
        "dygraph.prepare_context should be used in fluid.dygraph.guard(place) guard."
    if not parallel_helper._is_parallel_ctx_initialized():
        if isinstance(place, core.CUDAPlace):
            parallel_helper._set_parallel_ctx(
                core.NCCLParallelContext(strategy, place))
        elif isinstance(place, core.XPUPlace):
            parallel_helper._set_parallel_ctx(
                core.BKCLParallelContext(strategy, place))
        else:
            # TODO(Yancey1989): add Gloo Parallel Context to support CPU parallel computation
            assert False, "Only support CUDAPlace or XPUPlace for now."
        parallel_helper._init_parallel_ctx()
    return strategy


class ParallelEnv(object):
    """
    .. note::
        This API is not recommended. If you need to get rank and world_size,
        it is recommended to use ``paddle.distributed.get_rank()`` and
        ``paddle.distributed.get_world_size()`` instead.

    This class is used to obtain the environment variables required for
    the parallel execution of ``paddle.nn.Layer`` in dynamic mode.

    The parallel execution in dynamic mode needs to be started using ``paddle.distributed.launch``
    or ``paddle.distributed.spawn`` .

    Examples:
      .. code-block:: python

        import paddle
        import paddle.distributed as dist

        def train():
            # 1. initialize parallel environment
            dist.init_parallel_env()

            # 2. get current ParallelEnv
            parallel_env = dist.ParallelEnv()
            print("rank: ", parallel_env.rank)
            print("world_size: ", parallel_env.world_size)

            # print result in process 1:
            # rank: 0
            # world_size: 2
            # print result in process 2:
            # rank: 1
            # world_size: 2

        if __name__ == '__main__':
            # 1. start by ``paddle.distributed.spawn`` (default)
            dist.spawn(train, nprocs=2)
            # 2. start by ``paddle.distributed.launch``
            # train()
    """

    def __init__(self):
        self._rank = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        self._world_size = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))

        # imperative only support one gpu or xpu
        if core.is_compiled_with_cuda():
            selected_gpus = os.getenv("FLAGS_selected_gpus", "0").split(",")
            self._device_id = int(selected_gpus[0])
        elif core.is_compiled_with_xpu():
            selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
            self._device_id = int(selected_xpus[0])

        self._trainer_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS",
                                            "").split(",")
        self._current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT", "")
        self._nrings = int(os.getenv("FLAGS_nccl_nrings", "1"))
        assert self._nrings > 0, \
            "nccl_nrings must be an integer greater than 0."
        assert self._nrings < 9, \
            "nccl_nrings should be less than 9, which is enough in most scenarios."

    @property
    def rank(self):
        """
        Rank of current trainer.

        Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ID`` . The default value is 0.

        Examples:
          .. code-block:: python

            # execute this command in terminal: export PADDLE_TRAINER_ID=0
            import paddle.distributed as dist

            env = dist.ParallelEnv()
            print("The rank is %d" % env.rank)
            # The rank is 0
        """
        return self._rank

    @property
    def world_size(self):
        """
        The number of trainers (number of processes participating in current job).

        Its value is equal to the value of the environment variable ``PADDLE_TRAINERS_NUM`` . The default value is 1.

        Examples:
          .. code-block:: python

            # execute this command in terminal: export PADDLE_TRAINERS_NUM=4
            import paddle.distributed as dist

            env = dist.ParallelEnv()
            print("The world_size is %d" % env.world_size)
            # The world_size is 4
        """
        return self._world_size

    @property
    def device_id(self):
        """
        The ID of selected GPU card for parallel training.

        Its value is equal to the value of the environment variable ``FLAGS_selected_gpus`` . The default value is 0.

        Examples:
          .. code-block:: python

            # execute this command in terminal: export FLAGS_selected_gpus=1
            import paddle.distributed as dist

            env = dist.ParallelEnv()
            print("The device id is %d" % env.device_id)
            # The device id is 1
        """
        return self._device_id

    @property
    def current_endpoint(self):
        """
        The endpoint of the current trainer, in the form of (node IP + port).

        Its value is equal to the value of the environment variable ``PADDLE_CURRENT_ENDPOINT`` . The default value is "".

        Examples:
          .. code-block:: python

            # execute this command in terminal: export PADDLE_CURRENT_ENDPOINT=127.0.0.1:6170
            import paddle.distributed as dist

            env = dist.ParallelEnv()
            print("The current endpoint is %s" % env.current_endpoint)
            # The current endpoint is 127.0.0.1:6170
        """
        return self._current_endpoint

    @property
    def trainer_endpoints(self):
        """
        The endpoints of all trainer nodes in the task,
        which are used to broadcast the NCCL ID when NCCL2 is initialized.

        Its value is equal to the value of the environment variable ``PADDLE_TRAINER_ENDPOINTS`` . The default value is "".

        Examples:
          .. code-block:: python

            # execute this command in terminal: export PADDLE_TRAINER_ENDPOINTS=127.0.0.1:6170,127.0.0.1:6171
            import paddle.distributed as dist

            env = dist.ParallelEnv()
            print("The trainer endpoints are %s" % env.trainer_endpoints)
            # The trainer endpoints are ['127.0.0.1:6170', '127.0.0.1:6171']
        """
        return self._trainer_endpoints

    @property
    def nrings(self):
        """
        Nrings of current trainer.

        Its value is equal to the value of the environment variable ``FLAGS_nccl_nrings`` . The default value is 1.

        Examples:
          .. code-block:: python

            # execute this command in terminal: export FLAGS_nccl_nrings=1
            import paddle.distributed as dist
            
            env = dist.ParallelEnv()
            print("The nrings is %d" % env.nrings)
            # The nrings is 1
        """
        return self._nrings

    # [aliases] Compatible with old method names
    local_rank = rank
    nranks = world_size
    dev_id = device_id

# NOTE: [ Compatible ] Originally this class name is `Env`. The semantics of the old class names
# are inaccurate and may confuse users, so replace it with `ParallelEnv`, but to be compatible
# with the old examples, here still need to keep this name.
Env = ParallelEnv


def _build_default_parallel_strategy():
    strategy = ParallelStrategy()
    strategy.nranks = ParallelEnv().nranks
    strategy.local_rank = ParallelEnv().local_rank
    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
    strategy.current_endpoint = ParallelEnv().current_endpoint
    return strategy


def _coalesce_tensors(var_groups):
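    # Flatten every gradient in a group to 1-D and concatenate them into a single
    # tensor so that one collective call can cover the whole group. For example,
    # grads of shapes (2, 3) and (4,) are flattened and joined into one tensor of
    # length 10; the original shapes are kept so they can be restored afterwards.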
    from ..layers import nn
    coalesced_grads_and_grad_vars = []
    for group_id, grad_vars in var_groups.items():
        flattened_vars = []
        g_var_shapes = []
        for g_var in grad_vars:
            g_var_shapes.append(g_var.shape)
            flattened_vars.append(
                nn.reshape(
                    x=g_var, shape=[np.prod(g_var.shape)]))
        coalesced_grad = nn.concat(flattened_vars)
        coalesced_grads_and_grad_vars.append(
            [coalesced_grad, grad_vars, g_var_shapes])
    return coalesced_grads_and_grad_vars


@framework.dygraph_only
def _reshape_inplace(x, shape):
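    # Reshape a VarBase in place by tracing a reshape2 op whose output aliases its
    # input, so the tensor is reshaped without allocating a new variable.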
    x_shape = framework._varbase_creator(dtype=x.dtype)
    framework._dygraph_tracer().trace_op(
        type="reshape2",
        inputs={'X': x},
        outputs={'Out': x,
                 'XShape': x_shape},
        attrs={'shape': shape})


@framework.dygraph_only
def _split_tensors(coalesced_grads_and_grad_vars):
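    # Inverse of _coalesce_tensors: split each coalesced tensor back into the original
    # gradient vars by their flattened lengths, then restore every var's shape in place.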
    for coalesced_grad, origin_grad_vars, grad_shapes in coalesced_grads_and_grad_vars:
        grad_var_len = [np.prod(g_shape) for g_shape in grad_shapes]
        framework._dygraph_tracer().trace_op(
            type='split',
            inputs={'X': coalesced_grad},
            outputs={'Out': origin_grad_vars},
            attrs={'sections': grad_var_len,
                   'axis': 0})
        for g_var, g_shape in zip(origin_grad_vars, grad_shapes):
            _reshape_inplace(x=g_var, shape=g_shape)
            assert g_var.shape == g_shape


def scale_loss(loss):
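    # Divide the loss by the number of trainers so that gradients summed by allreduce
    # approximate the single-card average; per the TODO below this is only kept for
    # the XPU path and is expected to be removed.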
    # TODO(liuyuhui) Currently only for xpu. Will be removed in the future.
    if not ParallelEnv().world_size > 1:
        return loss

    loss_scale = to_variable(
        np.array([ParallelEnv().world_size]).astype("float32"))
    loss_scale.stop_gradient = True
    scaled_loss = loss / loss_scale
    return scaled_loss


class DataParallel(layers.Layer):
    """
    Run the dygraph module with data parallelism.

    Currently, the DataParallel class only supports running the dynamic graph
    with multiple processes.

    Now supports two ways to start training:

    1. start by ``paddle.distributed.spawn`` method, for example:

        ``python demo.py`` (spawn needs to be called in the ``__main__`` method)

    2. start by ``paddle.distributed.launch`` module, for example:

        ``python -m paddle.distributed.launch --gpus=0,1 demo.py`` .

    And the content of `demo.py` is the code of the examples below.

    Args:
        layers(Layer): The module that should be executed by data parallel.
        strategy(ParallelStrategy, optional): (deprecated) The strategy of data parallelism,
            contains environment configuration related to parallel execution. Default: None.
        comm_buffer_size(int, optional): It limits the memory size (MB) of one buffer of
                                          parameters' gradients, which is the input of a communication
                                          call (e.g. NCCLAllReduce). Default: 25.
        last_comm_buffer_size(float, optional): It limits the memory size (MB) of the last buffer used in
                                         communication calls. Making the last communication buffer size small is useful to
                                         improve performance. Default: 1.
        find_unused_parameters(bool, optional): Whether to traverse the entire backward graph from
                                                all tensors in the return value of the wrapped model's
                                                forward function. For parameters not involved in the loss
                                                calculation, their gradients will be marked as ready in
                                                advance for the reduce step. Please note that all forward
                                                outputs derived from the wrapped model parameters must
                                                participate in the calculation of the loss and subsequent
                                                gradient calculations. If not, a serious error will occur.
                                                Note that setting find_unused_parameters to True
                                                will affect computing performance. Therefore, if all parameters
                                                are sure to participate in the loss calculation and the
                                                autograd graph construction, please set it to False. Default: True.

    Returns:
        Layer: The data paralleled module.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.distributed as dist

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear1 = nn.Linear(10, 10)
                    self._linear2 = nn.Linear(10, 1)
                    
                def forward(self, x):
                    return self._linear2(self._linear1(x))

            def train():
                # 1. initialize parallel environment
                dist.init_parallel_env()

                # 2. create data parallel layer & optimizer
                layer = LinearNet()
                dp_layer = paddle.DataParallel(layer)

                loss_fn = nn.MSELoss()
                adam = opt.Adam(
                    learning_rate=0.001, parameters=dp_layer.parameters())

                # 3. run layer
                inputs = paddle.randn([10, 10], 'float32')
                outputs = dp_layer(inputs)
                labels = paddle.randn([10, 1], 'float32')
                loss = loss_fn(outputs, labels)
                
                loss.backward()

                adam.step()
                adam.clear_grad()

            if __name__ == '__main__':
                # 1. start by ``paddle.distributed.spawn`` (default)
                dist.spawn(train, nprocs=2)
                # 2. start by ``paddle.distributed.launch``
                # train()
    """

    def __init__(self,
                 layers,
                 strategy=None,
                 comm_buffer_size=25,
                 last_comm_buffer_size=1,
                 find_unused_parameters=True):
        super(DataParallel,
              self).__init__(layers.full_name() + "_data_parallel")

        self._layers = layers
        self.find_unused_parameters = find_unused_parameters

        # NOTE(chenweihang): The ParallelStrategy here is not strictly a strategy. 
        # It just stores some environment variables, which can be constructed by 
        # ParallelEnv. Here it is set as an optional argument.
        # This parameter is not removed because of compatibility with 1.x writing.
        if strategy is not None:
            self._strategy = strategy
        else:
            self._strategy = _build_default_parallel_strategy()

        if self._strategy.nranks > 1:
            # check the environment
            assert parallel_helper.__parallel_ctx__clz__ is not None, \
            "ParallelContext must be initialized before. You should use init_parallel_env() before " \
            "constructing the DataParallel."

            # sync buffer and params
            # TODO(liuyuhui) Currently not support xpu. xpu is 
            # still broadcasting parameters when calling layer
            if not paddle.is_compiled_with_xpu():
                self._sync_params_buffers()

            self.comm_buffer_size = int(comm_buffer_size * 1024 * 1024)
            # NOTE(shenliang03): We can set environment variables to control
            # the size of the group. Default: 1MB. The role of this small group is:
            # when the last group does allreduce, the overlap cannot work. Making
            # the last group small is useful to improve performance.
            self.last_comm_buffer_size = int(last_comm_buffer_size * 1024 *
                                             1024)
            self.init_reducer()
        else:
            warnings.warn("The program will fall back to single-card operation. "
                          "Please check: 1. whether you use spawn or fleetrun "
                          "to start the program; 2. whether it is a multi-card "
                          "program; 3. whether the current environment is multi-card.")

    def init_reducer(self):
        layers_param = []
        params_set = set()
        for sublayer in self.sublayers():
            for _, param in sublayer.named_parameters(include_sublayers=False):
                if param is None or param in params_set:
                    continue
                params_set.add(param)
                if not isinstance(param, core.VarBase):
                    raise TypeError("The data type of '%s' must be VarBase" %
                                    param.name)
                if param.trainable:
                    layers_param.append((sublayer, param))

        trainable_parameters = [param for _, param in layers_param]

        assert len(trainable_parameters) > 0, \
            "This model does not have any parameters to train, and " \
            "does not need to use DataParallel"

        # NOTE(shenliang03): Here we can only use the attributes to judge whether
        # parameter is sparse(or SelectedRows). The reason is that the sparse message
        # can't be obtained when bp hasn't happened yet. So if layer supports sparse parameter,
        # we should add the layer here like "paddle.nn.layer.common.Embedding".
        def check_layer_sparse(sublayer):
            if isinstance(sublayer, paddle.nn.layer.common.Embedding):
                return sublayer._sparse
            # NOTE(shenliang03): This is for compatibility. If paddle.fluid.dygraph.Embedding
            # is removed in the future, the check will also be removed here.
            if isinstance(sublayer, paddle.fluid.dygraph.Embedding):
                return sublayer._is_sparse
            return False

        is_sparse_gradient = [
            check_layer_sparse(sublayer) for sublayer, _ in layers_param
        ]

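        # Bucket the trainable parameters by size so their gradients can be fused and
        # all-reduced group by group, using [self.last_comm_buffer_size, self.comm_buffer_size]
        # as the size limits (see the NOTE above on why the last buffer is kept small).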
        self.group_indices = core.assign_group_by_size(
            trainable_parameters, is_sparse_gradient,
            [self.last_comm_buffer_size, self.comm_buffer_size])

        self._reducer = core.Reducer(
            trainable_parameters,
            list(reversed(self.group_indices)), is_sparse_gradient,
            parallel_helper.__parallel_ctx__clz__,
            [self.last_comm_buffer_size, self.comm_buffer_size],
            self.find_unused_parameters)

    def _find_varbase(self, obj):
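        # Recursively collect every VarBase held in a possibly nested structure of
        # lists, tuples and dicts, so the reducer knows which outputs to trace back from.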
        if isinstance(obj, core.VarBase):
            return [obj]
        if isinstance(obj, (list, tuple)):
            return itertools.chain(*map(self._find_varbase, obj))
        if isinstance(obj, dict):
            return itertools.chain(*map(self._find_varbase, obj.values()))
        return []
    def _sync_params_buffers(self):
        model_vars = []
        for _, param in self._layers.state_dict().items():
            if not isinstance(param, core.VarBase):
                raise TypeError("The data type of '%s' must be VarBase" %
                                param.name)
            model_vars.append(param.detach())
        if len(model_vars) == 0:
            return

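        # Group the parameters/buffers into coalesced chunks of roughly 128MB with a
        # uniform dtype, broadcast each chunk from rank 0, then split the chunks back
        # into the original tensors so that every trainer starts from identical values.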
        mega_bytes = 128 * 1024 * 1024
        group_idx = 0
        memory_counter = 0
        var_groups = OrderedDict()
        dtype = model_vars[0].dtype

        for var in model_vars:
            bytes = np.prod(var.shape) * core.size_of_dtype(var.dtype)
            if memory_counter < mega_bytes and dtype == var.dtype:
                memory_counter += bytes
            else:
                memory_counter = 0
                dtype = var.dtype
                group_idx += 1
            var_groups.setdefault(group_idx, []).append(var)

        coalesced_vars = _coalesce_tensors(var_groups)

        for coalesced_var, _, _ in coalesced_vars:
            collective._broadcast(coalesced_var, root=0, sync_mode=True)

        for coalesced_var, origin_vars, var_shapes in coalesced_vars:
            var_len = [np.prod(v_shape) for v_shape in var_shapes]
            framework._dygraph_tracer().trace_op(
                type='split',
                inputs={'X': coalesced_var},
                outputs={'Out': origin_vars},
                attrs={'sections': var_len,
                       'axis': 0})

    def forward(self, *inputs, **kwargs):
        outputs = self._layers(*inputs, **kwargs)
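        # Before backward, hand the reducer the VarBases found in the outputs so it can
        # mark gradients of parameters unused in the loss as ready; with
        # find_unused_parameters=False an empty list is passed and no such search is done.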
        if self._strategy.nranks > 1 and framework._dygraph_tracer()._has_grad:
            if self.find_unused_parameters:
                self._reducer.prepare_for_backward(
                    list(self._find_varbase(outputs)))
            else:
                self._reducer.prepare_for_backward(list(self._find_varbase([])))

        return outputs

    @deprecated(
        since="2.0.0", reason="This method does not need to be called anymore.")
    def scale_loss(self, loss):
        """
        Deprecated method. Now ``scale_loss`` is an empty method;
        it is kept just for compatibility.
        """
        return loss

    @deprecated(
        since="2.0.0", reason="This method does not need to be called anymore.")
    def apply_collective_grads(self):
        """
        Deprecated method. Now ``apply_collective_grads`` is an empty method;
        it is kept just for compatibility.
        """
        return

    def state_dict(self,
                   destination=None,
                   include_sublayers=True,
                   structured_name_prefix=""):
        '''
        Get all parameters and persistable buffers of current layer and its sub-layers, and set them into a dict.

        Parameters:
            destination(dict, optional) : If provided, all the parameters and persistable buffers will be set to this dict. Default: None
            include_sublayers(bool, optional) : If true, also include the parameters and persistable buffers from sublayers. Default: True

        Returns:
            dict: a dict containing all the parameters and persistable buffers.

        Examples:
            .. code-block:: python

                import paddle
                import paddle.distributed as dist

                dist.init_parallel_env()

                emb = paddle.nn.Embedding(10, 10)
                emb = paddle.DataParallel(emb)

                state_dict = emb.state_dict()
                paddle.save(state_dict, "paddle_dy.pdparams")

        '''

        return self._layers.state_dict(
            destination=destination,
            include_sublayers=include_sublayers,
            structured_name_prefix=structured_name_prefix)

    @framework.deprecate_stat_dict
    def set_state_dict(self, state_dict, use_structured_name=True):
        '''
        Set parameters and persistable buffers from state_dict. All the parameters and buffers will be reset by the tensors in the state_dict.

        Parameters:
            state_dict(dict) : Dict containing all the parameters and persistable buffers.
            use_structured_name(bool, optional) : If true, use structured name as key, otherwise, use parameter or buffer name as key.
                                                  Default: True
        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle
                import paddle.distributed as dist

                dist.init_parallel_env()

                emb = paddle.nn.Embedding(10, 10)
                emb = paddle.DataParallel(emb)

                state_dict = emb.state_dict()
                paddle.save(state_dict, "paddle_dy.pdparams")

                para_state_dict = paddle.load("paddle_dy.pdparams")
                emb.set_state_dict(para_state_dict)

        '''

        self._layers.set_state_dict(
            state_dict, use_structured_name=use_structured_name)

    # [aliases] Compatible with old method names
    set_dict = set_state_dict
    load_dict = set_state_dict