# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Temporarily disable isort to avoid circular import
# This can be removed after the circular import is resolved
# isort: skip_file
import os
import pickle
import warnings
from collections import OrderedDict
import inspect
import threading
from typing import Text, Tuple, Any, List

import paddle
from paddle.fluid import core, dygraph
from paddle.fluid.compiler import (
    BuildStrategy,
    CompiledProgram,
    ExecutionStrategy,
)
from paddle.fluid.data_feeder import check_type
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from paddle.fluid.dygraph.base import (
    program_desc_tracing_guard,
    switch_to_static_graph,
)
from .dy2static import logging_utils
from .dy2static.convert_call_func import (
    ConversionOptions,
    CONVERSION_OPTIONS,
)
from .dy2static.logging_utils import (
    set_code_level,
    set_verbosity,
)
from .dy2static.program_translator import (
    ProgramTranslator,
    StaticFunction,
    unwrap_decorators,
)
from paddle.fluid.dygraph.io import (
    TranslatedLayer,
    INFER_MODEL_SUFFIX,
    INFER_PARAMS_SUFFIX,
    INFER_PARAMS_INFO_SUFFIX,
    INFER_PROPERTY_SUFFIX,
)
from paddle.fluid.dygraph.layers import Layer
from paddle.fluid.executor import Executor, scope_guard
from paddle.fluid.framework import (
    Block,
    ParamBase,
    Program,
    Variable,
    Parameter,
    EagerParamBase,
)
from paddle.fluid.framework import (
    _current_expected_place,
    _dygraph_guard,
    _dygraph_tracer,
)
from paddle.fluid.framework import dygraph_only, _non_static_mode
from paddle.fluid.wrapped_decorator import wrap_decorator

__all__ = [
    'declarative',
    'set_code_level',
    'set_verbosity',
    'save',
    'load',
    'not_to_static',
]


def create_program_from_desc(program_desc):
    program = Program()
    program.desc = program_desc
    program.blocks = [Block(program, 0)]
    program._sync_with_cpp()
    return program


def _extract_vars(inputs, result_list, err_tag='inputs'):
    if isinstance(inputs, Variable):
        result_list.append(inputs)
    elif isinstance(inputs, (list, tuple)):
        for var in inputs:
            _extract_vars(var, result_list, err_tag)
    else:
        raise TypeError(
            "The type of 'each element of {}' in paddle.jit.TracedLayer.trace must be fluid.Variable, but received {}.".format(
                err_tag, type(inputs)
            )
        )


def extract_vars(inputs, err_tag='inputs'):
    result_list = []
    _extract_vars(inputs, result_list, err_tag)
    return result_list


def _dygraph_to_static_func_(dygraph_func):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @dygraph_to_static_func only converts imperative dygraph APIs into
    declarative net-building APIs, which means it doesn't return the numerical
    result immediately as imperative mode does. Users should handle Program and
    Executor by themselves.

    Note:
    This decorator is NOT our recommended way to transform an imperative function
    into a declarative function. We will remove this decorator after we finalize
    cleaning up the code.

    Args:
        dygraph_func (callable): callable imperative function.

    Returns:
        Callable: converting imperative dygraph APIs into declarative
        net-building APIs.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.fluid as fluid
          import numpy as np
          from paddle.jit.api import dygraph_to_static_func

          @dygraph_to_static_func
          def func(x):
              if paddle.mean(x) < 0:
                  x_v = x - 1
              else:
                  x_v = x + 1
              return x_v

          x = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='float64')

          x_v = func(x)
          exe = fluid.Executor(fluid.CPUPlace())
          out = exe.run(fetch_list=[x_v])
          print(out[0])
          # [[1. 1. 1.]
          #  [1. 1. 1.]
          #  [1. 1. 1.]]

    """

    # TODO: remove this decorator after we finalize training API
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        if _non_static_mode() or not program_translator.enable_to_static:
            logging_utils.warn(
                "The decorator 'dygraph_to_static_func' doesn't work in "
                "dygraph mode or when ProgramTranslator.enable is set to False. "
                "We will just return dygraph output."
            )
            return dygraph_func(*args, **kwargs)
        static_func = program_translator.get_func(dygraph_func)
        return static_func(*args, **kwargs)

    return __impl__


dygraph_to_static_func = wrap_decorator(_dygraph_to_static_func_)


def copy_decorator_attrs(original_func, decorated_obj):
    """
    Copies some necessary attributes from original function into decorated function.

    Args:
        original_func(callable): the original decorated function.
        decorated_obj(StaticFunction): the target decorated StaticFunction object.
    """
    decorator_name = "declarative"

    decorated_obj.__name__ = original_func.__name__
    decorated_obj._decorator_name = decorator_name
    decorated_obj.__wrapped__ = original_func
    decorated_obj.__doc__ = original_func.__doc__
    if hasattr(original_func, "__module__"):
        decorated_obj.__module__ = original_func.__module__

    return decorated_obj
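
# Illustrative note (assumed behavior, not an executable test): after
#
#     static_fn = copy_decorator_attrs(original_func=foo, decorated_obj=StaticFunction(function=foo))
#
# `static_fn.__name__`, `__doc__`, and `__module__` still report those of the
# hypothetical `foo`, so `inspect`-based tooling keeps working on the
# decorated object.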


def declarative(
    function=None, input_spec=None, build_strategy=None, property=False
):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @declarative handles the Program and Executor of static mode and returns
    the result as dygraph Tensor(s). Users could use the returned dygraph
    Tensor(s) to do imperative training, inference, or other operations. If the
    decorated function calls other imperative functions, the called ones will be
    converted into declarative functions as well.

    Args:
        function (callable): callable imperative function.
        input_spec(list[InputSpec]|tuple[InputSpec]): list/tuple of InputSpec to specify the shape/dtype/name
            information of each input Tensor.
        build_strategy(BuildStrategy|None): This argument is used to compile the
            converted program with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.
        property(bool, Optional): whether the function is a Python property. The default is False.


    Returns:
        Tensor(s): containing the numerical result.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.jit import to_static

            @to_static
            def func(x):
                if paddle.mean(x) < 0:
                    x_v = x - 1
                else:
                    x_v = x + 1
                return x_v

            x = paddle.ones([1, 2], dtype='float32')
            x_v = func(x)
            print(x_v) # [[2. 2.]]

    """

    def decorated(python_func):
        """
        Decorates a python function into a StaticFunction object.
        """
        # Step 1. unwrap the function if it is already decorated.
        _, python_func = unwrap_decorators(python_func)

        # Step 2. copy some attributes from original python function.
        static_layer = copy_decorator_attrs(
            original_func=python_func,
            decorated_obj=StaticFunction(
                function=python_func,
                input_spec=input_spec,
                build_strategy=build_strategy,
                property=property,
            ),
        )

        return static_layer

    build_strategy = build_strategy or BuildStrategy()
    if not isinstance(build_strategy, BuildStrategy):
        raise TypeError(
            "Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}".format(
                type(build_strategy).__name__
            )
        )

    # for usage: `declarative(foo, ...)`
    if function is not None:
        if isinstance(function, Layer):
            if isinstance(function.forward, StaticFunction):
                class_name = function.__class__.__name__
                logging_utils.warn(
                    "`{}.forward` has already been decorated somewhere. It will be redecorated to replace previous one.".format(
                        class_name
                    )
                )
            function.forward = decorated(function.forward)
            return function
        else:
            return decorated(function)

    # for usage: `@declarative`
    return decorated
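
# Illustrative usage sketch (not executed here) of the two supported forms;
# `MyLayer` and `double` are hypothetical names used only for illustration:
#
#     net = declarative(MyLayer())      # direct call: decorates MyLayer.forward
#
#     @declarative                      # decorator form
#     def double(x):
#         return x * 2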


def not_to_static(func=None):
    """
    A decorator to suppress the conversion of a function.

    Args:
        func(callable): The function to decorate.

    Returns:
        callable: A function which won't be converted in Dynamic-to-Static.

    Examples:
        .. code-block:: python

            import paddle

            @paddle.jit.not_to_static
            def func_not_to_static(x):
                res = x - 1
                return res

            @paddle.jit.to_static
            def func(x):
                if paddle.mean(x) < 0:
                    out = func_not_to_static(x)
                else:
                    out = x + 1
                return out

            x = paddle.ones([1, 2], dtype='float32')
            out = func(x)
            print(out) # [[2. 2.]]
    """
    if func is None:
        return not_to_static

    options = ConversionOptions(not_convert=True)
    setattr(func, CONVERSION_OPTIONS, options)
    return func


class _SaveLoadConfig:
    def __init__(self):
        self._output_spec = None
        self._model_filename = None
        self._params_filename = None
        self._separate_params = False
        # used for `paddle.load`
        self._keep_name_table = False

        # NOTE: Users rarely use the following configs, so they are not exposed to users,
        # reducing user learning costs, but we retain the configuration capabilities

        # If True, programs are modified to only support direct inference deployment.
        # Otherwise, more information will be stored for flexible optimization and re-training.
        # Currently, only True is supported
        self._export_for_deployment = True

        # If True, it will save the inference program only, and not save the params of the Program
        self._program_only = False
        self.with_hook = False

        # if True, multiple `StaticFunction` objects will share params in one file.
        self.combine_params = False

    @property
    def output_spec(self):
        return self._output_spec

    @output_spec.setter
    def output_spec(self, spec):
        if spec is None:
            return
        if not isinstance(spec, list):
            raise TypeError(
                "The config `output_spec` should be 'list', but received input type is %s."
                % type(spec)
            )
        for var in spec:
            if not isinstance(var, core.VarBase):
                raise TypeError(
                    "The element in config `output_spec` list should be 'Variable', but received element's type is %s."
                    % type(var)
                )
        self._output_spec = spec

    @property
    def model_filename(self):
        return self._model_filename

    @model_filename.setter
    def model_filename(self, filename):
        if filename is None:
            return
        if not isinstance(filename, str):
            raise TypeError(
                "The config `model_filename` should be str, but received input's type is %s."
                % type(filename)
            )
        if len(filename) == 0:
            raise ValueError("The config `model_filename` is an empty string.")
        self._model_filename = filename

    @property
    def params_filename(self):
        return self._params_filename

    @params_filename.setter
    def params_filename(self, filename):
        if filename is None:
            return
        if not isinstance(filename, str):
            raise TypeError(
                "The config `params_filename` should be str, but received input's type is %s."
                % type(filename)
            )
        if len(filename) == 0:
            raise ValueError("The config `params_filename` is an empty string.")
        self._params_filename = filename

    @property
    def keep_name_table(self):
        return self._keep_name_table

    @keep_name_table.setter
    def keep_name_table(self, value):
        if value is None:
            return
        if not isinstance(value, bool):
            raise TypeError(
                "The config `keep_name_table` should be a bool value, but received input's type is %s."
                % type(value)
            )
        self._keep_name_table = value


def _parse_save_configs(configs):
    supported_configs = [
        'output_spec',
        "with_hook",
        "combine_params",
        "clip_extra",
        "skip_forward",
    ]

    # input check
    for key in configs:
        if key not in supported_configs:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.save` is not supported."
                % (key)
            )

    # construct inner config
    inner_config = _SaveLoadConfig()
    inner_config.output_spec = configs.get('output_spec', None)
    inner_config.with_hook = configs.get('with_hook', False)
    inner_config.combine_params = configs.get("combine_params", False)
    inner_config.clip_extra = configs.get("clip_extra", True)
    inner_config.skip_forward = configs.get("skip_forward", False)

    return inner_config
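
# Illustrative sketch (assumed values, not executed): `_parse_save_configs`
# maps the `**configs` accepted by `paddle.jit.save` onto a `_SaveLoadConfig`:
#
#     cfg = _parse_save_configs({"combine_params": True, "clip_extra": False})
#     # cfg.combine_params is True, cfg.clip_extra is False; other fields keep
#     # their defaults, and an unsupported key raises ValueError above.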


def _parse_load_config(configs):
    supported_configs = ['model_filename', 'params_filename']

    # input check
    for key in configs:
        if key not in supported_configs:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.load` is not supported."
                % (key)
            )

    # construct inner config
    inner_config = _SaveLoadConfig()
    inner_config.model_filename = configs.get('model_filename', None)
    inner_config.params_filename = configs.get('params_filename', None)

    return inner_config


def _get_input_var_names(inputs, input_spec):
    name_none_error = (
        "The %s's name is None. "
        "When using jit.save, please set InputSpec's name in "
        "to_static(input_spec=[]) and jit.save(input_spec=[]) "
        "and make sure they are consistent."
    )
    name_no_exists_error = (
        "The tensor `%s` does not exist. "
        "Please make sure the name of InputSpec or example Tensor "
        "in input_spec is the same as the name of InputSpec in "
        "`to_static` decorated on the Layer.forward method."
    )
    result_list = []
    input_var_names = [
        var.name for var in flatten(inputs) if isinstance(var, Variable)
    ]
    if input_spec is None:
        # no prune
        return input_var_names
    else:
        # filter out non-tensor type spec info.
        input_spec = [
            spec
            for spec in input_spec
            if isinstance(spec, paddle.static.InputSpec)
        ]

    if len(input_spec) == len(input_var_names):
        # no prune
        result_list = input_var_names
        # if input spec name not in input_var_names, only raise warning
        for spec in input_spec:
            if spec.name is None:
                warnings.warn(name_none_error % spec)
            elif spec.name not in input_var_names:
                warnings.warn(name_no_exists_error % spec.name)
            else:
                # do nothing
                pass
    else:
        # prune
        for spec in input_spec:
            if spec.name is None:
                # name is None, the input_spec only can be InputSpec
                raise ValueError(name_none_error % spec)
            elif spec.name not in input_var_names:
                # the input_spec can be `InputSpec` or `VarBase`
                raise ValueError(name_no_exists_error % spec.name)
            else:
                result_list.append(spec.name)

    return result_list
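
# Illustrative behavior (hypothetical names): given program inputs named
# ["x", "y"] and input_spec=[InputSpec(shape=[-1], name="x")], the result is
# ["x"] (pruned); with no input_spec, both "x" and "y" are kept as feed
# targets, and a spec name absent from the inputs raises ValueError above.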


def _get_output_vars(outputs, output_spec, with_hook=False):
    name_no_exists_error = (
        "The tensor `%s` does not exist. "
        "Please make sure the name of example Tensor "
        "in configs.output_spec is the output tensor of "
        "Layer.forward method."
    )
    if output_spec and with_hook:
        raise RuntimeError(
            "Specifying `output_spec` is currently not supported when there are pre/post hooks in your outermost layer."
        )
    result_list = []
    output_vars_dict = OrderedDict()
    for var in flatten(outputs):
        if isinstance(var, Variable):
            output_vars_dict[var.name] = var
    if output_spec is None:
        result_list = list(output_vars_dict.values())
    elif output_spec is not None and len(output_spec) == len(output_vars_dict):
        result_list = list(output_vars_dict.values())
        for var in output_spec:
            if var.name not in output_vars_dict:
                warnings.warn(name_no_exists_error % var.name)
    else:
        for var in output_spec:
            if var.name not in output_vars_dict:
                raise ValueError(name_no_exists_error % var.name)
            else:
                result_list.append(output_vars_dict[var.name])
    return result_list


# NOTE(chenweihang): [ Handling of use cases of API paddle.jit.load ]
# `paddle.jit.load` may be used to load saved results of:
# 1. Expected cases:
#   - paddle.jit.save
#   - paddle.static.save_inference_model
#   - paddle.fluid.io.save_inference_model
# 2. Error cases:
#   - paddle.save: no .pdmodel for prefix
#   - paddle.static.save: no .pdiparams but .pdparams exists
#   - paddle.fluid.io.save_params/save_persistables: no __model__
# TODO(chenweihang): polish error message in above error cases
def _build_load_path_and_config(path, config):
    # NOTE(chenweihang): If both [prefix save format] and [directory save format] exist,
    # raise error, avoid confusing behavior
    prefix_format_path = path + INFER_MODEL_SUFFIX
    prefix_format_exist = os.path.exists(prefix_format_path)
    directory_format_exist = os.path.isdir(path)
    if prefix_format_exist and directory_format_exist:
        raise ValueError(
            "The %s.pdmodel and %s directory exist at the same time, "
            "don't know which one to load, please make sure that the specified target "
            "of ``path`` is unique." % (path, path)
        )
    elif not prefix_format_exist and not directory_format_exist:
        raise ValueError(
            "The ``path`` (%s) to load model does not exist. "
            "Please make sure that *.pdmodel exists or "
            "don't use ``skip_forward=True`` in jit.save." % path
        )
    else:
        if prefix_format_exist:
            file_prefix = os.path.basename(path)
            model_path = os.path.dirname(path)
            if config.model_filename is not None:
                warnings.warn(
                    "When loading the result saved with the "
                    "specified file prefix, the ``model_filename`` config does "
                    "not take effect."
                )
            config.model_filename = file_prefix + INFER_MODEL_SUFFIX
            if config.params_filename is not None:
                warnings.warn(
                    "When loading the result saved with the "
                    "specified file prefix, the ``params_filename`` config does "
                    "not take effect."
                )
            config.params_filename = file_prefix + INFER_PARAMS_SUFFIX
        else:
            # Compatible with the old save_inference_model format
            model_path = path

    return model_path, config
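
# Illustrative layouts (assumed files, not checked here):
#     path = "model/linear"  -> prefix format: loads "model/linear.pdmodel"
#                               and "model/linear.pdiparams"
#     path = "saved_dir"     -> directory format: old save_inference_model
#                               results inside "saved_dir" (e.g. "__model__")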


_save_pre_hooks_lock = threading.Lock()
_save_pre_hooks = []


class HookRemoveHelper:
    """A HookRemoveHelper that can be used to remove a hook."""

    def __init__(self, hook):
        self._hook = hook

    def remove(self):
        _remove_save_pre_hook(self._hook)


def _register_save_pre_hook(hook):
    """
    Register a save pre-hook for `paddle.jit.save`.
    This hook will be executed before the `save` function is invoked.

    hook(layer, input_spec, configs) -> None
    - layer (Layer|function): This argument is corresponding to `layer` in `paddle.jit.save`.
    - input_spec (list or tuple[InputSpec|Tensor|Python built-in variable]): This argument is corresponding to `input_spec` in `paddle.jit.save`.
    - configs (dict): This argument is corresponding to `configs` in `paddle.jit.save`.

    Args:
        hook(function): a function registered as a save pre-hook

    Returns:
        HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()`.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            IMAGE_SIZE = 256
            CLASS_NUM = 10

            class LinearNet(paddle.nn.Layer):
                def __init__(self):
                    super().__init__()
                    self._linear = paddle.nn.Linear(IMAGE_SIZE, CLASS_NUM)

                def forward(self, x):
                    return self._linear(x)

            saving_count = 0
            def save_pre_hook(layer, input_spec, configs):
                global saving_count
                saving_count += 1

            remove_handler = paddle.jit.register_save_pre_hook(save_pre_hook)

            layer = LinearNet()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1

            remove_handler.remove()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1
    """
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    if hook not in _save_pre_hooks:
        _save_pre_hooks.append(hook)
    _save_pre_hooks_lock.release()
    return HookRemoveHelper(hook)


def _clear_save_pre_hooks():
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    _save_pre_hooks.clear()
    _save_pre_hooks_lock.release()


def _remove_save_pre_hook(hook):
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    if hook in _save_pre_hooks:
        _save_pre_hooks.remove(hook)
    _save_pre_hooks_lock.release()


@wrap_decorator
def _run_save_pre_hooks(func):
    def wrapper(layer, path, input_spec=None, **configs):
        global _save_pre_hooks
        for hook in _save_pre_hooks:
            hook(layer, input_spec, configs)
        func(layer, path, input_spec, **configs)

    return wrapper
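
# Illustrative flow (hypothetical hook, not registered by default): because
# `save` below is wrapped with `@_run_save_pre_hooks`, a registered hook runs
# before the actual saving, e.g.
#
#     def audit_hook(layer, input_spec, configs):
#         print("about to save", type(layer).__name__)
#
#     remove_handler = _register_save_pre_hook(audit_hook)
#     # paddle.jit.save(...) now calls audit_hook first;
#     # remove_handler.remove() unregisters it.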


def _save_property(filename: Text, property_vals: List[Tuple[Any, Text]]):
    """class property serialization.

    Args:
        filename (Text): *.meta
        property_vals (List[Tuple[Any, Text]]): class properties as (value, name) pairs.
    """

    def set_property(meta, key, val):
        if isinstance(val, float):
            meta.set_float(key, val)
        elif isinstance(val, int):
            meta.set_int(key, val)
        elif isinstance(val, str):
            meta.set_string(key, val)
        elif isinstance(val, (tuple, list)):
            if isinstance(val[0], float):
                meta.set_floats(key, val)
            elif isinstance(val[0], int):
                meta.set_ints(key, val)
            elif isinstance(val[0], str):
                meta.set_strings(key, val)
        else:
            raise ValueError(f"Note support val type: {type(val)}")
        return

    with open(filename, 'wb') as f:
        meta = paddle.framework.core.Property()
        for item in property_vals:
            val, key = item[0], item[1]
            set_property(meta, key, val)
        f.write(meta.serialize_to_string())
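
# Minimal usage sketch of `_save_property` (hypothetical file name and values):
#
#     _save_property("linear.meta", [(0.5, "LinearNet.scale"), (10, "LinearNet.num")])
#
# This serializes each (value, name) pair into a `Property` blob; scalar and
# homogeneous list/tuple values of float/int/str are supported, anything else
# raises ValueError above.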


@_run_save_pre_hooks
@switch_to_static_graph
def save(layer, path, input_spec=None, **configs):
    """
    Saves input Layer or function as ``paddle.jit.TranslatedLayer``
    format model, which can be used for inference or fine-tuning after loading.

    It will save the translated program and all related persistable
    variables of input Layer to given ``path`` .

    ``path`` is the prefix of saved objects, and the saved translated program file
    suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` ,
    and some additional variable description information is also saved to a file
    whose suffix is ``.pdiparams.info``; this additional information is used in fine-tuning.

    The saved model can be loaded by the following APIs:
      - ``paddle.jit.load``
      - ``paddle.static.load_inference_model``
      - Other C++ inference APIs

    .. note::
        When using ``paddle.jit.save`` to save a function, parameters will not be saved. If you have to
        save the parameters, please pass the Layer containing the function and parameters to ``paddle.jit.save``.

    Args:
        layer (Layer|function): The Layer or function to be saved.
        path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
        input_spec (list or tuple[InputSpec|Tensor|Python built-in variable], optional): Describes the input of the saved model's forward
            method, which can be described by InputSpec or example Tensor. Moreover, non-Tensor type arguments are supported,
            such as int, float, string, or list/dict of them. If None, all input variables of
            the original Layer's forward method would be the inputs of the saved model. Default None.
        **configs (dict, optional): Other save configuration options for compatibility. We do not
            recommend using these configurations; they may be removed in the future. If not necessary,
            DO NOT use them. Default None.
            The following options are currently supported:
            (1) output_spec (list[Tensor]): Selects the output targets of the saved model.
            By default, all return variables of original Layer's forward method are kept as the
            output of the saved model. If the provided ``output_spec`` list is not all output variables,
            the saved model will be pruned according to the given ``output_spec`` list.

    Returns:
        None

    Examples:
        .. code-block:: python

            # example 1: save layer
            import numpy as np
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt

            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            class LinearNet(nn.Layer):
                def __init__(self):
                    super().__init__()
                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

                @paddle.jit.to_static
                def forward(self, x):
                    return self._linear(x)

            def train(layer, loader, loss_fn, opt):
                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)
                        loss.backward()
                        opt.step()
                        opt.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

            # 1. train & save model.

            # create network
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)

            # train
            train(layer, loader, loss_fn, adam)

            # save
            path = "example_model/linear"
            paddle.jit.save(layer, path)

            # example 2: save function
            import paddle
            from paddle.static import InputSpec


            def save_function():
                @paddle.jit.to_static
                def fun(inputs):
                    return paddle.tanh(inputs)

                path = 'test_jit_save_load_function_1/func'
                inps = paddle.rand([3, 6])
                origin = fun(inps)

                paddle.jit.save(fun, path)
                load_func = paddle.jit.load(path)

                load_result = load_func(inps)
                print((load_result - origin).abs().max() < 1e-10)

            save_function()
    """

    # 1. input build & check
    prog_translator = ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "The paddle.jit.save doesn't work when setting ProgramTranslator.enable to False."
        )

    if not (
        isinstance(layer, Layer)
        or inspect.isfunction(layer)
        or isinstance(layer, StaticFunction)
    ):
        raise TypeError(
            "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
            % type(layer)
        )
    elif inspect.isfunction(layer) or isinstance(layer, StaticFunction):
        warnings.warn(
            'What you save is a function, and `jit.save` will generate the name of the model file according to `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.'
        )

    # NOTE(chenweihang): If the input layer is wrapped by DataParallel,
    # the args and kwargs of the forward method can't be parsed by
    # function_spec, so here we save DataParallel._layers instead of
    # DataParallel itself
    # NOTE(chenweihang): using inner_layer, do not change input layer
    if isinstance(layer, paddle.DataParallel):
        inner_layer = layer._layers
    else:
        inner_layer = layer

    # path check
    file_prefix = os.path.basename(path)
    if file_prefix == "":
        raise ValueError(
            "The input path MUST be format of dirname/file_prefix "
            "[dirname\\file_prefix in Windows system], but received "
            "file_prefix is empty string."
        )

    dirname = os.path.dirname(path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)

    # avoid changing the user-given input_spec
    inner_input_spec = None
    if input_spec is not None:
        if isinstance(layer, Layer):
            for attr_func in dir(inner_layer):
                static_func = getattr(inner_layer, attr_func, None)
                if (
                    isinstance(static_func, StaticFunction)
                    and 'forward' != attr_func
                ):
                    raise ValueError(
                        "If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
                        % type(input_spec)
                    )

        if not isinstance(input_spec, (list, tuple)):
            raise TypeError(
                "The input input_spec should be 'list', but received input_spec's type is %s."
                % type(input_spec)
            )
        inner_input_spec = []
        for var in flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                inner_input_spec.append(
                    paddle.static.InputSpec.from_tensor(var)
                )
            else:
                # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
                inner_input_spec.append(var)

    # parse configs
    configs = _parse_save_configs(configs)
    # whether the outermost layer has pre/post hooks; if it does, we also need to save
    # these operators in the program.
    with_hook = configs.with_hook
    combine_params = configs.combine_params
    if combine_params:
        configs._program_only = True

    scope = core.Scope()
    extra_var_info = dict()
    if isinstance(layer, Layer):
        functions = dir(inner_layer)
        if inner_layer._forward_pre_hooks or inner_layer._forward_post_hooks:
            with_hook = True
    else:
        # layer is function
        functions = [
            layer,
        ]

    combine_vars = {}
    property_vals = []  # (value, key)
    concrete_program = None
    for attr_func in functions:
        if isinstance(layer, Layer):
            static_func = getattr(inner_layer, attr_func, None)
            if isinstance(static_func, StaticFunction):
                if static_func.is_property:
                    # property method to be exported
                    immediate_val = static_func()
                    property_vals.append(
                        (
                            immediate_val,
                            layer.__class__.__name__ + '.' + attr_func,
                        )
                    )
                    continue

                concrete_program = (
                    static_func.concrete_program_specify_input_spec(
                        inner_input_spec, with_hook=with_hook
                    )
                )
            elif 'forward' == attr_func:
                if configs.skip_forward:
                    # do not jit.save forward function
                    continue

                # transform in jit.save, if input_spec is incomplete, declarative will throw error
                # inner_input_spec is list[InputSpec], it should be packed with the same structure
                # as the original input_spec here.
                if inner_input_spec:
                    inner_input_spec = pack_sequence_as(
                        input_spec, inner_input_spec
                    )
                static_forward = declarative(
                    inner_layer.forward, input_spec=inner_input_spec
                )
                concrete_program = (
                    static_forward.concrete_program_specify_input_spec(
                        with_hook=with_hook
                    )
                )
                # the input_spec has been used in declarative, which is equal to
                # @declarative with input_spec and jit.save without input_spec,
                # avoid needless warning
                inner_input_spec = None
            else:
                continue
        else:
            # When layer is a function
            if isinstance(attr_func, StaticFunction):
                if attr_func.is_property:
                    # property method to be exported
                    immediate_val = attr_func()
                    property_vals.append((immediate_val, attr_func))
                    continue

                concrete_program = (
                    attr_func.concrete_program_specify_input_spec(
                        inner_input_spec
                    )
                )
            else:
                if inner_input_spec:
                    inner_input_spec = pack_sequence_as(
                        input_spec, inner_input_spec
                    )
                static_function = declarative(
                    attr_func, input_spec=inner_input_spec
                )
                concrete_program = static_function.concrete_program

                if static_function._class_instance is None:
                    warnings.warn(
                        '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.format(
                            layer
                        )
                    )

        # when saving multiple `StaticFunction` objects, all of them share params.
        dygraph_state_dict = None
        if isinstance(inner_layer, Layer):
            dygraph_state_dict = inner_layer.to_static_state_dict()
        elif isinstance(attr_func, StaticFunction):
            if attr_func._class_instance:
                dygraph_state_dict = (
                    attr_func._class_instance.to_static_state_dict()
                )

        if dygraph_state_dict:
            # NOTE(chenweihang): we maintain the mapping of variable name to
            # structured name, the buffer variable (non-persistable)
            # saved to inference program may not need by dygraph Layer,
            # we only record the state_dict variable's structured name
            state_names_dict = dict()
            state_var_dict = dict()
            for structured_name, var in dygraph_state_dict.items():
                state_names_dict[var.name] = structured_name
                state_var_dict[var.name] = var

        # 3. share parameters from Layer to scope & record var info
        with dygraph.guard():
            for param_or_buffer in concrete_program.parameters:
                # share to scope
                if param_or_buffer.type == core.VarDesc.VarType.VOCAB:
                    scr_tensor = param_or_buffer.value().get_map_tensor()
                    tgt_var = scope.var(param_or_buffer.name)
                    tgt_var.set_vocab(scr_tensor)
                else:
                    param_or_buffer_tensor = scope.var(
                        param_or_buffer.name
                    ).get_tensor()
                    # src_tensor = param_or_buffer.value().get_tensor()
                    src_tensor = (
                        state_var_dict[param_or_buffer.name]
                        .value()
                        .get_tensor()
                    )
                    param_or_buffer_tensor._share_data_with(src_tensor)
                # record var info
                if param_or_buffer.name not in extra_var_info:
                    extra_info_dict = dict()
                    if param_or_buffer.name in state_names_dict:
                        extra_info_dict['structured_name'] = state_names_dict[
                            param_or_buffer.name
                        ]
                    extra_info_dict[
                        'stop_gradient'
                    ] = param_or_buffer.stop_gradient
                    if isinstance(param_or_buffer, (ParamBase, EagerParamBase)):
                        extra_info_dict['trainable'] = param_or_buffer.trainable
                    extra_var_info[param_or_buffer.name] = extra_info_dict

        # 4. build input & output of save_infernece_model
        # NOTE(chenweihang): [ Get input variables name ]
        # There are two cases, whether to prune the inputs or not
        # - not prune inputs (recommend):
        #   - the len(input_spec) == len(concrete_program.inputs) - 1
        #   - here can use concrete_program.inputs directly
        # - prune inputs:
        #   - the input_spec length < len(concrete_program.inputs) - 1
        #   - the input_spec's name should be in concrete_program.inputs
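        # Illustrative example (hypothetical names): if concrete_program.inputs
        # is [self, x, y] and input_spec only names "x", the saved model is
        # pruned to feed "x" alone; an unknown spec name raises ValueError.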
        input_var_names = _get_input_var_names(
            concrete_program.inputs, inner_input_spec
        )

        # NOTE(chenweihang): [ Get output variables ]
        # the rule is like [ Get input variables name ]. For output var,
        # we only support VarBase spec, and actually, we only need the
        # var name of output, and we don't recommend using output_spec
        # print(concrete_program.main_program)
        # print(concrete_program.outputs, configs.output_spec)
        output_vars = _get_output_vars(
            concrete_program.outputs, configs.output_spec, with_hook
        )

        # 5. save inference model
        from paddle.fluid.io import save_inference_model

        # construct new save_inference_model arguments
        model_path = dirname
        # NOTE(chenweihang): because the prefix contains the model and params filenames,
        # we don't support setting model_filename & params_filename
        if 'forward' == attr_func or not isinstance(layer, Layer):
            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX
        else:
            model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX
            params_filename = (
                file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX
            )

        with scope_guard(scope):
            save_inference_model(
                dirname=model_path,
                feeded_var_names=input_var_names,
                target_vars=output_vars,
                executor=Executor(_current_expected_place()),
                main_program=concrete_program.main_program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                export_for_deployment=configs._export_for_deployment,
                program_only=configs._program_only,
                clip_extra=configs.clip_extra,
            )

        if combine_params:
            clone_main_program = concrete_program.main_program.clone()
            clone_main_program = clone_main_program._prune_with_input(
                input_var_names, output_vars
            )
            for block in clone_main_program.blocks:
                combine_vars.update(block.vars)

    # save shared params
    if combine_params:
        # sort vars by name
        combine_vars = sorted(combine_vars.items(), key=lambda item: item[0])
        ordered_vars = []
        for name, var in combine_vars:
            ordered_vars.append(var)

        params_filename = file_prefix + INFER_PARAMS_SUFFIX
        with scope_guard(scope):
            paddle.static.save_vars(
                Executor(_current_expected_place()),
                dirname=model_path,
                vars=list(filter(paddle.fluid.io.is_persistable, ordered_vars)),
                filename=params_filename,
            )
        # save property
        property_save_path = os.path.join(
            os.path.normpath(model_path), file_prefix + INFER_PROPERTY_SUFFIX
        )
        _save_property(property_save_path, property_vals)

    # NOTE(chenweihang): [ Save extra variable info ]
    # save_inference_model will lose some important variable information, including:
    #   - Variable name and correspondence (when saved variables as one file)
    #   - Variable.stop_gradient information
    #   - Which persistable variables are parameters and which are not
    #   - Parameter.trainable information
    #
    # The lost information cannot be recovered when it is loaded again,
    # so if we want to perform fine-tune after loading, we may need to
    # configure redundant information to proceed.
    #
    # Due to compatibility issues, we cannot change the original storage structure,
    # but we can save this information in `jit.save` without changing the original
    # storage to improve user experience. So we save extra information into
    # file `***.pdiparams.info`

    # "layer" can only be Layer or function or StaticFunction.
    contain_parameter = False
    if concrete_program is not None:
        for var in concrete_program.main_program.list_vars():
            contain_parameter |= isinstance(var, Parameter)

    if (isinstance(layer, Layer) or contain_parameter) and extra_var_info:
        with scope_guard(scope):
            extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX
            with open(extra_var_info_path, 'wb') as f:
                pickle.dump(extra_var_info, f, protocol=2)


@dygraph_only
def load(path, **configs):
    """
    :api_attr: imperative

    Load model saved by ``paddle.jit.save`` or ``paddle.static.save_inference_model`` or
    paddle 1.x API ``paddle.fluid.io.save_inference_model`` as ``paddle.jit.TranslatedLayer``,
    then perform inference or fine-tune training.

    .. note::
        If you load model saved by ``paddle.static.save_inference_model`` ,
        there will be the following limitations when using it in fine-tuning:
        1. Imperative mode does not support LoDTensor. All original model's feed targets or parameters that depend on LoD are temporarily unavailable.
        2. All saved model's feed targets need to be passed into TranslatedLayer's forward function.
        3. The variable's ``stop_gradient`` information is lost and can not be recovered.
        4. The parameter's ``trainable`` information is lost and can not be recovered.

    Args:
        path (str): The path prefix to load model. The format is ``dirname/file_prefix`` or ``file_prefix`` .
        **configs (dict, optional): Other load configuration options for compatibility. We do not
            recommend using these configurations; they may be removed in the future. If not necessary,
            DO NOT use them. Default None.
            The following options are currently supported:
            (1) model_filename (str): The inference model file name of the paddle 1.x
            ``save_inference_model`` save format. Default file name is :code:`__model__` .
            (2) params_filename (str): The persistable variables file name of the paddle 1.x
            ``save_inference_model`` save format. No default file name, save variables separately
            by default.


    Returns:
        TranslatedLayer: A Layer object that can run the saved translated model.

    Examples:
        1. Load model saved by ``paddle.jit.save`` then perform inference and fine-tune training.

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt

            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            class LinearNet(nn.Layer):
                def __init__(self):
                    super().__init__()
                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

                @paddle.jit.to_static
                def forward(self, x):
                    return self._linear(x)

            def train(layer, loader, loss_fn, opt):
                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)
                        loss.backward()
                        opt.step()
                        opt.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

            # 1. train & save model.

            # create network
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)

            # train
            train(layer, loader, loss_fn, adam)

            # save
            path = "example_model/linear"
            paddle.jit.save(layer, path)

            # 2. load model

            # load
            loaded_layer = paddle.jit.load(path)

            # inference
            loaded_layer.eval()
            x = paddle.randn([1, IMAGE_SIZE], 'float32')
            pred = loaded_layer(x)

            # fine-tune
            loaded_layer.train()
            adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
            train(loaded_layer, loader, loss_fn, adam)


        2. Load model saved by ``paddle.fluid.io.save_inference_model`` then perform inference and fine-tune training.

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.static as static
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.nn.functional as F

            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            paddle.enable_static()

            image = static.data(name='image', shape=[None, 784], dtype='float32')
            label = static.data(name='label', shape=[None, 1], dtype='int64')
            pred = static.nn.fc(x=image, size=10, activation='softmax')
            loss = F.cross_entropy(input=pred, label=label)
            avg_loss = paddle.mean(loss)

            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer.minimize(avg_loss)

            place = paddle.CPUPlace()
            exe = static.Executor(place)
            exe.run(static.default_startup_program())

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                feed_list=[image, label],
                places=place,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                return_list=False,
                num_workers=2)

            # 1. train and save inference model
            for data in loader():
                exe.run(
                    static.default_main_program(),
                    feed=data,
                    fetch_list=[avg_loss])

            model_path = "fc.example.model"
            paddle.fluid.io.save_inference_model(
                model_path, ["image"], [pred], exe)

            # 2. load model

            # enable dygraph mode
            paddle.disable_static(place)

            # load
            fc = paddle.jit.load(model_path)

            # inference
            fc.eval()
            x = paddle.randn([1, IMAGE_SIZE], 'float32')
            pred = fc(x)

            # fine-tune
            fc.train()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
            loader = paddle.io.DataLoader(dataset,
                places=place,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)
            for epoch_id in range(EPOCH_NUM):
                for batch_id, (image, label) in enumerate(loader()):
                    out = fc(image)
                    loss = loss_fn(out, label)
                    loss.backward()
                    adam.step()
                    adam.clear_grad()
                    print("Epoch {} batch {}: loss = {}".format(
                        epoch_id, batch_id, np.mean(loss.numpy())))
    """
    # 1. construct correct config
    config = _parse_load_config(configs)
    model_path, config = _build_load_path_and_config(path, config)

    return TranslatedLayer._construct(model_path, config)


@dygraph_only
def _trace(
    layer, inputs, feed_prefix='feed_', fetch_prefix='fetch_', tmp_prefix='t_'
):
    assert isinstance(layer, Layer)

    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]

    tracer = _dygraph_tracer()._get_program_desc_tracer()

    var_list = extract_vars(inputs)

    with program_desc_tracing_guard(True):
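        # run the layer once under the tracer so every op executed in the
        # forward pass is recorded into a ProgramDesc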
        original_outputs = layer(*inputs)
        if not isinstance(original_outputs, (list, tuple)):
            outputs = [original_outputs]
        else:
            outputs = original_outputs
        out_vars = extract_vars(outputs, err_tag='outputs')

        (
            program_desc,
            feed_names,
            fetch_names,
            parameters,
        ) = tracer.create_program_desc(
            var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix
        )
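        # clear the tracer's recorded state so a later trace does not pick up
        # ops from this run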
        tracer.reset()

    with _dygraph_guard(None):
        program = create_program_from_desc(program_desc)

    return original_outputs, program, feed_names, fetch_names, parameters


class TracedLayer:
    """
    :api_attr: imperative

    TracedLayer is used to convert a forward dygraph model into a static
    graph model. This is mainly used to save the dygraph model for online
    inference using C++. Users can also run inference in Python using the
    converted static graph model, which usually performs better than the
    original dygraph model.

    TracedLayer runs the static graph model using :code:`Executor` and
    :code:`CompiledProgram` . The static graph model shares parameters with
    the dygraph model.

    TracedLayer objects should not be created by the constructor; they must
    be created by the static method :code:`TracedLayer.trace(layer, inputs)` .

    TracedLayer can only be used to convert a data-independent dygraph
    model into a static graph model, which means the behavior of the dygraph
    model must be independent of the tensor data and shape.
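
    For example, a layer like the hypothetical sketch below is data-dependent:
    which branch executes depends on the input values, so a single trace can
    only record the branch taken for the example input.

    .. code-block:: python

        import paddle

        class DataDependentLayer(paddle.nn.Layer):
            def forward(self, x):
                # the executed branch depends on the value of x, so one
                # trace cannot represent both branches
                if float(paddle.mean(x)) > 0:
                    return x * 2
                return x - 1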
    """

    def __init__(self, program, parameters, feed_names, fetch_names):
        self._program = program
        self._feed_names = feed_names
        self._fetch_names = fetch_names
        self._params = parameters

        self._place = _current_expected_place()

        self._scope = core.Scope()
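        # share each parameter's underlying tensor into the private scope so
        # the static graph program reuses the dygraph parameters without copying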
        for p in parameters:
            src_tensor = p.value().get_tensor()
            dst_tensor = self._scope.var(p.name).get_tensor()
            dst_tensor._share_data_with(src_tensor)

        self._exe = Executor(self._place)
        self._compiled_program = None
        self._build_strategy = None
        self._exec_strategy = None

    @property
    def program(self):
        return self._program

    def _switch(self, is_test=True):
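        # toggle the `is_test` attribute on every op that has one, switching
        # the traced program between training and inference behavior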
        for block_id in range(self._program.num_blocks):
            block = self._program.block(block_id)
            for op in block.ops:
                if op.has_attr("is_test"):
                    op._set_attr("is_test", is_test)

    @staticmethod
    @dygraph_only
    def trace(layer, inputs):
        """
        This method is the only allowed way to create a TracedLayer object.
        It calls :code:`layer(*inputs)` to run the dygraph model and
        converts it into a static graph model.

        Args:
            layer (paddle.nn.Layer): the layer object to be traced.
            inputs (list(Tensor)|tuple(Tensor)|Tensor): the input tensors of
                the layer object.

        Returns:
            tuple: A tuple of 2 items, whose first item is the output of
                :code:`layer(*inputs)` , and the second item is the created
                TracedLayer object.

        Examples:
            .. code-block:: python

                import os
                os.environ['FLAGS_enable_eager_mode'] = '0'
                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super().__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)


                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')
                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])

                # run the static graph model using Executor inside
                out_static_graph = static_layer([in_var])

                print(len(out_static_graph)) # 1
                print(out_static_graph[0].shape) # (2, 10)

                # save the static graph model for inference
                static_layer.save_inference_model('./saved_infer_model')

        """
        assert isinstance(
            layer, Layer
        ), "The type of 'layer' in paddle.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received {}.".format(
            type(layer)
        )
        outs, prog, feed, fetch, parameters = _trace(layer, inputs)
        traced = TracedLayer(prog, parameters, feed, fetch)
        return outs, traced

    def set_strategy(self, build_strategy=None, exec_strategy=None):
        """
        Set the strategies used when running the static graph model.

        Args:
            build_strategy (BuildStrategy, optional): build strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.
            exec_strategy (ExecutionStrategy, optional): execution strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.

        Returns:
            None

        Examples:
            .. code-block:: python

                import os
                os.environ['FLAGS_enable_eager_mode'] = '0'
                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super().__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)

                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')

                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])

                build_strategy = paddle.static.BuildStrategy()
                build_strategy.enable_inplace = True

                exec_strategy = paddle.static.ExecutionStrategy()
                exec_strategy.num_threads = 2

                static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy)
                out_static_graph = static_layer([in_var])

        """
        assert self._compiled_program is None, "Cannot set strategy after run"
        assert isinstance(
            build_strategy, (type(None), BuildStrategy)
        ), "The type of 'build_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format(
            type(build_strategy)
        )
        assert isinstance(
            exec_strategy, (type(None), ExecutionStrategy)
        ), "The type of 'exec_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format(
            type(exec_strategy)
        )
        self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy

    @switch_to_static_graph
    def _compile(self):
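        # build the CompiledProgram lazily; __call__ triggers this on first run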
        self._compiled_program = CompiledProgram(
            self._program
        ).with_data_parallel(
            build_strategy=self._build_strategy,
            exec_strategy=self._exec_strategy,
            places=self._place,
        )

    def _build_feed(self, inputs):
        assert isinstance(
            inputs, (list, tuple)
        ), "Inputs should be a list or tuple of variables"
        assert len(inputs) == len(self._feed_names)
        feed_dict = {}
        if _non_static_mode():
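            # in dygraph mode, feed the underlying dense tensor of each Tensor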
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x.value().get_tensor()
        else:
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x

        return feed_dict

    @switch_to_static_graph
    def _run(self, feed):
        return self._exe.run(
            self._compiled_program, feed=feed, fetch_list=self._fetch_names
        )

    def __call__(self, inputs):
        with scope_guard(self._scope):
            if self._compiled_program is None:
                self._compile()

            return self._run(self._build_feed(inputs))

    @switch_to_static_graph
    def save_inference_model(self, path, feed=None, fetch=None, **kwargs):
        """
        Save the TracedLayer to a model for inference. The saved
        inference model can be loaded by C++ inference APIs.

        ``path`` is the prefix of the saved objects. The saved translated program
        file has the suffix ``.pdmodel`` , and the saved persistable variables
        file has the suffix ``.pdiparams`` .

        Args:
            path(str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
            feed (list[int], optional): the input variable indices of the saved
                inference model. If None, all input variables of the
                TracedLayer object would be the inputs of the saved inference
                model. Default None.
            fetch (list[int], optional): the output variable indices of the
                saved inference model. If None, all output variables of the
                TracedLayer object would be the outputs of the saved inference
                model. Default None.
            kwargs: Supported keys include 'clip_extra'. Set it to True if you want to clip extra information for every operator.

        Returns:
            None

        Examples:
            .. code-block:: python

                import os
                os.environ['FLAGS_enable_eager_mode'] = '0'
                import numpy as np
                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super().__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)

                save_dirname = './saved_infer_model'
                in_np = np.random.random([2, 3]).astype('float32')
                in_var = paddle.to_tensor(in_np)
                layer = ExampleLayer()

                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
                static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])

                paddle.enable_static()
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                program, feed_vars, fetch_vars = paddle.static.load_inference_model(save_dirname,
                                                    exe)

                fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
                print(fetch.shape) # (2, 10)
        """
        check_type(
            path,
            "path",
            str,
            "paddle.jit.TracedLayer.save_inference_model",
        )
        check_type(
            feed,
            "feed",
            (type(None), list),
            "paddle.jit.TracedLayer.save_inference_model",
        )
        if isinstance(feed, list):
            for f in feed:
                check_type(
                    f,
                    "each element of feed",
                    int,
                    "paddle.jit.TracedLayer.save_inference_model",
                )
        check_type(
            fetch,
            "fetch",
            (type(None), list),
            "paddle.jit.TracedLayer.save_inference_model",
        )
        if isinstance(fetch, list):
            for f in fetch:
                check_type(
                    f,
                    "each element of fetch",
                    int,
                    "paddle.jit.TracedLayer.save_inference_model",
                )
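        # 'clip_extra' defaults to True: extra operator information that is
        # not needed for inference is clipped from the saved program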
        clip_extra = kwargs.get('clip_extra', True)
        # path check
        file_prefix = os.path.basename(path)
        if file_prefix == "":
            raise ValueError(
                "The input path MUST be format of dirname/file_prefix "
                "[dirname\\file_prefix in Windows system], but received "
1801 1802
                "file_prefix is empty string."
            )

        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)

        from paddle.fluid.io import save_inference_model

        def get_feed_fetch(all_vars, partial_vars):
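            # map optional integer indices onto the traced feed/fetch name
            # lists; None selects all variables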
            if partial_vars is None:
                return all_vars

            return [all_vars[idx] for idx in partial_vars]

        with scope_guard(self._scope):
            feeded_var_names = get_feed_fetch(self._feed_names, feed)
            target_var_names = get_feed_fetch(self._fetch_names, fetch)
            target_vars = []
            for name in target_var_names:
                target_var = self._program.global_block().vars.get(name, None)
                assert target_var is not None, "{} cannot be found".format(name)
                target_vars.append(target_var)

            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX

            save_inference_model(
                dirname=dirname,
                feeded_var_names=feeded_var_names,
                target_vars=target_vars,
                executor=self._exe,
                main_program=self._program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                clip_extra=clip_extra,
            )