# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import pickle
import warnings
import functools
from collections import OrderedDict
import inspect
import threading

import six
import paddle
from paddle.fluid import core
from paddle.fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy
from paddle.fluid.data_feeder import check_type
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from paddle.fluid.dygraph.base import program_desc_tracing_guard, switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ConversionOptions, CONVERSION_OPTIONS
from paddle.fluid.dygraph.dygraph_to_static.logging_utils import set_code_level, set_verbosity
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction, unwrap_decorators
from paddle.fluid.dygraph.io import TranslatedLayer, INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX
from paddle.fluid.dygraph.layers import Layer
from paddle.fluid.executor import Executor, scope_guard
from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter
from paddle.fluid.framework import _current_expected_place, _dygraph_guard, _dygraph_tracer
from paddle.fluid.framework import dygraph_only, in_dygraph_mode
from paddle.fluid.wrapped_decorator import wrap_decorator

__all__ = [
    'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level',
    'set_verbosity', 'save', 'load', 'not_to_static'
]


def create_program_from_desc(program_desc):
    program = Program()
    program.desc = program_desc
    program.blocks = [Block(program, 0)]
    program._sync_with_cpp()
    return program


def _extract_vars(inputs, result_list, err_tag='inputs'):
    if isinstance(inputs, Variable):
        result_list.append(inputs)
    elif isinstance(inputs, (list, tuple)):
        for var in inputs:
            _extract_vars(var, result_list, err_tag)
    else:
        raise TypeError(
            "The type of 'each element of {}' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received {}.".
            format(err_tag, type(inputs)))


def extract_vars(inputs, err_tag='inputs'):
    result_list = []
    _extract_vars(inputs, result_list, err_tag)
    return result_list
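
# A minimal illustration (comment-only sketch, not executed): given
# hypothetical Variables v1, v2, v3, extract_vars flattens arbitrarily nested
# lists/tuples and rejects any non-Variable element:
#
#     extract_vars([v1, (v2, [v3])])   # -> [v1, v2, v3]
#     extract_vars([v1, "not a var"])  # raises TypeError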


def _dygraph_to_static_func_(dygraph_func):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @dygraph_to_static_func only converts imperative dygraph APIs into
    declarative net-building APIs, which means it doesn't return the numerical
    result immediately as imperative mode does. Users should handle the Program
    and Executor by themselves.

    Note:
    This decorator is NOT our recommended way to transform an imperative
    function into a declarative function. We will remove this decorator after
    we finalize cleaning up the code.

    Args:
        dygraph_func (callable): callable imperative function.

    Returns:
        Callable: converting imperative dygraph APIs into declarative
        net-building APIs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          from paddle.fluid.dygraph.jit import dygraph_to_static_func

          @dygraph_to_static_func
          def func(x):
              if fluid.layers.mean(x) < 0:
                  x_v = x - 1
              else:
                  x_v = x + 1

              return x_v

          x = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='float64')

          x_v = func(x)
          exe = fluid.Executor(fluid.CPUPlace())
          out = exe.run(fetch_list=[x_v])
          print(out[0])
          # [[1. 1. 1.]
          #  [1. 1. 1.]
          #  [1. 1. 1.]]

    """

    # TODO: remove this decorator after we finalize training API
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        if in_dygraph_mode() or not program_translator.enable_to_static:
            logging_utils.warn(
                "The decorator 'dygraph_to_static_func' doesn't work in "
                "dygraph mode or when ProgramTranslator.enable is set to "
                "False. We will just return the dygraph output.")
            return dygraph_func(*args, **kwargs)
        static_func = program_translator.get_func(dygraph_func)
        return static_func(*args, **kwargs)

    return __impl__


dygraph_to_static_func = wrap_decorator(_dygraph_to_static_func_)


def copy_decorator_attrs(original_func, decorated_obj):
    """
    Copies some necessary attributes from the original function into the decorated function.

    Args:
        original_func(callable): the original decorated function.
        decorated_obj(StaticFunction): the target decorated StaticFunction object.
    """
    decorator_name = "declarative"

    decorated_obj.__name__ = original_func.__name__
    decorated_obj._decorator_name = decorator_name
    decorated_obj.__wrapped__ = original_func
    decorated_obj.__doc__ = original_func.__doc__
    if hasattr(original_func, "__module__"):
        decorated_obj.__module__ = original_func.__module__

    return decorated_obj
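
# A small sketch of the effect (assuming a plain undecorated function `foo`):
# after `static_foo = declarative(foo)`, the returned StaticFunction keeps the
# identity of the original function for tooling and docs:
#
#     static_foo.__name__     # 'foo'
#     static_foo.__wrapped__  # <function foo>
#     static_foo.__doc__      # foo's original docstring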


def declarative(function=None, input_spec=None, build_strategy=None):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @declarative handles the Program and Executor of static mode and returns
    the result as dygraph Tensor(s). Users could use the returned dygraph
    Tensor(s) to do imperative training, inference, or other operations. If the
    decorated function calls another imperative function, the called one will be
    converted into a declarative function as well.

    Args:
        function (callable): callable imperative function.
        input_spec(list[InputSpec]|tuple[InputSpec]): list/tuple of InputSpec to
            specify the shape/dtype/name information of each input Tensor.
        build_strategy(BuildStrategy|None): This argument is used to compile the
            converted program with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        Tensor(s): containing the numerical result.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.jit import to_static

            @to_static
            def func(x):
                if paddle.mean(x) < 0:
                    x_v = x - 1
                else:
                    x_v = x + 1
                return x_v

            x = paddle.ones([1, 2], dtype='float32')
            x_v = func(x)
            print(x_v) # [[2. 2.]]

    """

    def decorated(python_func):
        """
        Decorates a python function into a StaticFunction object.
        """
        # Step 1. unwrap the function if it is already decorated.
        _, python_func = unwrap_decorators(python_func)

        # Step 2. copy some attributes from original python function.
        static_layer = copy_decorator_attrs(
            original_func=python_func,
            decorated_obj=StaticFunction(
                function=python_func,
                input_spec=input_spec,
                build_strategy=build_strategy))

        return static_layer

    build_strategy = build_strategy or BuildStrategy()
    if not isinstance(build_strategy, BuildStrategy):
        raise TypeError(
            "Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}".
            format(type(build_strategy).__name__))

    # for usage: `declarative(foo, ...)`
    if function is not None:
        if isinstance(function, Layer):
            if isinstance(function.forward, StaticFunction):
                class_name = function.__class__.__name__
                logging_utils.warn(
                    "`{}.forward` has already been decorated somewhere. It will be redecorated to replace the previous one.".
                    format(class_name))
            function.forward = decorated(function.forward)
            return function
        else:
            return decorated(function)

    # for usage: `@declarative`
    return decorated


def not_to_static(func=None):
    """
    A decorator to suppress the conversion of a function.

    Args:
        func(callable): The function to decorate.

    Returns:
        callable: A function which won't be converted in Dynamic-to-Static.

    Examples:
        .. code-block:: python

            import paddle

            @paddle.jit.not_to_static
            def func_not_to_static(x):
                res = x - 1
                return res

            @paddle.jit.to_static
            def func(x):
                if paddle.mean(x) < 0:
                    out = func_not_to_static(x)
                else:
                    out = x + 1
                return out

            x = paddle.ones([1, 2], dtype='float32')
            out = func(x)
            print(out) # [[2. 2.]]
    """
    if func is None:
        return not_to_static

    options = ConversionOptions(not_convert=True)
    setattr(func, CONVERSION_OPTIONS, options)
    return func


class _SaveLoadConfig(object):
    def __init__(self):
        self._output_spec = None
        self._model_filename = None
        self._params_filename = None
        self._separate_params = False
        # used for `paddle.load`
        self._keep_name_table = False

        # NOTE: Users rarely use the following configs, so they are not exposed
        # to users, reducing user learning costs, but we retain the
        # configuration capabilities internally.

        # If True, programs are modified to only support direct inference deployment.
        # Otherwise, more information will be stored for flexible optimization and re-training.
        # Currently, only True is supported
        self._export_for_deployment = True

        # If True, it will save the inference program only, and will not save the params of the Program
        self._program_only = False

    @property
    def output_spec(self):
        return self._output_spec

    @output_spec.setter
    def output_spec(self, spec):
        if spec is None:
            return
        if not isinstance(spec, list):
            raise TypeError(
                "The config `output_spec` should be 'list', but received input type is %s."
                % type(spec))
        for var in spec:
            if not isinstance(var, core.VarBase):
                raise TypeError(
                    "The element in config `output_spec` list should be 'Variable', but received element's type is %s."
                    % type(var))
        self._output_spec = spec

    @property
    def model_filename(self):
        return self._model_filename

    @model_filename.setter
    def model_filename(self, filename):
        if filename is None:
            return
        if not isinstance(filename, six.string_types):
            raise TypeError(
                "The config `model_filename` should be str, but received input's type is %s."
                % type(filename))
        if len(filename) == 0:
            raise ValueError("The config `model_filename` is an empty string.")
        self._model_filename = filename

    @property
    def params_filename(self):
        return self._params_filename

    @params_filename.setter
    def params_filename(self, filename):
        if filename is None:
            return
        if not isinstance(filename, six.string_types):
            raise TypeError(
                "The config `params_filename` should be str, but received input's type is %s."
                % type(filename))
        if len(filename) == 0:
            raise ValueError("The config `params_filename` is an empty string.")
        self._params_filename = filename

    @property
    def keep_name_table(self):
        return self._keep_name_table

    @keep_name_table.setter
    def keep_name_table(self, value):
        if value is None:
            return
        if not isinstance(value, bool):
            raise TypeError(
                "The config `keep_name_table` should be a bool value, but received input's type is %s."
                % type(value))
        self._keep_name_table = value

def _parse_save_configs(configs):
    supported_configs = ['output_spec']

    # input check
    for key in configs:
        if key not in supported_configs:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.save` is not supported."
                % (key))

    # construct inner config
    inner_config = _SaveLoadConfig()
    inner_config.output_spec = configs.get('output_spec', None)

    return inner_config


def _parse_load_config(configs):
    supported_configs = ['model_filename', 'params_filename']

    # input check
    for key in configs:
        if key not in supported_configs:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.load` is not supported."
                % (key))

    # construct inner config
    inner_config = _SaveLoadConfig()
    inner_config.model_filename = configs.get('model_filename', None)
    inner_config.params_filename = configs.get('params_filename', None)

    return inner_config
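
# Illustrative mapping (hypothetical values) from the public **configs kwargs
# onto the private _SaveLoadConfig fields:
#
#     save_cfg = _parse_save_configs({'output_spec': [out_var]})
#     save_cfg.output_spec      # [out_var]
#
#     load_cfg = _parse_load_config({'model_filename': '__model__'})
#     load_cfg.model_filename   # '__model__'
#     load_cfg.params_filename  # None, i.e. variables saved separately
#
# Any unsupported key raises ValueError, e.g. _parse_save_configs({'foo': 1}).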


def _get_input_var_names(inputs, input_spec):
    name_none_error = "The %s's name is None. " \
        "When using jit.save, please set InputSpec's name in " \
        "to_static(input_spec=[]) and jit.save(input_spec=[]) " \
        "and make sure they are consistent."
    name_no_exists_error = "The tensor `%s` does not exist. " \
        "Please make sure the name of InputSpec or example Tensor " \
        "in input_spec is the same as the name of InputSpec in " \
        "`to_static` decorated on the Layer.forward method."
    result_list = []
    input_var_names = [
        var.name for var in flatten(inputs) if isinstance(var, Variable)
    ]
    if input_spec is None:
        # no prune
        return input_var_names
    else:
        # filter out non-tensor type spec infos.
        input_spec = [
            spec for spec in input_spec
            if isinstance(spec, paddle.static.InputSpec)
        ]

    if len(input_spec) == len(input_var_names):
        # no prune
        result_list = input_var_names
        # if input spec name not in input_var_names, only raise warning
        for spec in input_spec:
            if spec.name is None:
                warnings.warn(name_none_error % spec)
            elif spec.name not in input_var_names:
                warnings.warn(name_no_exists_error % spec.name)
            else:
                # do nothing
                pass
    else:
        # prune
        for spec in input_spec:
            if spec.name is None:
                # name is None, the input_spec only can be InputSpec
                raise ValueError(name_none_error % spec)
            elif spec.name not in input_var_names:
                # the input_spec can be `InputSpec` or `VarBase`
                raise ValueError(name_no_exists_error % spec.name)
            else:
                result_list.append(spec.name)

    return result_list
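
# Pruning semantics in brief (hypothetical names): if the program inputs are
# named ['image', 'label'], then
#
#     _get_input_var_names(inputs, None)
#         # -> ['image', 'label']  (no prune)
#     _get_input_var_names(inputs, [InputSpec(shape=[None, 784], name='image')])
#         # -> ['image']           (prune to the named subset)
#
# and an InputSpec whose name is missing from the program inputs raises
# ValueError (or only warns when the spec and input counts already match).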


def _get_output_vars(outputs, output_spec):
    name_no_exists_error = "The tensor `%s` does not exist. " \
        "Please make sure the name of example Tensor " \
        "in configs.output_spec is the output tensor of " \
        "Layer.forward method."
    result_list = []
    output_vars_dict = OrderedDict()
    for var in flatten(outputs):
        if isinstance(var, Variable):
            output_vars_dict[var.name] = var
    if output_spec is None:
        result_list = output_vars_dict.values()
    elif output_spec is not None and len(output_spec) == len(output_vars_dict):
        result_list = output_vars_dict.values()
        for var in output_spec:
            if var.name not in output_vars_dict:
                warnings.warn(name_no_exists_error % var.name)
    else:
        for var in output_spec:
            if var.name not in output_vars_dict:
                raise ValueError(name_no_exists_error % var.name)
            else:
                result_list.append(output_vars_dict[var.name])
    return result_list


# NOTE(chenweihang): [ Handling of use cases of API paddle.jit.load ]
# `paddle.jit.load` may be used to load saved results of:
# 1. Expected cases:
#   - paddle.jit.save
#   - paddle.static.save_inference_model
#   - paddle.fluid.io.save_inference_model
# 2. Error cases:
#   - paddle.save: no .pdmodel for prefix
#   - paddle.static.save: no .pdiparams but .pdparams exists
#   - paddle.fluid.io.save_params/save_persistables: no __model__
# TODO(chenweihang): polish error message in above error cases
def _build_load_path_and_config(path, config):
    # NOTE(chenweihang): If both [prefix save format] and [directory save format] exist,
    # raise error, avoid confusing behavior
    prefix_format_path = path + INFER_MODEL_SUFFIX
    prefix_format_exist = os.path.exists(prefix_format_path)
    directory_format_exist = os.path.isdir(path)
    if prefix_format_exist and directory_format_exist:
        raise ValueError(
            "The %s.pdmodel and %s directory exist at the same time, "
            "don't know which one to load, please make sure that the specified target "
            "of ``path`` is unique." % (path, path))
    elif not prefix_format_exist and not directory_format_exist:
        raise ValueError("The ``path`` (%s) to load model not exists." % path)
    else:
        if prefix_format_exist:
            file_prefix = os.path.basename(path)
            model_path = os.path.dirname(path)
            if config.model_filename is not None:
                warnings.warn(
                    "When loading the result saved with the "
                    "specified file prefix, the ``model_filename`` config does "
                    "not take effect.")
            config.model_filename = file_prefix + INFER_MODEL_SUFFIX
            if config.params_filename is not None:
                warnings.warn(
                    "When loading the result saved with the "
                    "specified file prefix, the ``params_filename`` config does "
                    "not take effect.")
            config.params_filename = file_prefix + INFER_PARAMS_SUFFIX
        else:
            # Compatible with the old save_inference_model format
            model_path = path

    return model_path, config
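
# Resolution sketch (hypothetical layout): given files saved with prefix
# "linear" inside "example_model/",
#
#     _build_load_path_and_config("example_model/linear", config)
#     # -> ("example_model", config) with config.model_filename set to
#     #    "linear.pdmodel" and config.params_filename set to
#     #    "linear.pdiparams"
#
# whereas a plain directory path (the old save_inference_model format) is
# returned unchanged as model_path.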


_save_pre_hooks_lock = threading.Lock()
_save_pre_hooks = []


class HookRemoveHelper(object):
    """ A HookRemoveHelper that can be used to remove hook. """

    def __init__(self, hook):
        self._hook = hook

    def remove(self):
        _remove_save_pre_hook(self._hook)


def _register_save_pre_hook(hook):
    """
    Register a save pre-hook for `paddle.jit.save`.
    This hook will be executed before `save` function has been invoked.

    hook(layer, input_spec, configs) -> None
    - layer (Layer|function): This argument is corresponding to `layer` in `paddle.jit.save`.
    - input_spec (list or tuple[InputSpec|Tensor|Python built-in variable]): This argument is corresponding to `input_spec` in `paddle.jit.save`.
    - configs (dict): This argument is corresponding to `configs` in `paddle.jit.save`.

    Args:
        hook(function): a function registered as a save pre-hook

    Returns:
        HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()`.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            IMAGE_SIZE = 256
            CLASS_NUM = 10

            class LinearNet(paddle.nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear = paddle.nn.Linear(IMAGE_SIZE, CLASS_NUM)

                def forward(self, x):
                    return self._linear(x)

            saving_count = 0
            def save_pre_hook(layer, input_spec, configs):
                global saving_count
                saving_count += 1

            remove_handler = paddle.jit.register_save_pre_hook(save_pre_hook)

            layer = LinearNet()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1

            remove_handler.remove()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1
    """
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    if hook not in _save_pre_hooks:
        _save_pre_hooks.append(hook)
    _save_pre_hooks_lock.release()
    return HookRemoveHelper(hook)


def _clear_save_pre_hooks():
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    _save_pre_hooks.clear()
    _save_pre_hooks_lock.release()


def _remove_save_pre_hook(hook):
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    if hook in _save_pre_hooks:
        _save_pre_hooks.remove(hook)
    _save_pre_hooks_lock.release()


def _run_save_pre_hooks(func):
    def wrapper(layer, path, input_spec=None, **configs):
        global _save_pre_hooks
        for hook in _save_pre_hooks:
            hook(layer, input_spec, configs)
        func(layer, path, input_spec, **configs)

    return wrapper
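
# Illustrative flow (hypothetical call): with `save` decorated by
# _run_save_pre_hooks, a call such as
#
#     paddle.jit.save(layer, "example_model/linear")
#
# first invokes every registered hook as hook(layer, None, {}) and only then
# runs the real save logic.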


@_run_save_pre_hooks
@switch_to_static_graph
def save(layer, path, input_spec=None, **configs):
    """
632
    Saves input Layer or function as ``paddle.jit.TranslatedLayer``
633 634
    format model, which can be used for inference or fine-tuning after loading.

635
    It will save the translated program and all related persistable
636
    variables of input Layer to given ``path`` .
637 638

    ``path`` is the prefix of saved objects, and the saved translated program file
639
    suffix is ``.pdmodel`` , the saved persistable variables file suffix is ``.pdiparams`` ,
640
    and here also saved some additional variable description information to a file,
641
    its suffix is ``.pdiparams.info``, these additional information is used in fine-tuning.
642 643

    The saved model can be loaded by follow APIs:
644 645
      - ``paddle.jit.load``
      - ``paddle.static.load_inference_model``
646 647
      - Other C++ inference APIs

648 649 650 651
    .. note::
        When using ``paddle.jit.save`` to save a function, parameters will not be saved. If you have to 
        save the parameter, please pass the Layer containing function and parameter to ``paddle.jit.save``.

652
    Args:
653
        layer (Layer|function): The Layer or function to be saved.
654
        path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
655 656 657
        input_spec (list or tuple[InputSpec|Tensor|Python built-in variable], optional): Describes the input of the saved model's forward
            method, which can be described by InputSpec or example Tensor. Moreover, we support to specify non-tensor type argument,
            such as int, float, string, or list/dict of them.If None, all input variables of
658
            the original Layer's forward method would be the inputs of the saved model. Default None.
659 660
        **configs (dict, optional): Other save configuration options for compatibility. We do not
            recommend using these configurations, they may be removed in the future. If not necessary,
661 662 663
            DO NOT use them. Default None.
            The following options are currently supported:
            (1) output_spec (list[Tensor]): Selects the output targets of the saved model.
664 665 666
            By default, all return variables of original Layer's forward method are kept as the
            output of the saved model. If the provided ``output_spec`` list is not all output variables,
            the saved model will be pruned according to the given ``output_spec`` list.
667

668 669 670 671 672 673
    Returns:
        None

    Examples:
        .. code-block:: python

674
            # example 1: save layer
675
            import numpy as np
676 677 678
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
679

680 681 682
            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4
683

684 685 686 687 688 689 690
            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples
691

692 693 694 695
                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label
696

697 698
                def __len__(self):
                    return self.num_samples
699

700 701
            class LinearNet(nn.Layer):
                def __init__(self):
702
                    super(LinearNet, self).__init__()
703
                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
704

705
                @paddle.jit.to_static
706 707 708
                def forward(self, x):
                    return self._linear(x)

709 710 711 712 713 714 715 716 717 718 719 720
            def train(layer, loader, loss_fn, opt):
                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)
                        loss.backward()
                        opt.step()
                        opt.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

            # 1. train & save model.
721

722 723 724 725
            # create network
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
726

727 728 729 730 731 732 733
            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)
734

735 736
            # train
            train(layer, loader, loss_fn, adam)
737

738
            # save
739 740
            path = "example_model/linear"
            paddle.jit.save(layer, path)
741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762

            # example 2: save function
            import paddle
            from paddle.static import InputSpec


            def save_function():
                @paddle.jit.to_static
                def fun(inputs):
                    return paddle.tanh(inputs)

                path = 'test_jit_save_load_function_1/func'
                inps = paddle.rand([3, 6])
                origin = fun(inps)

                paddle.jit.save(fun, path)
                load_func = paddle.jit.load(path)

                load_result = load_func(inps)
                print((load_result - origin).abs().max() < 1e-10)
                
            save_function()
763 764
    """

    # 1. input build & check
    prog_translator = ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "The paddle.jit.save doesn't work when setting ProgramTranslator.enable to False."
        )

    if not (isinstance(layer, Layer) or inspect.isfunction(layer) or isinstance(
            layer, StaticFunction)):
        raise TypeError(
            "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
            % type(layer))
    elif inspect.isfunction(layer) or isinstance(layer, StaticFunction):
        warnings.warn(
            'What you save is a function, and `jit.save` will generate the name of the model file according to the `path` you specify. When loading these files with `jit.load`, you get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.'
        )

    # NOTE(chenweihang): If the input layer is wrapped by DataParallel,
    # the args and kwargs of the forward method can't be parsed by
    # function_spec, so here we save DataParallel._layers instead of
    # DataParallel itself.
    # NOTE(chenweihang): using inner_layer, do not change the input layer
    if isinstance(layer, paddle.DataParallel):
        inner_layer = layer._layers
    else:
        inner_layer = layer

    # path check
    file_prefix = os.path.basename(path)
    if file_prefix == "":
        raise ValueError(
            "The input path MUST be in the format of dirname/file_prefix "
            "[dirname\\file_prefix in Windows system], but received "
            "file_prefix is empty string.")

    dirname = os.path.dirname(path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)

    # avoid changing the user-given input_spec
    inner_input_spec = None
    if input_spec is not None:
        if isinstance(layer, Layer):
            for attr_func in dir(inner_layer):
                static_func = getattr(inner_layer, attr_func, None)
                if isinstance(static_func,
                              StaticFunction) and 'forward' != attr_func:
                    raise ValueError(
                        "If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
                        % type(input_spec))

        if not isinstance(input_spec, (list, tuple)):
            raise TypeError(
                "The input input_spec should be 'list', but received input_spec's type is %s."
                % type(input_spec))
        inner_input_spec = []
        for var in flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, Variable)):
                inner_input_spec.append(
                    paddle.static.InputSpec.from_tensor(var))
            else:
                # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
                inner_input_spec.append(var)

    # parse configs
    configs = _parse_save_configs(configs)
    scope = core.Scope()
    extra_var_info = dict()
    if isinstance(layer, Layer):
        functions = dir(inner_layer)
    else:
        # layer is a function
        functions = [layer, ]
    for attr_func in functions:
        if isinstance(layer, Layer):
            static_func = getattr(inner_layer, attr_func, None)
            if isinstance(static_func, StaticFunction):
                concrete_program = static_func.concrete_program_specify_input_spec(
                    inner_input_spec)
            elif 'forward' == attr_func:
                # transform in jit.save, if input_spec is incomplete, declarative will throw error
                # inner_input_spec is list[InputSpec], it should be packed with the same
                # structure as the original input_spec here.
                if inner_input_spec:
                    inner_input_spec = pack_sequence_as(input_spec,
                                                        inner_input_spec)
                static_forward = declarative(
                    inner_layer.forward, input_spec=inner_input_spec)
                concrete_program = static_forward.concrete_program
                # the input_spec has been used in declarative, which is equal to
                # @declarative with input_spec and jit.save without input_spec,
                # avoid needless warning
                inner_input_spec = None
            else:
                continue
        else:
            # When layer is a function
            if isinstance(attr_func, StaticFunction):
                concrete_program = attr_func.concrete_program_specify_input_spec(
                    inner_input_spec)
            else:
                if inner_input_spec:
                    inner_input_spec = pack_sequence_as(input_spec,
                                                        inner_input_spec)
                static_function = declarative(
                    attr_func, input_spec=inner_input_spec)
                concrete_program = static_function.concrete_program

                if static_function._class_instance is None:
                    warnings.warn(
                        '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.
                        format(layer))

        dygraph_state_dict = None
        if isinstance(inner_layer, Layer):
            dygraph_state_dict = inner_layer.to_static_state_dict()
        elif isinstance(attr_func, StaticFunction):
            if attr_func._class_instance:
                dygraph_state_dict = attr_func._class_instance.to_static_state_dict(
                )

        if dygraph_state_dict:
            # NOTE(chenweihang): we maintain the mapping of variable name to
            # structured name; the buffer variables (non-persistable) saved to
            # the inference program may not be needed by the dygraph Layer, so
            # we only record the state_dict variables' structured names
            state_names_dict = dict()
            state_var_dict = dict()
            for structured_name, var in six.iteritems(dygraph_state_dict):
                state_names_dict[var.name] = structured_name
                state_var_dict[var.name] = var

            # 3. share parameters from Layer to scope & record var info
            for param_or_buffer in concrete_program.parameters:
                # share to scope
                if param_or_buffer.type == core.VarDesc.VarType.VOCAB:
                    src_tensor = param_or_buffer.value().get_map_tensor()
                    tgt_var = scope.var(param_or_buffer.name)
                    tgt_var.set_vocab(src_tensor)
                else:
                    param_or_buffer_tensor = scope.var(
                        param_or_buffer.name).get_tensor()
                    src_tensor = state_var_dict[param_or_buffer.name].value(
                    ).get_tensor()
                    param_or_buffer_tensor._share_data_with(src_tensor)
                # record var info
                if param_or_buffer.name not in extra_var_info:
                    extra_info_dict = dict()
                    if param_or_buffer.name in state_names_dict:
                        extra_info_dict['structured_name'] = state_names_dict[
                            param_or_buffer.name]
                    extra_info_dict[
                        'stop_gradient'] = param_or_buffer.stop_gradient
                    if isinstance(param_or_buffer, ParamBase):
                        extra_info_dict['trainable'] = param_or_buffer.trainable
                    extra_var_info[param_or_buffer.name] = extra_info_dict

        # 4. build input & output of save_inference_model
        # NOTE(chenweihang): [ Get input variables name ]
        # There are two cases, whether to prune the inputs or not
        # - not prune inputs (recommended):
        #   - len(input_spec) == len(concrete_program.inputs) - 1
        #   - here we can use concrete_program.inputs directly
        # - prune inputs:
        #   - the input_spec length < len(concrete_program.inputs) - 1
        #   - the input_spec's name should be in concrete_program.inputs
        input_var_names = _get_input_var_names(concrete_program.inputs,
                                               inner_input_spec)

        # NOTE(chenweihang): [ Get output variables ]
        # the rule is like [ Get input variables name ]. For output vars,
        # we only support VarBase spec, and actually, we only need the
        # var names of the outputs, and we don't recommend using output_spec
        output_vars = _get_output_vars(concrete_program.outputs,
                                       configs.output_spec)

        # 5. save inference model
        from paddle.fluid.io import save_inference_model

        # construct new save_inference_model arguments
        model_path = dirname
        # NOTE(chenweihang): because the prefix contains the model and params
        # filenames, we don't support setting model_filename & params_filename
        if 'forward' == attr_func or not isinstance(layer, Layer):
            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX
        else:
            model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX
            params_filename = file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX

        with scope_guard(scope):
            save_inference_model(
                dirname=model_path,
                feeded_var_names=input_var_names,
                target_vars=output_vars,
                executor=Executor(_current_expected_place()),
                main_program=concrete_program.main_program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                export_for_deployment=configs._export_for_deployment,
                program_only=configs._program_only,
                clip_extra=False)

    # NOTE(chenweihang): [ Save extra variable info ]
    # save_inference_model will lose some important variable information, including:
    #   - Variable name and correspondence (when saved variables as one file)
    #   - Variable.stop_gradient information
    #   - Which persistable variables are parameters and which are not
    #   - Parameter.trainable information
    #
    # The lost information cannot be recovered when it is loaded again,
    # so if we want to perform fine-tuning after loading, we may need to
    # configure redundant information to proceed.
    #
    # Due to compatibility issues, we cannot change the original storage structure,
    # but we can save this information in `jit.save` without changing the original
    # storage to improve the user experience. So we save the extra information
    # into the file `***.pdiparams.info`

    # "layer" can only be Layer or function or StaticFunction.
    contain_parameter = False
    for var in concrete_program.main_program.list_vars():
        contain_parameter |= isinstance(var, Parameter)

    if (isinstance(layer, Layer) or contain_parameter) and extra_var_info:
        with scope_guard(scope):
            extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX
            with open(extra_var_info_path, 'wb') as f:
                pickle.dump(extra_var_info, f, protocol=2)


@dygraph_only
def load(path, **configs):
    """
    :api_attr: imperative

1006 1007
    Load model saved by ``paddle.jit.save`` or ``paddle.static.save_inference_model`` or
    paddle 1.x API ``paddle.fluid.io.save_inference_model`` as ``paddle.jit.TranslatedLayer``,
1008
    then performing inference or fine-tune training.
1009 1010

    .. note::
1011
        If you load model saved by ``paddle.static.save_inference_model`` ,
1012 1013
        there will be the following limitations when using it in fine-tuning:
        1. Imperative mode do not support LoDTensor. All original model's feed targets or parametars that depend on LoD are temporarily unavailable.
1014
        2. All saved model's feed targets need to be passed into TranslatedLayer's forward function.
1015 1016 1017 1018
        3. The variable's ``stop_gradient`` information is lost and can not be recovered.
        4. The parameter's ``trainable`` information is lost and can not be recovered.

    Args:
1019
        path (str): The path prefix to load model. The format is ``dirname/file_prefix`` or ``file_prefix`` .
1020 1021
        **configs (dict, optional): Other load configuration options for compatibility. We do not
            recommend using these configurations, they may be removed in the future. If not necessary,
1022 1023
            DO NOT use them. Default None.
            The following options are currently supported:
1024 1025 1026 1027
            (1) model_filename (str): The inference model file name of the paddle 1.x
            ``save_inference_model`` save format. Default file name is :code:`__model__` .
            (2) params_filename (str): The persistable variables file name of the paddle 1.x
            ``save_inference_model`` save format. No default file name, save variables separately
1028 1029
            by default.

1030 1031 1032 1033 1034

    Returns:
        TranslatedLayer: A Layer object can run saved translated model.

    Examples:
1035
        1. Load model saved by ``paddle.jit.save`` then performing inference and fine-tune training.
1036 1037 1038 1039

        .. code-block:: python

            import numpy as np
1040 1041 1042
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt
1043

1044 1045 1046
            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4
1047

1048 1049
            IMAGE_SIZE = 784
            CLASS_NUM = 10
1050

1051 1052 1053 1054
            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples
1055

1056 1057 1058 1059
                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label
1060

1061 1062 1063 1064 1065
                def __len__(self):
                    return self.num_samples

            class LinearNet(nn.Layer):
                def __init__(self):
1066
                    super(LinearNet, self).__init__()
1067
                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
1068

1069
                @paddle.jit.to_static
1070 1071 1072
                def forward(self, x):
                    return self._linear(x)

1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083
            def train(layer, loader, loss_fn, opt):
                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)
                        loss.backward()
                        opt.step()
                        opt.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

1084
            # 1. train & save model.
1085

1086
            # create network
1087 1088 1089 1090
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

1091
            # create data loader
1092 1093 1094 1095 1096 1097
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)
1098

1099 1100
            # train
            train(layer, loader, loss_fn, adam)
1101

1102
            # save
1103 1104
            path = "example_model/linear"
            paddle.jit.save(layer, path)
1105

1106
            # 2. load model
1107

1108
            # load
1109
            loaded_layer = paddle.jit.load(path)
1110 1111

            # inference
1112 1113 1114
            loaded_layer.eval()
            x = paddle.randn([1, IMAGE_SIZE], 'float32')
            pred = loaded_layer(x)
1115 1116

            # fine-tune
1117 1118 1119
            loaded_layer.train()
            adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
            train(loaded_layer, loader, loss_fn, adam)
1120 1121


1122
        2. Load model saved by ``paddle.fluid.io.save_inference_model`` then performing and fine-tune training.
1123 1124 1125 1126

        .. code-block:: python

            import numpy as np
1127
            import paddle
1128
            import paddle.static as static
1129 1130
            import paddle.nn as nn
            import paddle.optimizer as opt
1131
            import paddle.nn.functional as F
1132

1133 1134 1135
            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4
1136

1137 1138 1139 1140 1141 1142 1143
            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples
1144

1145 1146 1147 1148
                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label
1149

1150 1151
                def __len__(self):
                    return self.num_samples
1152

1153 1154
            paddle.enable_static()

1155 1156
            image = static.data(name='image', shape=[None, 784], dtype='float32')
            label = static.data(name='label', shape=[None, 1], dtype='int64')
1157
            pred = static.nn.fc(x=image, size=10, activation='softmax')
1158 1159
            loss = F.cross_entropy(input=pred, label=label)
            avg_loss = paddle.mean(loss)
1160

1161
            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
1162 1163
            optimizer.minimize(avg_loss)

1164 1165 1166
            place = paddle.CPUPlace()
            exe = static.Executor(place)
            exe.run(static.default_startup_program())
1167

1168 1169 1170 1171 1172
            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                feed_list=[image, label],
                places=place,
1173
                batch_size=BATCH_SIZE,
1174 1175
                shuffle=True,
                drop_last=True,
W
WeiXin 已提交
1176
                return_list=False,
1177
                num_workers=2)
1178 1179 1180 1181

            # 1. train and save inference model
            for data in loader():
                exe.run(
1182
                    static.default_main_program(),
1183
                    feed=data,
1184 1185 1186
                    fetch_list=[avg_loss])

            model_path = "fc.example.model"
1187
            paddle.fluid.io.save_inference_model(
1188 1189 1190
                model_path, ["image"], [pred], exe)

            # 2. load model
1191 1192

            # enable dygraph mode
1193 1194 1195 1196
            paddle.disable_static(place)

            # load
            fc = paddle.jit.load(model_path)
1197

1198 1199 1200
            # inference
            fc.eval()
            x = paddle.randn([1, IMAGE_SIZE], 'float32')
1201 1202
            pred = fc(x)

1203
            # fine-tune
1204
            fc.train()
1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
            loader = paddle.io.DataLoader(dataset,
                places=place,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)
            for epoch_id in range(EPOCH_NUM):
                for batch_id, (image, label) in enumerate(loader()):
                    out = fc(image)
                    loss = loss_fn(out, label)
                    loss.backward()
                    adam.step()
                    adam.clear_grad()
                    print("Epoch {} batch {}: loss = {}".format(
                        epoch_id, batch_id, np.mean(loss.numpy())))
1222
    """
1223 1224 1225 1226
    # 1. construct correct config
    config = _parse_load_config(configs)
    model_path, config = _build_load_path_and_config(path, config)

    return TranslatedLayer._construct(model_path, config)


@dygraph_only
def _trace(layer,
           inputs,
           feed_prefix='feed_',
           fetch_prefix='fetch_',
           tmp_prefix='t_'):
    assert isinstance(layer, Layer)

    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]

    tracer = _dygraph_tracer()._get_program_desc_tracer()

    var_list = extract_vars(inputs)

    with program_desc_tracing_guard(True):
        original_outputs = layer(*inputs)
        if not isinstance(original_outputs, (list, tuple)):
            outputs = [original_outputs]
        else:
            outputs = original_outputs
        out_vars = extract_vars(outputs, err_tag='outputs')

        program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(
            var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix)
        tracer.reset()

    with _dygraph_guard(None):
        program = create_program_from_desc(program_desc)

    return original_outputs, program, feed_names, fetch_names, parameters


class TracedLayer(object):
    """
    :api_attr: imperative

    TracedLayer is used to convert a forward dygraph model to a static
    graph model. This is mainly used to save the dygraph model for online
    inference using C++. Besides, users can also do inference in Python
    using the converted static graph model, which usually has better
    performance than the original dygraph model.

    TracedLayer would run the static graph model using :code:`Executor`
    and :code:`CompiledProgram` . The static graph model would share
    parameters with the dygraph model.

    TracedLayer objects should not be created by the constructor; they
    should be created by the static method :code:`TracedLayer.trace(layer, inputs)` .

    The TracedLayer can only be used to convert a data-independent dygraph
    model into a static graph model, which means the dygraph model should
    be independent of the tensor data and shape.
    """

    def __init__(self, program, parameters, feed_names, fetch_names):
        self._program = program
        self._feed_names = feed_names
        self._fetch_names = fetch_names
        self._params = parameters

        self._place = _current_expected_place()

        self._scope = core.Scope()
        for p in parameters:
            src_tensor = p.value().get_tensor()
            dst_tensor = self._scope.var(p.name).get_tensor()
            dst_tensor._share_data_with(src_tensor)

        self._exe = Executor(self._place)
        self._compiled_program = None
        self._build_strategy = None
        self._exec_strategy = None

    @property
    def program(self):
        return self._program

    def _switch(self, is_test=True):
        for block_id in range(self._program.num_blocks):
            block = self._program.block(block_id)
            for op in block.ops:
                if op.has_attr("is_test"):
                    op._set_attr("is_test", is_test)

    @staticmethod
    @dygraph_only
    def trace(layer, inputs):
        """
1319
        This method is the only allowed method to create TracedLayer object.
1320 1321 1322 1323
        It would call the :code:`layer(*inputs)` method to run the dygraph
        model and convert it into a static graph model.

        Args:
1324
            layer (paddle.nn.Layer): the layer object to be traced.
1325 1326
            inputs (list(Tensor)|tuple(Tensor)|Tensor): the input tensors of
                the layer object.

        Returns:
            tuple: A tuple of 2 items, where the first item is the output of
                :code:`layer(*inputs)` , and the second item is the created
                TracedLayer object.

        Examples:
            .. code-block:: python

                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)


                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')
                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])

                # run the static graph model using Executor inside
                out_static_graph = static_layer([in_var])

                print(len(out_static_graph)) # 1
                print(out_static_graph[0].shape) # (2, 10)

                # save the static graph model for inference
                static_layer.save_inference_model(dirname='./saved_infer_model')

        """
        assert isinstance(
            layer, Layer
        ), "The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received {}.".format(
            type(layer))
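        # Run the layer once under the tracer, then wrap the captured
        # program, parameters and feed/fetch names into a TracedLayer.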
        outs, prog, feed, fetch, parameters = _trace(layer, inputs)
        traced = TracedLayer(prog, parameters, feed, fetch)
        return outs, traced

    def set_strategy(self, build_strategy=None, exec_strategy=None):
        """
        Set the strategies when running static graph model.

        Args:
            build_strategy (BuildStrategy, optional): build strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.
            exec_strategy (ExecutionStrategy, optional): execution strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)

                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')

                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])

                build_strategy = paddle.static.BuildStrategy()
                build_strategy.enable_inplace = True

                exec_strategy = paddle.static.ExecutionStrategy()
                exec_strategy.num_threads = 2

                static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy)
                out_static_graph = static_layer([in_var])

        """
        assert self._compiled_program is None, "Cannot set strategy after run"
        assert isinstance(
            build_strategy, (type(None), BuildStrategy)
        ), "The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format(
            type(build_strategy))
        assert isinstance(
            exec_strategy, (type(None), ExecutionStrategy)
        ), "The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format(
            type(exec_strategy))
        self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy

    @switch_to_static_graph
    def _compile(self):
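        # Build the CompiledProgram once, applying any strategies set via
        # set_strategy(); it is cached and reused by subsequent calls.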
        self._compiled_program = CompiledProgram(
            self._program).with_data_parallel(
                build_strategy=self._build_strategy,
                exec_strategy=self._exec_strategy,
                places=self._place)

    def _build_feed(self, inputs):
        assert isinstance(inputs, (list, tuple)), \
            "Inputs should be a list or tuple of variables"
        assert len(inputs) == len(self._feed_names)
        feed_dict = {}
        if in_dygraph_mode():
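            # In dygraph mode the inputs are imperative Tensors; feed their
            # underlying dense tensors to the executor.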
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x.value().get_tensor()
        else:
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x

        return feed_dict

    @switch_to_static_graph
    def _run(self, feed):
        return self._exe.run(self._compiled_program,
                             feed=feed,
                             fetch_list=self._fetch_names)

    def __call__(self, inputs):
        with scope_guard(self._scope):
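            # Compile lazily on the first call, then reuse the compiled
            # program for later calls.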
            if self._compiled_program is None:
                self._compile()

            return self._run(self._build_feed(inputs))

    @switch_to_static_graph
    def save_inference_model(self, path, feed=None, fetch=None, **kwargs):
        """
        Save the TracedLayer to a model for inference. The saved
        inference model can be loaded by C++ inference APIs.

        ``path`` is the prefix of the saved objects: the translated program is
        saved with the suffix ``.pdmodel`` , and the persistable variables are
        saved with the suffix ``.pdiparams`` .

        Args:
            path(str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
            feed (list[int], optional): the input variable indices of the saved
                inference model. If None, all input variables of the
                TracedLayer object would be the inputs of the saved inference
                model. Default None.
            fetch (list[int], optional): the output variable indices of the
                saved inference model. If None, all output variables of the
                TracedLayer object would be the outputs of the saved inference
                model. Default None.
            kwargs: Supported keys include ``clip_extra``. Set it to True to
                clip extra information for every operator. Default False.

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)

                save_dirname = './saved_infer_model'
                in_np = np.random.random([2, 3]).astype('float32')
                in_var = paddle.to_tensor(in_np)
                layer = ExampleLayer()

                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
                static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])

                paddle.enable_static()
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                program, feed_vars, fetch_vars = paddle.static.load_inference_model(save_dirname,
                                                    exe)

                fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
                print(fetch.shape) # (2, 10)
        """
        check_type(path, "path", str,
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        check_type(feed, "feed", (type(None), list),
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        if isinstance(feed, list):
            for f in feed:
                check_type(f, "each element of feed", int,
                           "fluid.dygraph.jit.TracedLayer.save_inference_model")
        check_type(fetch, "fetch", (type(None), list),
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        if isinstance(fetch, list):
            for f in fetch:
                check_type(f, "each element of fetch", int,
                           "fluid.dygraph.jit.TracedLayer.save_inference_model")
        clip_extra = kwargs.get('clip_extra', False)
        # path check
        file_prefix = os.path.basename(path)
        if file_prefix == "":
            raise ValueError(
                "The input path MUST be format of dirname/file_prefix "
                "[dirname\\file_prefix in Windows system], but received "
                "file_prefix is empty string.")

        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)

        from paddle.fluid.io import save_inference_model

        def get_feed_fetch(all_vars, partial_vars):
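            # Map the optional integer indices onto the recorded feed/fetch
            # name lists; None means "use all of them".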
            if partial_vars is None:
                return all_vars

            return [all_vars[idx] for idx in partial_vars]

        with scope_guard(self._scope):
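            # Resolve the user-selected indices to variable names, then look
            # up the corresponding variables in the traced program.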
            feeded_var_names = get_feed_fetch(self._feed_names, feed)
            target_var_names = get_feed_fetch(self._fetch_names, fetch)
            target_vars = []
            for name in target_var_names:
                target_var = self._program.global_block().vars.get(name, None)
                assert target_var is not None, "{} cannot be found".format(name)
                target_vars.append(target_var)

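            # Follow the standard inference naming convention: the program is
            # saved as <prefix>.pdmodel and the parameters as <prefix>.pdiparams.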
            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX

            save_inference_model(
                dirname=dirname,
                feeded_var_names=feeded_var_names,
                target_vars=target_vars,
                executor=self._exe,
                main_program=self._program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                clip_extra=clip_extra)