# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import pickle
import warnings
import functools
from collections import OrderedDict
import inspect
import threading

import six
import paddle
from paddle.fluid import core, dygraph
from paddle.fluid.compiler import BuildStrategy, CompiledProgram, ExecutionStrategy
from paddle.fluid.data_feeder import check_type
from paddle.fluid.layers.utils import flatten, pack_sequence_as
from paddle.fluid.dygraph.base import program_desc_tracing_guard, switch_to_static_graph
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import ConversionOptions, CONVERSION_OPTIONS
from paddle.fluid.dygraph.dygraph_to_static.logging_utils import set_code_level, set_verbosity
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, StaticFunction, unwrap_decorators
from paddle.fluid.dygraph.io import TranslatedLayer, INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX
from paddle.fluid.dygraph.layers import Layer
from paddle.fluid.executor import Executor, scope_guard
from paddle.fluid.framework import Block, ParamBase, Program, Variable, Parameter, EagerParamBase
from paddle.fluid.framework import _current_expected_place, _dygraph_guard, _dygraph_tracer
from paddle.fluid.framework import dygraph_only, _non_static_mode
from paddle.fluid.wrapped_decorator import wrap_decorator

__all__ = [
    'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level',
    'set_verbosity', 'save', 'load', 'not_to_static'
]


def create_program_from_desc(program_desc):
    program = Program()
    program.desc = program_desc
    program.blocks = [Block(program, 0)]
    program._sync_with_cpp()
    return program


def _extract_vars(inputs, result_list, err_tag='inputs'):
    if isinstance(inputs, Variable):
        result_list.append(inputs)
    elif isinstance(inputs, (list, tuple)):
        for var in inputs:
            _extract_vars(var, result_list, err_tag)
    else:
        raise TypeError(
            "The type of 'each element of {}' in fluid.dygraph.jit.TracedLayer.trace must be fluid.Variable, but received {}.".
            format(err_tag, type(inputs)))


def extract_vars(inputs, err_tag='inputs'):
    result_list = []
    _extract_vars(inputs, result_list, err_tag)
    return result_list
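
# Illustrative usage (a sketch, not part of the module; the variable names
# below are hypothetical):
#
#     x = fluid.data(name='x', shape=[1], dtype='float32')
#     y = fluid.data(name='y', shape=[1], dtype='float32')
#     extract_vars([x, (y,)])  # -> [x, y]
#
# Any element that is not a Variable raises a TypeError via _extract_vars.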


def _dygraph_to_static_func_(dygraph_func):
    """
    Converts imperative dygraph APIs into declarative function APIs. The
    decorator @dygraph_to_static_func only converts imperative dygraph APIs
    into declarative net-building APIs, which means it doesn't return an
    immediate numerical result the way imperative mode does. Users should
    handle the Program and Executor by themselves.

    Note:
    This decorator is NOT our recommended way to transform an imperative
    function into a declarative one. We will remove this decorator after we
    finalize cleaning up the code.

    Args:
        dygraph_func (callable): callable imperative function.

    Returns:
        Callable: a callable that converts imperative dygraph APIs into
        declarative net-building APIs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          from paddle.fluid.dygraph.jit import dygraph_to_static_func

          @dygraph_to_static_func
          def func(x):
              if fluid.layers.mean(x) < 0:
                  x_v = x - 1
              else:
                  x_v = x + 1

              return x_v

          x = fluid.layers.fill_constant(shape=[3, 3], value=0, dtype='float64')

          x_v = func(x)
          exe = fluid.Executor(fluid.CPUPlace())
          out = exe.run(fetch_list=[x_v])
          print(out[0])
          # [[1. 1. 1.]
          #  [1. 1. 1.]
          #  [1. 1. 1.]]

    """

    # TODO: remove this decorator after we finalize training API
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        if _non_static_mode() or not program_translator.enable_to_static:
            logging_utils.warn(
                "The decorator 'dygraph_to_static_func' doesn't work in "
                "dygraph mode or set ProgramTranslator.enable to False. "
                "We will just return dygraph output.")
            return dygraph_func(*args, **kwargs)
        static_func = program_translator.get_func(dygraph_func)
        return static_func(*args, **kwargs)

    return __impl__


dygraph_to_static_func = wrap_decorator(_dygraph_to_static_func_)


def copy_decorator_attrs(original_func, decorated_obj):
    """
    Copies some necessary attributes from the original function into the decorated function.

    Args:
        original_func(callable): the original decorated function.
        decorated_obj(StaticFunction): the target decorated StaticFunction object.
    """
    decorator_name = "declarative"

    decorated_obj.__name__ = original_func.__name__
    decorated_obj._decorator_name = decorator_name
    decorated_obj.__wrapped__ = original_func
    decorated_obj.__doc__ = original_func.__doc__
    if hasattr(original_func, "__module__"):
        decorated_obj.__module__ = original_func.__module__

    return decorated_obj
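
# A minimal sketch of the effect (the function name `foo` is hypothetical):
# after `decorated = copy_decorator_attrs(foo, StaticFunction(function=foo))`,
# `decorated.__name__ == foo.__name__` and `decorated.__wrapped__ is foo`,
# so introspection tools such as `inspect.unwrap` can recover `foo`.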


def declarative(function=None, input_spec=None, build_strategy=None):
    """
    Converts imperative dygraph APIs into declarative function APIs. Decorator
    @declarative handles the Program and Executor of static mode and returns
    the result as dygraph Tensor(s). Users could use the returned dygraph
    Tensor(s) to do imperative training, inference, or other operations. If the
    decorated function calls other imperative functions, the called ones will
    be converted into declarative functions as well.

    Args:
        function (callable): callable imperative function.
        input_spec(list[InputSpec]|tuple[InputSpec]): list/tuple of InputSpec to specify the shape/dtype/name
            information of each input Tensor.
        build_strategy(BuildStrategy|None): This argument is used to compile the
            converted program with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.


    Returns:
        Tensor(s): containing the numerical result.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.jit import to_static

            @to_static
            def func(x):
                if paddle.mean(x) < 0:
                    x_v = x - 1
                else:
                    x_v = x + 1
                return x_v

            x = paddle.ones([1, 2], dtype='float32')
            x_v = func(x)
            print(x_v) # [[2. 2.]]

    """

    def decorated(python_func):
        """
        Decorates a python function into a StaticFunction object.
        """
        # Step 1. unwrap the function if it is already decorated.
        _, python_func = unwrap_decorators(python_func)

        # Step 2. copy some attributes from original python function.
        static_layer = copy_decorator_attrs(
            original_func=python_func,
            decorated_obj=StaticFunction(
                function=python_func,
                input_spec=input_spec,
                build_strategy=build_strategy))

        return static_layer

    build_strategy = build_strategy or BuildStrategy()
    if not isinstance(build_strategy, BuildStrategy):
        raise TypeError(
            "Required type(build_strategy) shall be `paddle.static.BuildStrategy`, but received {}".
            format(type(build_strategy).__name__))

    # for usage: `declarative(foo, ...)`
    if function is not None:
        if isinstance(function, Layer):
            if isinstance(function.forward, StaticFunction):
                class_name = function.__class__.__name__
                logging_utils.warn(
                    "`{}.forward` has already been decorated somewhere. It will be redecorated to replace previous one.".
                    format(class_name))
            function.forward = decorated(function.forward)
            return function
        else:
            return decorated(function)

    # for usage: `@declarative`
    return decorated


def not_to_static(func=None):
    """
    A decorator to suppress the conversion of a function.

    Args:
        func(callable): The function to decorate.

    Returns:
        callable: A function which won't be converted in Dynamic-to-Static.

    Examples:
        .. code-block:: python

            import paddle

            @paddle.jit.not_to_static
            def func_not_to_static(x):
                res = x - 1
                return res

            @paddle.jit.to_static
            def func(x):
                if paddle.mean(x) < 0:
                    out = func_not_to_static(x)
                else:
                    out = x + 1
                return out

            x = paddle.ones([1, 2], dtype='float32')
            out = func(x)
            print(out) # [[2. 2.]]
    """
    if func is None:
        return not_to_static

    options = ConversionOptions(not_convert=True)
    setattr(func, CONVERSION_OPTIONS, options)
    return func


class _SaveLoadConfig(object):
    def __init__(self):
        self._output_spec = None
        self._model_filename = None
        self._params_filename = None
        self._separate_params = False
        # used for `paddle.load`
        self._keep_name_table = False

        # NOTE: Users rarely use the following configs, so these configs are not open to users,
        # reducing user learning costs, but we retain the configuration capabilities

        # If True, programs are modified to only support direct inference deployment.
        # Otherwise, more information will be stored for flexible optimization and re-training.
        # Currently, only True is supported
        self._export_for_deployment = True

        # If True, It will save inference program only, and do not save params of Program
        self._program_only = False
        self.with_hook = False

    @property
    def output_spec(self):
        return self._output_spec

    @output_spec.setter
    def output_spec(self, spec):
        if spec is None:
            return
        if not isinstance(spec, list):
            raise TypeError(
                "The config `output_spec` should be 'list', but received input type is %s."
                % type(spec))
        for var in spec:
            if not isinstance(var, core.VarBase):
                raise TypeError(
                    "The element in config `output_spec` list should be 'Variable', but received element's type is %s."
                    % type(var))
        self._output_spec = spec

    @property
    def model_filename(self):
        return self._model_filename

    @model_filename.setter
    def model_filename(self, filename):
        if filename is None:
            return
        if not isinstance(filename, six.string_types):
            raise TypeError(
                "The config `model_filename` should be str, but received input's type is %s."
                % type(filename))
        if len(filename) == 0:
            raise ValueError("The config `model_filename` is empty string.")
        self._model_filename = filename

    @property
    def params_filename(self):
        return self._params_filename

    @params_filename.setter
    def params_filename(self, filename):
        if filename is None:
            return
        if not isinstance(filename, six.string_types):
            raise TypeError(
                "The config `params_filename` should be str, but received input's type is %s."
                % type(filename))
        if len(filename) == 0:
            raise ValueError("The config `params_filename` is empty string.")
        self._params_filename = filename

    @property
    def keep_name_table(self):
        return self._keep_name_table

    @keep_name_table.setter
    def keep_name_table(self, value):
        if value is None:
            return
        if not isinstance(value, bool):
            raise TypeError(
                "The config `keep_name_table` should be bool value, but received input's type is %s."
                % type(value))
        self._keep_name_table = value


def _parse_save_configs(configs):
    supported_configs = ['output_spec', "with_hook"]

    # input check
    for key in configs:
        if key not in supported_configs:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.save` is not supported."
                % (key))

    # construct inner config
    inner_config = _SaveLoadConfig()
    inner_config.output_spec = configs.get('output_spec', None)
    inner_config.with_hook = configs.get('with_hook', False)

    return inner_config


def _parse_load_config(configs):
    supported_configs = ['model_filename', 'params_filename']

    # input check
    for key in configs:
        if key not in supported_configs:
            raise ValueError(
                "The additional config (%s) of `paddle.jit.load` is not supported."
                % (key))

    # construct inner config
    inner_config = _SaveLoadConfig()
    inner_config.model_filename = configs.get('model_filename', None)
    inner_config.params_filename = configs.get('params_filename', None)

    return inner_config
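
# Illustrative usage of the two parsers above (a sketch; these are internal
# helpers driven by the **configs kwargs of `paddle.jit.save`/`paddle.jit.load`):
#
#     save_cfg = _parse_save_configs({'with_hook': True})
#     assert save_cfg.with_hook is True
#     load_cfg = _parse_load_config({'model_filename': '__model__'})
#     assert load_cfg.model_filename == '__model__'
#
# Any unsupported key raises a ValueError.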


def _get_input_var_names(inputs, input_spec):
    name_none_error = "The %s's name is None. " \
        "When using jit.save, please set InputSpec's name in " \
        "to_static(input_spec=[]) and jit.save(input_spec=[]) " \
        "and make sure they are consistent."
    name_no_exists_error = "The tensor `%s` does not exist. " \
        "Please make sure the name of InputSpec or example Tensor " \
        "in input_spec is the same as the name of InputSpec in " \
        "`to_static` decorated on the Layer.forward method."
    result_list = []
    input_var_names = [
        var.name for var in flatten(inputs) if isinstance(var, Variable)
    ]
    if input_spec is None:
        # no prune
        return input_var_names
    else:
        # filter out non-tensor type spec infos.
        input_spec = [
            spec for spec in input_spec
            if isinstance(spec, paddle.static.InputSpec)
        ]

    if len(input_spec) == len(input_var_names):
        # no prune
        result_list = input_var_names
        # if input spec name not in input_var_names, only raise warning
        for spec in input_spec:
            if spec.name is None:
                warnings.warn(name_none_error % spec)
            elif spec.name not in input_var_names:
                warnings.warn(name_no_exists_error % spec.name)
            else:
                # do nothing
                pass
    else:
        # prune
        for spec in input_spec:
            if spec.name is None:
                # name is None, the input_spec only can be InputSpec
                raise ValueError(name_none_error % spec)
            elif spec.name not in input_var_names:
                # the input_spec can be `InputSpec` or `VarBase`
                raise ValueError(name_no_exists_error % spec.name)
            else:
                result_list.append(spec.name)

    return result_list


def _get_output_vars(outputs, output_spec, with_hook=False):
    name_no_exists_error = "The tensor `%s` does not exists. " \
        "Please make sure the name of example Tensor " \
        "in configs.output_spec is the output tensor of " \
        "Layer.forward method."
    if output_spec and with_hook:
        raise RuntimeError(
            "Currently not support specify output_spec while founding pre/post hooks in your outermost layer."
        )
    result_list = []
    output_vars_dict = OrderedDict()
    for var in flatten(outputs):
        if isinstance(var, Variable):
            output_vars_dict[var.name] = var
    if output_spec is None:
        result_list = output_vars_dict.values()
    elif output_spec is not None and len(output_spec) == len(output_vars_dict):
        result_list = output_vars_dict.values()
        for var in output_spec:
            if var.name not in output_vars_dict:
                warnings.warn(name_no_exists_error % var.name)
    else:
        for var in output_spec:
            if var.name not in output_vars_dict:
                raise ValueError(name_no_exists_error % var.name)
            else:
                result_list.append(output_vars_dict[var.name])
    return result_list
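
# Sketch of the pruning rule implemented above (`var_a`, `var_b` are
# hypothetical output Variables of a forward method):
#
#     _get_output_vars([var_a, var_b], output_spec=None)     # -> [var_a, var_b]
#     _get_output_vars([var_a, var_b], output_spec=[var_a])  # -> [var_a] (pruned)
#
# An output_spec name that matches no output only warns when nothing is
# pruned, and raises a ValueError when pruning is requested.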


# NOTE(chenweihang): [ Handling of use cases of API paddle.jit.load ]
# `paddle.jit.load` may be used to load saved results of:
# 1. Expected cases:
#   - paddle.jit.save
#   - paddle.static.save_inference_model
#   - paddle.fluid.io.save_inference_model
# 2. Error cases:
#   - paddle.save: no .pdmodel for prefix
#   - paddle.static.save: no .pdiparams but .pdparams exists
#   - paddle.fluid.io.save_params/save_persistables: no __model__
# TODO(chenweihang): polish error message in above error cases
def _build_load_path_and_config(path, config):
    # NOTE(chenweihang): If both [prefix save format] and [directory save format] exist,
    # raise error, avoid confusing behavior
    prefix_format_path = path + INFER_MODEL_SUFFIX
    prefix_format_exist = os.path.exists(prefix_format_path)
    directory_format_exist = os.path.isdir(path)
    if prefix_format_exist and directory_format_exist:
        raise ValueError(
            "The %s.pdmodel and %s directory exist at the same time, "
            "don't know which one to load, please make sure that the specified target "
            "of ``path`` is unique." % (path, path))
    elif not prefix_format_exist and not directory_format_exist:
        raise ValueError("The ``path`` (%s) to load model not exists." % path)
    else:
        if prefix_format_exist:
            file_prefix = os.path.basename(path)
            model_path = os.path.dirname(path)
            if config.model_filename is not None:
                warnings.warn(
                    "When loading the result saved with the "
                    "specified file prefix, the ``model_filename`` config does "
                    "not take effect.")
            config.model_filename = file_prefix + INFER_MODEL_SUFFIX
            if config.params_filename is not None:
                warnings.warn(
                    "When loading the result saved with the "
                    "specified file prefix, the ``params_filename`` config does "
                    "not take effect.")
            config.params_filename = file_prefix + INFER_PARAMS_SUFFIX
        else:
            # Compatible with the old save_inference_model format
            model_path = path

    return model_path, config
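
# Illustrative resolution (a sketch, assuming a model saved by `paddle.jit.save`
# with the prefix "example_model/linear"):
#
#     model_path, config = _build_load_path_and_config("example_model/linear", config)
#     # model_path == "example_model"
#     # config.model_filename == "linear" + INFER_MODEL_SUFFIX
#     # config.params_filename == "linear" + INFER_PARAMS_SUFFIX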


_save_pre_hooks_lock = threading.Lock()
_save_pre_hooks = []


class HookRemoveHelper(object):
    """ A HookRemoveHelper that can be used to remove hook. """

    def __init__(self, hook):
        self._hook = hook

    def remove(self):
        _remove_save_pre_hook(self._hook)


def _register_save_pre_hook(hook):
    """
    Register a save pre-hook for `paddle.jit.save`.
    This hook will be executed before the `save` function is invoked.

    hook(layer, input_spec, configs) -> None
    - layer (Layer|function): This argument corresponds to `layer` in `paddle.jit.save`.
    - input_spec (list or tuple[InputSpec|Tensor|Python built-in variable]): This argument corresponds to `input_spec` in `paddle.jit.save`.
    - configs (dict): This argument corresponds to `configs` in `paddle.jit.save`.

    Args:
        hook(function): a function registered as a save pre-hook

    Returns:
        HookRemoveHelper: a HookRemoveHelper object that can be used to remove the added hook by calling `hook_remove_helper.remove()`.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            IMAGE_SIZE = 256
            CLASS_NUM = 10

            class LinearNet(paddle.nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear = paddle.nn.Linear(IMAGE_SIZE, CLASS_NUM)

                def forward(self, x):
                    return self._linear(x)

            saving_count = 0
            def save_pre_hook(layer, input_spec, configs):
                global saving_count
                saving_count += 1

            remove_handler = paddle.jit.register_save_pre_hook(save_pre_hook)

            layer = LinearNet()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1

            remove_handler.remove()
            paddle.jit.save(layer, "/tmp", [paddle.static.InputSpec(shape=[-1, IMAGE_SIZE])])
            # saving_count == 1
    """
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    if hook not in _save_pre_hooks:
        _save_pre_hooks.append(hook)
    _save_pre_hooks_lock.release()
    return HookRemoveHelper(hook)


def _clear_save_pre_hooks():
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    _save_pre_hooks.clear()
    _save_pre_hooks_lock.release()


def _remove_save_pre_hook(hook):
    global _save_pre_hooks_lock
    global _save_pre_hooks
    _save_pre_hooks_lock.acquire()
    if hook in _save_pre_hooks:
        _save_pre_hooks.remove(hook)
    _save_pre_hooks_lock.release()


def _run_save_pre_hooks(func):
    def wrapper(layer, path, input_spec=None, **configs):
        global _save_pre_hooks
        for hook in _save_pre_hooks:
            hook(layer, input_spec, configs)
        func(layer, path, input_spec, **configs)

    return wrapper


@_run_save_pre_hooks
@switch_to_static_graph
def save(layer, path, input_spec=None, **configs):
    """
    Saves the input Layer or function as a ``paddle.jit.TranslatedLayer``
    format model, which can be used for inference or fine-tuning after loading.

    It will save the translated program and all related persistable
    variables of the input Layer to the given ``path`` .

    ``path`` is the prefix of the saved objects. The saved translated program
    file has the suffix ``.pdmodel`` , the saved persistable variables file has
    the suffix ``.pdiparams`` , and some additional variable description
    information is also saved to a file with the suffix ``.pdiparams.info`` ;
    this additional information is used for fine-tuning.

    The saved model can be loaded by the following APIs:
      - ``paddle.jit.load``
      - ``paddle.static.load_inference_model``
      - Other C++ inference APIs

    .. note::
        When using ``paddle.jit.save`` to save a function, parameters will not be saved. If you have to
        save the parameters, please pass the Layer containing the function and parameters to ``paddle.jit.save``.

    Args:
        layer (Layer|function): The Layer or function to be saved.
        path (str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
        input_spec (list or tuple[InputSpec|Tensor|Python built-in variable], optional): Describes the input of the saved model's forward
            method, which can be described by InputSpec or example Tensor. Moreover, we support specifying non-tensor type arguments,
            such as int, float, string, or list/dict of them. If None, all input variables of
            the original Layer's forward method would be the inputs of the saved model. Default None.
        **configs (dict, optional): Other save configuration options for compatibility. We do not
            recommend using these configurations, they may be removed in the future. If not necessary,
            DO NOT use them. Default None.
            The following options are currently supported:
            (1) output_spec (list[Tensor]): Selects the output targets of the saved model.
            By default, all return variables of original Layer's forward method are kept as the
            output of the saved model. If the provided ``output_spec`` list is not all output variables,
            the saved model will be pruned according to the given ``output_spec`` list.

    Returns:
        None

    Examples:
        .. code-block:: python

            # example 1: save layer
            import numpy as np
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt

            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

                @paddle.jit.to_static
                def forward(self, x):
                    return self._linear(x)

            def train(layer, loader, loss_fn, opt):
                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)
                        loss.backward()
                        opt.step()
                        opt.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

            # 1. train & save model.

            # create network
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)

            # train
            train(layer, loader, loss_fn, adam)

            # save
            path = "example_model/linear"
            paddle.jit.save(layer, path)

            # example 2: save function
            import paddle
            from paddle.static import InputSpec


            def save_function():
                @paddle.jit.to_static
                def fun(inputs):
                    return paddle.tanh(inputs)

                path = 'test_jit_save_load_function_1/func'
                inps = paddle.rand([3, 6])
                origin = fun(inps)

                paddle.jit.save(fun, path)
                load_func = paddle.jit.load(path)

                load_result = load_func(inps)
                print((load_result - origin).abs().max() < 1e-10)
                
            save_function()
    """

    # 1. input build & check
    prog_translator = ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "The paddle.jit.save doesn't work when setting ProgramTranslator.enable to False."
        )

    if not (isinstance(layer, Layer) or inspect.isfunction(layer) or isinstance(
            layer, StaticFunction)):
        raise TypeError(
            "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
            % type(layer))
    elif inspect.isfunction(layer) or isinstance(layer, StaticFunction):
        warnings.warn(
            'What you save is a function, and `jit.save` will generate the name of the model file according to the `path` you specify. When loading these files with `jit.load`, you will get a `TranslatedLayer` whose inference result is the same as the inference result of the function you saved.'
        )

    # NOTE(chenweihang): If the input layer is wrapped by DataParallel,
    # the args and kwargs of the forward method can't be parsed by
    # function_spec, so here we save DataParallel._layers instead of
    # DataParallel itself
    # NOTE(chenweihang): use inner_layer, do not change the input layer
    if isinstance(layer, paddle.DataParallel):
        inner_layer = layer._layers
    else:
        inner_layer = layer

    # path check
    file_prefix = os.path.basename(path)
    if file_prefix == "":
        raise ValueError(
            "The input path MUST be format of dirname/file_prefix "
            "[dirname\\file_prefix in Windows system], but received "
            "file_prefix is empty string.")

    dirname = os.path.dirname(path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)

    # avoid changing the user-given input_spec
    inner_input_spec = None
    if input_spec is not None:
        if isinstance(layer, Layer):
            for attr_func in dir(inner_layer):
                static_func = getattr(inner_layer, attr_func, None)
                if isinstance(static_func,
                              StaticFunction) and 'forward' != attr_func:
                    raise ValueError(
                        "If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but received the type of 'input_spec' is %s."
                        % type(input_spec))

        if not isinstance(input_spec, (list, tuple)):
            raise TypeError(
                "The input input_spec should be 'list', but received input_spec's type is %s."
                % type(input_spec))
        inner_input_spec = []
        for var in flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                inner_input_spec.append(
                    paddle.static.InputSpec.from_tensor(var))
            else:
                # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
                inner_input_spec.append(var)

    # parse configs
    configs = _parse_save_configs(configs)
    # whether the outermost layer has pre/post hooks; if it does, we also need
    # to save these operators in the program.
    with_hook = configs.with_hook

    scope = core.Scope()
    extra_var_info = dict()
    if isinstance(layer, Layer):
        functions = dir(inner_layer)
        if inner_layer._forward_pre_hooks or inner_layer._forward_post_hooks:
            with_hook = True
    else:
        # layer is function
        functions = [layer, ]
    for attr_func in functions:
        if isinstance(layer, Layer):
            static_func = getattr(inner_layer, attr_func, None)
            if isinstance(static_func, StaticFunction):
                concrete_program = static_func.concrete_program_specify_input_spec(
                    inner_input_spec, with_hook=with_hook)
            elif 'forward' == attr_func:
                # transform in jit.save; if input_spec is incomplete, declarative will throw an error
                # inner_input_spec is list[InputSpec], it should be packed with same structure
                # as original input_spec here.
                if inner_input_spec:
                    inner_input_spec = pack_sequence_as(input_spec,
                                                        inner_input_spec)
                static_forward = declarative(
                    inner_layer.forward, input_spec=inner_input_spec)
                concrete_program = static_forward.concrete_program_specify_input_spec(
                    with_hook=with_hook)
                # the input_spec has been used in declarative, which is equal to
                # @declarative with input_spec and jit.save without input_spec,
                # avoid needless warning
                inner_input_spec = None
            else:
                continue

        else:
            # When layer is a function
            if isinstance(attr_func, StaticFunction):
                concrete_program = attr_func.concrete_program_specify_input_spec(
                    inner_input_spec)
            else:
                if inner_input_spec:
                    inner_input_spec = pack_sequence_as(input_spec,
                                                        inner_input_spec)
                static_function = declarative(
                    attr_func, input_spec=inner_input_spec)
                concrete_program = static_function.concrete_program

                if static_function._class_instance is None:
                    warnings.warn(
                        '`jit.save` will only save the `Program`, not the parameters. If you have to save the parameters, please make sure that {} is a member function of `paddle.nn.Layer` and the saved parameters are in `state_dict`'.
                        format(layer))

        dygraph_state_dict = None
        if isinstance(inner_layer, Layer):
            dygraph_state_dict = inner_layer.to_static_state_dict()
        elif isinstance(attr_func, StaticFunction):
            if attr_func._class_instance:
                dygraph_state_dict = attr_func._class_instance.to_static_state_dict(
                )

        if dygraph_state_dict:
            # NOTE(chenweihang): we maintain the mapping of variable name to
            # structured name; the buffer variables (non-persistable)
            # saved to the inference program may not be needed by the dygraph
            # Layer, so we only record the structured names of the state_dict
            # variables
            state_names_dict = dict()
            state_var_dict = dict()
            for structured_name, var in six.iteritems(dygraph_state_dict):
                state_names_dict[var.name] = structured_name
                state_var_dict[var.name] = var

            # 3. share parameters from Layer to scope & record var info
            with dygraph.guard():
                for param_or_buffer in concrete_program.parameters:
                    # share to scope
                    if param_or_buffer.type == core.VarDesc.VarType.VOCAB:
                        src_tensor = param_or_buffer.value().get_map_tensor()
                        tgt_var = scope.var(param_or_buffer.name)
                        tgt_var.set_vocab(src_tensor)
                    else:
                        param_or_buffer_tensor = scope.var(
                            param_or_buffer.name).get_tensor()
                        src_tensor = state_var_dict[param_or_buffer.name].value(
                        ).get_tensor()
                        param_or_buffer_tensor._share_data_with(src_tensor)
                    # record var info
                    if param_or_buffer.name not in extra_var_info:
                        extra_info_dict = dict()
                        if param_or_buffer.name in state_names_dict:
                            extra_info_dict[
                                'structured_name'] = state_names_dict[
                                    param_or_buffer.name]
                        extra_info_dict[
                            'stop_gradient'] = param_or_buffer.stop_gradient
                        if isinstance(param_or_buffer,
                                      (ParamBase, EagerParamBase)):
                            extra_info_dict[
                                'trainable'] = param_or_buffer.trainable
                        extra_var_info[param_or_buffer.name] = extra_info_dict

        # 4. build input & output of save_infernece_model
        # NOTE(chenweihang): [ Get input variables name ]
        # There are two cases, whether to prune the inputs or not
        # - not prune inputs (recommended):
        #   - len(input_spec) == len(concrete_program.inputs) - 1
        #   - here we can use concrete_program.inputs directly
        # - prune inputs:
        #   - len(input_spec) < len(concrete_program.inputs) - 1
        #   - the input_spec's names should be in concrete_program.inputs
        input_var_names = _get_input_var_names(concrete_program.inputs,
                                               inner_input_spec)

        # NOTE(chenweihang): [ Get output variables ]
        # the rule is like [ Get input variables name ]. For output vars,
        # we only support VarBase specs, and actually, we only need the
        # var names of the outputs; we don't recommend using output_spec
        output_vars = _get_output_vars(concrete_program.outputs,
                                       configs.output_spec, with_hook)

        # 5. save inference model
        from paddle.fluid.io import save_inference_model

        # construct new save_inference_model arguments
        model_path = dirname
        # NOTE(chenweihang): because prefix contains model and params filename,
        # so we don't support set model_filename & params_filename
        if 'forward' == attr_func or not isinstance(layer, Layer):
            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX
        else:
            model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX
            params_filename = file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX

        with scope_guard(scope):
            save_inference_model(
                dirname=model_path,
                feeded_var_names=input_var_names,
                target_vars=output_vars,
                executor=Executor(_current_expected_place()),
                main_program=concrete_program.main_program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                export_for_deployment=configs._export_for_deployment,
                program_only=configs._program_only,
                clip_extra=False)

    # NOTE(chenweihang): [ Save extra variable info ]
    # save_inference_model will lose some important variable information, including:
    #   - Variable name and correspondence (when saved variables as one file)
    #   - Variable.stop_gradient information
    #   - Which persistent variable are parameter and which are not
    #   - Parameter.trainable information
    #
    # The lost information cannot be recovered when the model is loaded again,
    # so if we want to perform fine-tuning after loading, we may need to
    # configure redundant information to proceed.
    #
    # Due to compatibility issues, we cannot change the original storage structure,
    # but we can save this information in `jit.save` without changing the original
    # storage to improve the user experience. So we save the extra information into
    # the file `***.pdiparams.info`

    # "layer" can only be Layer or function or StaticFunction.

    contain_parameter = False
    for var in concrete_program.main_program.list_vars():
        contain_parameter |= isinstance(var, Parameter)

    if (isinstance(layer, Layer) or contain_parameter) and extra_var_info:
        with scope_guard(scope):
            extra_var_info_path = path + INFER_PARAMS_INFO_SUFFIX
            with open(extra_var_info_path, 'wb') as f:
                pickle.dump(extra_var_info, f, protocol=2)


@dygraph_only
def load(path, **configs):
    """
    :api_attr: imperative

    Load model saved by ``paddle.jit.save`` or ``paddle.static.save_inference_model`` or
    paddle 1.x API ``paddle.fluid.io.save_inference_model`` as ``paddle.jit.TranslatedLayer``,
    then perform inference or fine-tune training.

    .. note::
1030
        If you load model saved by ``paddle.static.save_inference_model`` ,
1031 1032
        there will be the following limitations when using it in fine-tuning:
        1. Imperative mode do not support LoDTensor. All original model's feed targets or parametars that depend on LoD are temporarily unavailable.
1033
        2. All saved model's feed targets need to be passed into TranslatedLayer's forward function.
1034 1035 1036 1037
        3. The variable's ``stop_gradient`` information is lost and can not be recovered.
        4. The parameter's ``trainable`` information is lost and can not be recovered.

    Args:
        path (str): The path prefix to load model. The format is ``dirname/file_prefix`` or ``file_prefix`` .
        **configs (dict, optional): Other load configuration options for compatibility. We do not
            recommend using these configurations, they may be removed in the future. If not necessary,
            DO NOT use them. Default None.
            The following options are currently supported:
            (1) model_filename (str): The inference model file name of the paddle 1.x
            ``save_inference_model`` save format. Default file name is :code:`__model__` .
            (2) params_filename (str): The persistable variables file name of the paddle 1.x
            ``save_inference_model`` save format. No default file name, save variables separately
            by default.


    Returns:
        TranslatedLayer: A Layer object that can run the saved translated model.

    Examples:
        1. Load a model saved by ``paddle.jit.save``, then perform inference and fine-tune training.

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.nn as nn
            import paddle.optimizer as opt

            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            class LinearNet(nn.Layer):
                def __init__(self):
                    super(LinearNet, self).__init__()
                    self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

                @paddle.jit.to_static
                def forward(self, x):
                    return self._linear(x)

            def train(layer, loader, loss_fn, opt):
                for epoch_id in range(EPOCH_NUM):
                    for batch_id, (image, label) in enumerate(loader()):
                        out = layer(image)
                        loss = loss_fn(out, label)
                        loss.backward()
                        opt.step()
                        opt.clear_grad()
                        print("Epoch {} batch {}: loss = {}".format(
                            epoch_id, batch_id, np.mean(loss.numpy())))

            # 1. train & save model.

            # create network
            layer = LinearNet()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)

            # train
            train(layer, loader, loss_fn, adam)

            # save
            path = "example_model/linear"
            paddle.jit.save(layer, path)

            # 2. load model

            # load
            loaded_layer = paddle.jit.load(path)

            # inference
            loaded_layer.eval()
            x = paddle.randn([1, IMAGE_SIZE], 'float32')
            pred = loaded_layer(x)

            # fine-tune
            loaded_layer.train()
            adam = opt.Adam(learning_rate=0.001, parameters=loaded_layer.parameters())
            train(loaded_layer, loader, loss_fn, adam)


        2. Load a model saved by ``paddle.fluid.io.save_inference_model``, then perform inference and fine-tune training.

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.static as static
            import paddle.nn as nn
            import paddle.optimizer as opt
            import paddle.nn.functional as F

            BATCH_SIZE = 16
            BATCH_NUM = 4
            EPOCH_NUM = 4

            IMAGE_SIZE = 784
            CLASS_NUM = 10

            # define a random dataset
            class RandomDataset(paddle.io.Dataset):
                def __init__(self, num_samples):
                    self.num_samples = num_samples

                def __getitem__(self, idx):
                    image = np.random.random([IMAGE_SIZE]).astype('float32')
                    label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
                    return image, label

                def __len__(self):
                    return self.num_samples

            paddle.enable_static()

            image = static.data(name='image', shape=[None, 784], dtype='float32')
            label = static.data(name='label', shape=[None, 1], dtype='int64')
            pred = static.nn.fc(x=image, size=10, activation='softmax')
            loss = F.cross_entropy(input=pred, label=label)
            avg_loss = paddle.mean(loss)

            optimizer = paddle.optimizer.SGD(learning_rate=0.001)
            optimizer.minimize(avg_loss)

            place = paddle.CPUPlace()
            exe = static.Executor(place)
            exe.run(static.default_startup_program())

            # create data loader
            dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
            loader = paddle.io.DataLoader(dataset,
                feed_list=[image, label],
                places=place,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                return_list=False,
                num_workers=2)

            # 1. train and save inference model
            for data in loader():
                exe.run(
                    static.default_main_program(),
                    feed=data,
                    fetch_list=[avg_loss])

            model_path = "fc.example.model"
            paddle.fluid.io.save_inference_model(
                model_path, ["image"], [pred], exe)

            # 2. load model

            # enable dygraph mode
            paddle.disable_static(place)

            # load
            fc = paddle.jit.load(model_path)

            # inference
            fc.eval()
            x = paddle.randn([1, IMAGE_SIZE], 'float32')
            pred = fc(x)

            # fine-tune
            fc.train()
            loss_fn = nn.CrossEntropyLoss()
            adam = opt.Adam(learning_rate=0.001, parameters=fc.parameters())
            loader = paddle.io.DataLoader(dataset,
                places=place,
                batch_size=BATCH_SIZE,
                shuffle=True,
                drop_last=True,
                num_workers=2)
            for epoch_id in range(EPOCH_NUM):
                for batch_id, (image, label) in enumerate(loader()):
                    out = fc(image)
                    loss = loss_fn(out, label)
                    loss.backward()
                    adam.step()
                    adam.clear_grad()
                    print("Epoch {} batch {}: loss = {}".format(
                        epoch_id, batch_id, np.mean(loss.numpy())))
    """
    # 1. construct correct config
    config = _parse_load_config(configs)
    model_path, config = _build_load_path_and_config(path, config)

    return TranslatedLayer._construct(model_path, config)


@dygraph_only
def _trace(layer,
           inputs,
           feed_prefix='feed_',
           fetch_prefix='fetch_',
           tmp_prefix='t_'):
    assert isinstance(layer, Layer)

    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]

    tracer = _dygraph_tracer()._get_program_desc_tracer()

    var_list = extract_vars(inputs)

    with program_desc_tracing_guard(True):
        original_outputs = layer(*inputs)
        if not isinstance(original_outputs, (list, tuple)):
            outputs = [original_outputs]
        else:
            outputs = original_outputs
        out_vars = extract_vars(outputs, err_tag='outputs')

        program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(
            var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix)
        tracer.reset()

    with _dygraph_guard(None):
        program = create_program_from_desc(program_desc)

    return original_outputs, program, feed_names, fetch_names, parameters
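
# Minimal sketch of how `_trace` is consumed (see `TracedLayer.trace` below;
# `layer` and `x` are hypothetical):
#
#     outs, program, feed_names, fetch_names, params = _trace(layer, [x])
#     # `program` is a static Program that shares parameters with `layer`.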


class TracedLayer(object):
    """
    :api_attr: imperative

    TracedLayer is used to convert a forward dygraph model to a static
    graph model. This is mainly used to save the dygraph model for online
    inference using C++. Besides, users can also do inference in Python
    using the converted static graph model, which usually has better
    performance than the original dygraph model.

    TracedLayer would run the static graph model using :code:`Executor`
    and :code:`CompiledProgram` . The static graph model would share
    parameters with the dygraph model.

    TracedLayer objects should not be created by the constructor; they should
    be created by the static method :code:`TracedLayer.trace(layer, inputs)` .

    The TracedLayer can only be used to convert a data-independent dygraph
    model into a static graph model, which means the dygraph model should
    be independent of the tensor data and shape.
    """

    def __init__(self, program, parameters, feed_names, fetch_names):
        self._program = program
        self._feed_names = feed_names
        self._fetch_names = fetch_names
        self._params = parameters

        self._place = _current_expected_place()

        self._scope = core.Scope()
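        # Share each parameter's tensor into the private scope so the
        # static graph reuses the dygraph weights without copying them.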
        for p in parameters:
            src_tensor = p.value().get_tensor()
            dst_tensor = self._scope.var(p.name).get_tensor()
            dst_tensor._share_data_with(src_tensor)

        self._exe = Executor(self._place)
        self._compiled_program = None
        self._build_strategy = None
        self._exec_strategy = None

    @property
    def program(self):
        return self._program

    def _switch(self, is_test=True):
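        # Flip the `is_test` attribute of every op so that layers such as
        # Dropout and BatchNorm switch between training and inference behavior.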
        for block_id in range(self._program.num_blocks):
            block = self._program.block(block_id)
            for op in block.ops:
                if op.has_attr("is_test"):
                    op._set_attr("is_test", is_test)

    @staticmethod
    @dygraph_only
    def trace(layer, inputs):
        """
        This method is the only allowed way to create a TracedLayer object.
        It calls the :code:`layer(*inputs)` method to run the dygraph
        model and converts it into a static graph model.

        Args:
            layer (paddle.nn.Layer): the layer object to be traced.
            inputs (list(Tensor)|tuple(Tensor)|Tensor): the input tensors of
                the layer object.

        Returns:
            tuple: A tuple of 2 items, whose first item is the output of
                :code:`layer(*inputs)` , and the second item is the created
                TracedLayer object.

        Examples:
            .. code-block:: python

                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)


                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')
                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])

                # run the static graph model using Executor inside
                out_static_graph = static_layer([in_var])

                print(len(out_static_graph)) # 1
                print(out_static_graph[0].shape) # (2, 10)

                # save the static graph model for inference
                static_layer.save_inference_model(dirname='./saved_infer_model')

        """
        assert isinstance(
            layer, Layer
        ), "The type of 'layer' in fluid.dygraph.jit.TracedLayer.trace must be fluid.dygraph.Layer, but received {}.".format(
            type(layer))
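        # Run the layer once under the tracer and wrap the captured program.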
        outs, prog, feed, fetch, parameters = _trace(layer, inputs)
        traced = TracedLayer(prog, parameters, feed, fetch)
        return outs, traced

    def set_strategy(self, build_strategy=None, exec_strategy=None):
        """
        Set the strategies when running static graph model.

        Args:
            build_strategy (BuildStrategy, optional): build strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.
            exec_strategy (ExecutionStrategy, optional): execution strategy of
                :code:`CompiledProgram` inside TracedLayer. Default None.

        Returns:
            None

        Examples:
            .. code-block:: python

                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)

                layer = ExampleLayer()
                in_var = paddle.uniform(shape=[2, 3], dtype='float32')

                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])

                build_strategy = paddle.static.BuildStrategy()
                build_strategy.enable_inplace = True

                exec_strategy = paddle.static.ExecutionStrategy()
                exec_strategy.num_threads = 2

                static_layer.set_strategy(build_strategy=build_strategy, exec_strategy=exec_strategy)
                out_static_graph = static_layer([in_var])

        """
        assert self._compiled_program is None, "Cannot set strategy after run"
        assert isinstance(
            build_strategy, (type(None), BuildStrategy)
        ), "The type of 'build_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format(
            type(build_strategy))
        assert isinstance(
            exec_strategy, (type(None), ExecutionStrategy)
        ), "The type of 'exec_strategy' in fluid.dygraph.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format(
            type(exec_strategy))
        self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy

    @switch_to_static_graph
    def _compile(self):
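        # Build the CompiledProgram lazily on first run, applying any
        # user-provided build/execution strategies.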
        self._compiled_program = CompiledProgram(
            self._program).with_data_parallel(
                build_strategy=self._build_strategy,
                exec_strategy=self._exec_strategy,
                places=self._place)

    def _build_feed(self, inputs):
        assert isinstance(inputs, (list, tuple)), \
            "Inputs should be a list or tuple of variables"
        assert len(inputs) == len(self._feed_names)
        feed_dict = {}
        if _non_static_mode():
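            # In dygraph mode, unwrap each input Tensor into its underlying
            # LoDTensor so the executor can consume it directly.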
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x.value().get_tensor()
        else:
            for x, name in zip(inputs, self._feed_names):
                feed_dict[name] = x

        return feed_dict

    @switch_to_static_graph
    def _run(self, feed):
        return self._exe.run(self._compiled_program,
                             feed=feed,
                             fetch_list=self._fetch_names)

    def __call__(self, inputs):
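        # Compile on first use, then run the static graph inside the
        # private scope that holds the shared parameters.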
        with scope_guard(self._scope):
            if self._compiled_program is None:
                self._compile()

            return self._run(self._build_feed(inputs))

    @switch_to_static_graph
    def save_inference_model(self, path, feed=None, fetch=None, **kwargs):
        """
        Save the TracedLayer as a static graph model for inference. The
        saved model can be loaded by the C++ inference APIs.

        ``path`` is the prefix of the saved artifacts: the translated program file
        uses the suffix ``.pdmodel`` , and the persistable variables file uses the suffix ``.pdiparams`` .

        Args:
            path(str): The path prefix to save model. The format is ``dirname/file_prefix`` or ``file_prefix``.
            feed (list[int], optional): the input variable indices of the saved
                inference model. If None, all input variables of the
                TracedLayer object would be the inputs of the saved inference
                model. Default None.
            fetch (list[int], optional): the output variable indices of the
                saved inference model. If None, all output variables of the
                TracedLayer object would be the outputs of the saved inference
                model. Default None.
            kwargs: Supported keys include 'clip_extra'. Set it to True to clip
                extra information from every operator. Default: False.

        Returns:
            None

        Examples:
            .. code-block:: python

                import numpy as np
                import paddle

                class ExampleLayer(paddle.nn.Layer):
                    def __init__(self):
                        super(ExampleLayer, self).__init__()
                        self._fc = paddle.nn.Linear(3, 10)

                    def forward(self, input):
                        return self._fc(input)

                save_dirname = './saved_infer_model'
                in_np = np.random.random([2, 3]).astype('float32')
                in_var = paddle.to_tensor(in_np)
                layer = ExampleLayer()

                out_dygraph, static_layer = paddle.jit.TracedLayer.trace(layer, inputs=[in_var])
                static_layer.save_inference_model(save_dirname, feed=[0], fetch=[0])

                paddle.enable_static()
                place = paddle.CPUPlace()
                exe = paddle.static.Executor(place)
                program, feed_vars, fetch_vars = paddle.static.load_inference_model(save_dirname,
                                                    exe)

                fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)
                print(fetch.shape) # (2, 10)
        """
        check_type(path, "path", str,
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        check_type(feed, "feed", (type(None), list),
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        if isinstance(feed, list):
            for f in feed:
                check_type(f, "each element of feed", int,
                           "fluid.dygraph.jit.TracedLayer.save_inference_model")
        check_type(fetch, "fetch", (type(None), list),
                   "fluid.dygraph.jit.TracedLayer.save_inference_model")
        if isinstance(fetch, list):
            for f in fetch:
                check_type(f, "each element of fetch", int,
                           "fluid.dygraph.jit.TracedLayer.save_inference_model")
        clip_extra = kwargs.get('clip_extra', False)
        # path check
        file_prefix = os.path.basename(path)
        if file_prefix == "":
            raise ValueError(
                "The input path MUST be in the format of 'dirname/file_prefix' "
                "('dirname\\file_prefix' on Windows), but the received "
                "file_prefix is an empty string.")

        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)

        from paddle.fluid.io import save_inference_model

        def get_feed_fetch(all_vars, partial_vars):
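            # Select variable names by index; None means "use all of them".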
            if partial_vars is None:
                return all_vars

            return [all_vars[idx] for idx in partial_vars]

        with scope_guard(self._scope):
            feeded_var_names = get_feed_fetch(self._feed_names, feed)
            target_var_names = get_feed_fetch(self._fetch_names, fetch)
            target_vars = []
            for name in target_var_names:
                target_var = self._program.global_block().vars.get(name, None)
                assert target_var is not None, "{} cannot be found".format(name)
                target_vars.append(target_var)

            model_filename = file_prefix + INFER_MODEL_SUFFIX
            params_filename = file_prefix + INFER_PARAMS_SUFFIX

            save_inference_model(
                dirname=dirname,
                feeded_var_names=feeded_var_names,
                target_vars=target_vars,
                executor=self._exe,
                main_program=self._program.clone(),
                model_filename=model_filename,
                params_filename=params_filename,
                clip_extra=clip_extra)