Commit 24a33bed authored by: Chen Weihang

replace config by kwargs

Parent 6b727e08
......@@ -234,7 +234,6 @@ from .framework import grad #DEFINE_ALIAS
from .framework import no_grad #DEFINE_ALIAS
from .framework import save #DEFINE_ALIAS
from .framework import load #DEFINE_ALIAS
from .framework import SaveLoadConfig #DEFINE_ALIAS
from .framework import DataParallel #DEFINE_ALIAS
from .framework import NoamDecay #DEFINE_ALIAS
......
......@@ -24,7 +24,7 @@ from . import learning_rate_scheduler
import warnings
from .. import core
from .base import guard
from paddle.fluid.dygraph.jit import SaveLoadConfig, deprecate_save_load_configs
from paddle.fluid.dygraph.jit import _SaveLoadConfig
from paddle.fluid.dygraph.io import _construct_program_holders, _construct_params_and_buffers, EXTRA_VAR_INFO_FILENAME
__all__ = [
......@@ -33,35 +33,27 @@ __all__ = [
]
# NOTE(chenweihang): deprecate load_dygraph's argument keep_name_table,
# ensure compatibility when users still pass the keep_name_table argument
def deprecate_keep_name_table(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
def __warn_and_build_configs__(keep_name_table):
warnings.warn(
"The argument `keep_name_table` has deprecated, please use `SaveLoadConfig.keep_name_table`.",
DeprecationWarning)
config = SaveLoadConfig()
config.keep_name_table = keep_name_table
return config
# deal with arg `keep_name_table`
if len(args) > 1 and isinstance(args[1], bool):
args = list(args)
args[1] = __warn_and_build_configs__(args[1])
# deal with kwargs
elif 'keep_name_table' in kwargs:
kwargs['config'] = __warn_and_build_configs__(kwargs[
'keep_name_table'])
kwargs.pop('keep_name_table')
else:
# do nothing
pass
def _parse_load_config(configs):
supported_configs = [
'model_filename', 'params_filename', 'separate_params',
'keep_name_table'
]
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.fluid.load_dygraph` is not supported."
% (key))
return func(*args, **kwargs)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.model_filename = configs.get('model_filename', None)
inner_config.params_filename = configs.get('params_filename', None)
inner_config.separate_params = configs.get('separate_params', None)
inner_config.keep_name_table = configs.get('keep_name_table', None)
return wrapper
return inner_config
@dygraph_only
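The decorator-based shim above gives way to a plain validate-then-copy helper. A minimal standalone sketch of that pattern (the `_Config` class, `_parse` helper, and `demo.load` name are hypothetical stand-ins, not part of this commit):

# Standalone sketch of the validate-then-copy kwargs pattern.
class _Config(object):
    def __init__(self):
        self.model_filename = None
        self.params_filename = None

def _parse(configs, supported, api_name):
    # reject unknown keys early, mirroring the ValueError above
    for key in configs:
        if key not in supported:
            raise ValueError(
                "The additional config (%s) of `%s` is not supported." %
                (key, api_name))
    # copy only the known keys onto a private config object
    inner = _Config()
    inner.model_filename = configs.get('model_filename', None)
    inner.params_filename = configs.get('params_filename', None)
    return inner

cfg = _parse({'model_filename': '__model__'},
             ['model_filename', 'params_filename'], 'demo.load')
assert cfg.model_filename == '__model__'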
......@@ -135,9 +127,7 @@ def save_dygraph(state_dict, model_path):
# TODO(qingqing01): remove dygraph_only to support loading static model.
# maybe need to unify the loading interface after 2.0 API is ready.
# @dygraph_only
@deprecate_save_load_configs
@deprecate_keep_name_table
def load_dygraph(model_path, config=None):
def load_dygraph(model_path, **configs):
'''
:api_attr: imperative
......@@ -152,10 +142,20 @@ def load_dygraph(model_path, config=None):
Args:
model_path(str) : The file prefix that stores the state_dict.
(The path should not contain the suffix '.pdparams')
config (SaveLoadConfig, optional): :ref:`api_imperative_jit_saveLoadConfig`
object that specifies additional configuration options; these options
are for compatibility with ``jit.save/io.save_inference_model`` formats.
Default None.
configs (dict, optional): other load configuration options for compatibility. We do not
recommend using these configurations; if not necessary, DO NOT use them. Default None.
The following options are currently supported:
(1) model_filename (string): The filename to load the translated program of the target Layer.
Default filename is :code:`__model__` .
(2) params_filename (string): The filename to load all persistable variables in the target Layer.
Default file name is :code:`__variables__` .
(3) separate_params (bool): Configure whether to load the Layer parameters from separate files.
If True, each parameter will be loaded from a file separately, the file name is the parameter name,
and the params_filename configuration will not take effect. Default False.
(4) keep_name_table (bool): Configures whether to keep the ``structured_name -> parameter_name`` dict in the
loaded state dict. This dict is the debugging information saved when calling ``paddle.fluid.save_dygraph`` .
It is generally only used for debugging and does not affect the actual training or inference.
By default, it will not be retained in the ``paddle.fluid.load_dygraph`` result. Default: False.
Returns:
state_dict(dict) : the dict that stores the state_dict
......@@ -196,8 +196,7 @@ def load_dygraph(model_path, config=None):
opti_file_path = model_prefix + ".pdopt"
# deal with argument `config`
if config is None:
config = SaveLoadConfig()
config = _parse_load_config(configs)
if os.path.exists(params_file_path) or os.path.exists(opti_file_path):
# Load state dict by `save_dygraph` save format
......
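Assuming a state dict was first saved with `save_dygraph`, the new keyword-based surface can be exercised like this (a usage sketch; the "paddle_dy" prefix is illustrative):

import paddle
import paddle.fluid as fluid

paddle.disable_static()
linear = paddle.nn.Linear(5, 1)
fluid.dygraph.save_dygraph(linear.state_dict(), "paddle_dy")

# keyword arguments replace the old SaveLoadConfig object
para_state_dict, _ = fluid.load_dygraph("paddle_dy", keep_name_table=True)

# unsupported keys now fail fast:
# fluid.load_dygraph("paddle_dy", output_spec=[])  # raises ValueError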
......@@ -39,7 +39,7 @@ from paddle.fluid.wrapped_decorator import wrap_decorator
__all__ = [
'TracedLayer', 'declarative', 'dygraph_to_static_func', 'set_code_level',
'set_verbosity', 'save', 'load', 'SaveLoadConfig'
'set_verbosity', 'save', 'load'
]
......@@ -228,73 +228,7 @@ def declarative(function=None, input_spec=None):
return decorated
class SaveLoadConfig(object):
"""
Additional configuration options that may be used in
``paddle.jit.save/load`` and ``paddle.load`` .
Examples:
1. Using ``SaveLoadConfig`` when saving model
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = paddle.tensor.mean(out)
loss.backward()
adam.step()
adam.clear_grad()
# use SaveLoadConfig when saving model
model_path = "simplenet.example.model"
config = paddle.SaveLoadConfig()
config.model_filename = "__simplenet__"
paddle.jit.save(
layer=net,
model_path=model_path,
config=config)
2. Using ``SaveLoadConfig`` when loading model
.. code-block:: python
import paddle
# enable dygraph mode
paddle.disable_static()
# use SaveLoadConfig when loading model
model_path = "simplenet.example.model"
config = paddle.SaveLoadConfig()
config.model_filename = "__simplenet__"
infer_net = paddle.jit.load(model_path, config=config)
# inference
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
"""
class _SaveLoadConfig(object):
def __init__(self):
self._output_spec = None
self._model_filename = None
......@@ -316,207 +250,53 @@ class SaveLoadConfig(object):
@property
def output_spec(self):
"""
Selects the output targets of the saved model ( ``paddle.jit.TranslatedLayer`` ).
By default, all return variables of original Layer's forward function
are kept as the output of the saved TranslatedLayer.
The ``output_spec`` type should be list[Variable]. If the provided ``output_spec``
list is not all output variables, the saved model will be pruned according to the
given ``output_spec`` list.
.. note::
The ``output_spec`` is only used when saving model.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
loss = paddle.tensor.mean(z)
return z, loss
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out, loss = net(x)
loss.backward()
adam.step()
adam.clear_grad()
# use SaveLoadConfig.output_spec
model_path = "simplenet.example.model.output_spec"
config = paddle.SaveLoadConfig()
config.output_spec = [out]
paddle.jit.save(
layer=net,
model_path=model_path,
config=config)
infer_net = paddle.jit.load(model_path)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
"""
return self._output_spec
@output_spec.setter
def output_spec(self, spec):
if spec is None:
return
if not isinstance(spec, list):
raise TypeError(
"The SaveLoadConfig.output_spec should be 'list', but received input type is %s."
"The config `output_spec` should be 'list', but received input type is %s."
% type(spec))
for var in spec:
if not isinstance(var, core.VarBase):
raise TypeError(
"The element in SaveLoadConfig.output_spec list should be 'Variable', but received element's type is %s."
"The element in config `output_spec` list should be 'Variable', but received element's type is %s."
% type(var))
self._output_spec = spec
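The setters on `_SaveLoadConfig` share one idiom: `None` is accepted silently so that `configs.get(key, None)` round-trips, and anything else is type-checked. A self-contained sketch of that idiom (the `_Sketch` class is illustrative):

class _Sketch(object):
    def __init__(self):
        self._output_spec = None

    @property
    def output_spec(self):
        return self._output_spec

    @output_spec.setter
    def output_spec(self, spec):
        if spec is None:  # None means "not configured": keep the default
            return
        if not isinstance(spec, list):
            raise TypeError(
                "output_spec should be 'list', but received %s." % type(spec))
        self._output_spec = spec

s = _Sketch()
s.output_spec = None  # no-op, so configs.get(key, None) is always safe
assert s.output_spec is None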
@property
def model_filename(self):
"""
The name of file to save the translated program of target Layer.
Default filename is :code:`__model__` .
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = paddle.tensor.mean(out)
loss.backward()
adam.step()
adam.clear_grad()
# saving with configs.model_filename
model_path = "simplenet.example.model.model_filename"
config = paddle.SaveLoadConfig()
config.model_filename = "__simplenet__"
paddle.jit.save(
layer=net,
model_path=model_path,
config=config)
# loading with configs.model_filename
infer_net = paddle.jit.load(model_path, config=config)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
"""
return self._model_filename
@model_filename.setter
def model_filename(self, filename):
if filename is None:
return
if not isinstance(filename, six.string_types):
raise TypeError(
"The SaveLoadConfig.model_filename should be str, but received input's type is %s."
"The config `model_filename` should be str, but received input's type is %s."
% type(filename))
if len(filename) == 0:
raise ValueError(
"The SaveLoadConfig.model_filename is empty string.")
raise ValueError("The config `model_filename` is empty string.")
self._model_filename = filename
@property
def params_filename(self):
"""
The name of file to save all persistable variables in target Layer.
Default file name is :code:`__variables__` .
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = paddle.tensor.mean(out)
loss.backward()
adam.step()
adam.clear_grad()
model_path = "simplenet.example.model.params_filename"
config = paddle.SaveLoadConfig()
config.params_filename = "__params__"
# saving with configs.params_filename
paddle.jit.save(
layer=net,
model_path=model_path,
config=config)
# loading with configs.params_filename
infer_net = paddle.jit.load(model_path, config=config)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
"""
return self._params_filename
@params_filename.setter
def params_filename(self, filename):
if filename is None:
return
if not isinstance(filename, six.string_types):
raise TypeError(
"The SaveLoadConfig.params_filename should be str, but received input's type is %s."
"The config `params_filename` should be str, but received input's type is %s."
% type(filename))
if len(filename) == 0:
raise ValueError(
"The SaveLoadConfig.params_filename is empty string.")
raise ValueError("The config `params_filename` is empty string.")
self._params_filename = filename
# NOTE: [why not use params_filename=None control params saved separately]
......@@ -527,122 +307,72 @@ class SaveLoadConfig(object):
# separately can make the concept clearer.
@property
def separate_params(self):
"""
Configure whether to save the Layer parameters as separate files.
(In order to be compatible with the behavior of ``paddle.static.save_inference_model`` )
If True, each parameter will be saved to a file separately, the file name is the parameter name,
and the SaveLoadConfig.params_filename configuration will not take effect. Default False.
.. note::
Only used for ``paddle.jit.save`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
class SimpleNet(nn.Layer):
def __init__(self, in_size, out_size):
super(SimpleNet, self).__init__()
self._linear = nn.Linear(in_size, out_size)
@paddle.jit.to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
return z
# enable dygraph mode
paddle.disable_static()
# train model
net = SimpleNet(8, 8)
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
out = net(x)
loss = paddle.tensor.mean(out)
loss.backward()
adam.step()
adam.clear_grad()
model_path = "simplenet.example.model.separate_params"
config = paddle.SaveLoadConfig()
config.separate_params = True
# saving with configs.separate_params
paddle.jit.save(
layer=net,
model_path=model_path,
config=config)
# [result] the saved model directory contains:
# linear_0.b_0 linear_0.w_0 __model__ __variables.info__
# loading with configs.separate_params
infer_net = paddle.jit.load(model_path, config=config)
x = paddle.randn([4, 8], 'float32')
pred = infer_net(x)
"""
return self._separate_params
@separate_params.setter
def separate_params(self, value):
if value is None:
return None
if not isinstance(value, bool):
raise TypeError(
"The SaveLoadConfig.separate_params should be bool value, but received input's type is %s."
"The config `separate_params` should be bool value, but received input's type is %s."
% type(value))
self._separate_params = value
@property
def keep_name_table(self):
"""
Configures whether to keep the ``structured_name -> parameter_name`` dict in the loaded state dict.
This dict is the debugging information saved when calling ``paddle.save`` .
It is generally only used for debugging and does not affect the actual training or inference.
By default, it will not be retained in ``paddle.load`` result. Default: False.
return self._keep_name_table
.. note::
Only used for ``paddle.load`` .
@keep_name_table.setter
def keep_name_table(self, value):
if value is None:
return
if not isinstance(value, bool):
raise TypeError(
"The config `keep_name_table` should be bool value, but received input's type is %s."
% type(value))
self._keep_name_table = value
Examples:
.. code-block:: python
import paddle
def _parse_save_configs(configs):
supported_configs = [
'output_spec', 'model_filename', 'params_filename', 'separate_params'
]
paddle.disable_static()
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.jit.save` is not supported."
% (key))
linear = paddle.nn.Linear(5, 1)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.output_spec = configs.get('output_spec', None)
inner_config.model_filename = configs.get('model_filename', None)
inner_config.params_filename = configs.get('params_filename', None)
inner_config.separate_params = configs.get('separate_params', None)
state_dict = linear.state_dict()
paddle.save(state_dict, "paddle_dy.pdparams")
return inner_config
config = paddle.SaveLoadConfig()
config.keep_name_table = True
para_state_dict = paddle.load("paddle_dy.pdparams", config)
print(para_state_dict)
# the name_table is 'StructuredToParameterName@@'
# {'bias': array([0.], dtype=float32),
# 'StructuredToParameterName@@':
# {'bias': u'linear_0.b_0', 'weight': u'linear_0.w_0'},
# 'weight': array([[ 0.04230034],
# [-0.1222527 ],
# [ 0.7392676 ],
# [-0.8136974 ],
# [ 0.01211023]], dtype=float32)}
"""
return self._keep_name_table
def _parse_load_config(configs):
supported_configs = ['model_filename', 'params_filename', 'separate_params']
@keep_name_table.setter
def keep_name_table(self, value):
if not isinstance(value, bool):
raise TypeError(
"The SaveLoadConfig.keep_name_table should be bool value, but received input's type is %s."
% type(value))
self._keep_name_table = value
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.jit.load` is not supported."
% (key))
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.model_filename = configs.get('model_filename', None)
inner_config.params_filename = configs.get('params_filename', None)
inner_config.separate_params = configs.get('separate_params', None)
return inner_config
def _get_input_var_names(inputs, input_spec):
......@@ -712,21 +442,8 @@ def _get_output_vars(outputs, output_spec):
return result_list
# NOTE(chenweihang): change jit.save/load argument `configs` to `config`
def deprecate_save_load_configs(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if 'configs' in kwargs:
kwargs['config'] = kwargs['configs']
kwargs.pop('configs')
return func(*args, **kwargs)
return wrapper
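The removed shim above is an instance of a generic keyword-renaming decorator; a hedged standalone sketch (the `rename_kwarg` name is hypothetical):

import functools
import warnings

def rename_kwarg(old_name, new_name):
    # rename a deprecated keyword argument before delegating
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if old_name in kwargs:
                warnings.warn(
                    "`%s` is deprecated, use `%s` instead." %
                    (old_name, new_name), DeprecationWarning)
                kwargs[new_name] = kwargs.pop(old_name)
            return func(*args, **kwargs)
        return wrapper
    return decorator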
@deprecate_save_load_configs
@switch_to_static_graph
def save(layer, model_path, input_spec=None, config=None):
def save(layer, model_path, input_spec=None, **configs):
"""
Saves the input declarative Layer as a :ref:`api_imperative_TranslatedLayer`
format model, which can be used for inference or fine-tuning after loading.
......@@ -747,12 +464,25 @@ def save(layer, model_path, input_spec=None, config=None):
Args:
layer (Layer): the Layer to be saved. The Layer should be decorated by `@declarative`.
model_path (str): the directory to save the model.
input_spec (list[Variable], optional): Describes the input of the saved model.
input_spec (list[InputSpec|Tensor], optional): Describes the input of the saved model.
It is the example inputs that will be passed to saved TranslatedLayer's forward
function. If None, all input variables of the original Layer's forward function
would be the inputs of the saved model. Default None.
config (SaveLoadConfig, optional): :ref:`api_imperative_jit_saveLoadConfig` object
that specifies additional configuration options. Default None.
configs (dict, optional): other save configuration options for compatibility. We do not
recommend using these configurations; if not necessary, DO NOT use them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of the saved model.
By default, all return variables of original Layer's forward function are kept as the
output of the saved model. If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given ``output_spec`` list.
(2) model_filename (string): The name of the file to save the translated program of the target Layer.
Default filename is :code:`__model__` .
(3) params_filename (string): The name of the file to save all persistable variables in the target Layer.
Default file name is :code:`__variables__` .
(4) separate_params (bool): Configure whether to save the Layer parameters as separate files.
If True, each parameter will be saved to a file separately, the file name is the parameter name,
and the params_filename configuration will not take effect. Default False.
Returns:
None
......@@ -843,9 +573,7 @@ def save(layer, model_path, input_spec=None, config=None):
"The input layer of paddle.jit.save should be 'Layer', but received layer type is %s."
% type(layer))
configs = config
if configs is None:
configs = SaveLoadConfig()
configs = _parse_save_configs(configs)
# avoid change user given input_spec
inner_input_spec = None
......@@ -964,9 +692,8 @@ def save(layer, model_path, input_spec=None, config=None):
pickle.dump(extra_var_info, f, protocol=2)
@deprecate_save_load_configs
@dygraph_only
def load(model_path, config=None):
def load(model_path, **configs):
"""
:api_attr: imperative
......@@ -983,8 +710,17 @@ def load(model_path, config=None):
Args:
model_path (str): The directory path where the model is saved.
config (SaveLoadConfig, optional): :ref:`api_imperative_jit_saveLoadConfig` object that specifies
additional configuration options. Default None.
configs (dict, optional): other load configuration options for compatibility. We do not
recommend using these configurations; if not necessary, DO NOT use them. Default None.
The following options are currently supported:
(1) model_filename (string): The filename to load the translated program of the target Layer.
Default filename is :code:`__model__` .
(2) params_filename (string): The filename to load all persistable variables in the target Layer.
Default file name is :code:`__variables__` .
(3) separate_params (bool): Configure whether to load the Layer parameters from separate files.
If True, each parameter will be loaded from a file separately, the file name is the parameter name,
and the params_filename configuration will not take effect. Default False.
Returns:
TranslatedLayer: A Layer object that can run the saved translated model.
......@@ -1179,6 +915,7 @@ def load(model_path, config=None):
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
"""
config = _parse_load_config(configs)
return TranslatedLayer._construct(model_path, config)
......
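Putting the new `paddle.jit.save/load` surface together, a usage sketch assembled from the docstring examples above (paths and shapes are illustrative):

import paddle
import paddle.nn as nn
import paddle.optimizer as opt

class SimpleNet(nn.Layer):
    def __init__(self, in_size, out_size):
        super(SimpleNet, self).__init__()
        self._linear = nn.Linear(in_size, out_size)

    @paddle.jit.to_static
    def forward(self, x):
        y = self._linear(x)
        z = self._linear(y)
        loss = paddle.tensor.mean(z)
        return z, loss

paddle.disable_static()
net = SimpleNet(8, 8)
adam = opt.Adam(learning_rate=0.1, parameters=net.parameters())
x = paddle.randn([4, 8], 'float32')
for i in range(10):
    out, loss = net(x)
    loss.backward()
    adam.step()
    adam.clear_grad()

model_path = "simplenet.example.model"
# keyword configs replace the removed SaveLoadConfig object:
# prune the outputs to `out` and rename the program file
paddle.jit.save(
    layer=net,
    model_path=model_path,
    output_spec=[out],
    model_filename="__simplenet__")

infer_net = paddle.jit.load(model_path, model_filename="__simplenet__")
pred = infer_net(x)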
......@@ -14,7 +14,7 @@
from __future__ import print_function
from paddle.fluid.dygraph.jit import SaveLoadConfig
from paddle.fluid.dygraph.jit import _SaveLoadConfig
from paddle.fluid.dygraph.io import TranslatedLayer
......@@ -31,7 +31,7 @@ class StaticModelRunner(object):
"""
def __new__(cls, model_dir, model_filename=None, params_filename=None):
configs = SaveLoadConfig()
configs = _SaveLoadConfig()
if model_filename is not None:
configs.model_filename = model_filename
if params_filename is not None:
......
......@@ -498,13 +498,11 @@ def do_train(args, to_static):
step += 1
# save inference model
if to_static:
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [crf_decode]
fluid.dygraph.jit.save(
layer=model,
model_path=args.model_save_dir,
input_spec=[words, length],
configs=configs)
output_spec=[crf_decode])
else:
fluid.dygraph.save_dygraph(model.state_dict(), args.dy_param_path)
......
......@@ -218,13 +218,11 @@ class TestMNISTWithToStatic(TestMNIST):
def check_jit_save_load(self, model, inputs, input_spec, to_static, gt_out):
if to_static:
infer_model_path = "./test_mnist_inference_model_by_jit_save"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [gt_out]
fluid.dygraph.jit.save(
layer=model,
model_path=infer_model_path,
input_spec=input_spec,
configs=configs)
output_spec=[gt_out])
# load in static mode
static_infer_out = self.jit_load_and_run_inference_static(
infer_model_path, inputs)
......
......@@ -67,13 +67,11 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
layer.clear_gradients()
# test for saving model in dygraph.guard
infer_model_dir = "./test_dy2stat_save_inference_model_in_guard"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [pred]
fluid.dygraph.jit.save(
layer=layer,
model_path=infer_model_dir,
input_spec=[x],
configs=configs)
output_spec=[pred])
# Check the correctness of the inference
dygraph_out, _ = layer(x)
self.check_save_inference_model(layer, [x_data], dygraph_out.numpy())
......@@ -92,15 +90,12 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
expected_persistable_vars = set([p.name for p in model.parameters()])
infer_model_dir = "./test_dy2stat_save_inference_model"
configs = fluid.dygraph.jit.SaveLoadConfig()
if fetch is not None:
configs.output_spec = fetch
configs.separate_params = True
fluid.dygraph.jit.save(
layer=model,
model_path=infer_model_dir,
input_spec=feed if feed else None,
configs=configs)
separate_params=True,
output_spec=fetch if fetch else None)
saved_var_names = set([
filename for filename in os.listdir(infer_model_dir)
if filename != '__model__' and filename != EXTRA_VAR_INFO_FILENAME
......
......@@ -383,10 +383,10 @@ def train(train_reader, to_static):
step_idx += 1
if step_idx == STEP_NUM:
if to_static:
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [pred]
fluid.dygraph.jit.save(se_resnext, MODEL_SAVE_PATH,
[img], configs)
fluid.dygraph.jit.save(
se_resnext,
MODEL_SAVE_PATH, [img],
output_spec=[pred])
else:
fluid.dygraph.save_dygraph(se_resnext.state_dict(),
DY_STATE_DICT_SAVE_PATH)
......
......@@ -43,15 +43,14 @@ class TestDirectory(unittest.TestCase):
'paddle.distributed.prepare_context', 'paddle.DataParallel',
'paddle.jit', 'paddle.jit.TracedLayer', 'paddle.jit.to_static',
'paddle.jit.ProgramTranslator', 'paddle.jit.TranslatedLayer',
'paddle.jit.save', 'paddle.jit.load', 'paddle.SaveLoadConfig',
'paddle.NoamDecay', 'paddle.PiecewiseDecay',
'paddle.NaturalExpDecay', 'paddle.ExponentialDecay',
'paddle.InverseTimeDecay', 'paddle.PolynomialDecay',
'paddle.CosineDecay', 'paddle.static.Executor',
'paddle.static.global_scope', 'paddle.static.scope_guard',
'paddle.static.append_backward', 'paddle.static.gradients',
'paddle.static.BuildStrategy', 'paddle.static.CompiledProgram',
'paddle.static.ExecutionStrategy',
'paddle.jit.save', 'paddle.jit.load', 'paddle.NoamDecay',
'paddle.PiecewiseDecay', 'paddle.NaturalExpDecay',
'paddle.ExponentialDecay', 'paddle.InverseTimeDecay',
'paddle.PolynomialDecay', 'paddle.CosineDecay',
'paddle.static.Executor', 'paddle.static.global_scope',
'paddle.static.scope_guard', 'paddle.static.append_backward',
'paddle.static.gradients', 'paddle.static.BuildStrategy',
'paddle.static.CompiledProgram', 'paddle.static.ExecutionStrategy',
'paddle.static.default_main_program',
'paddle.static.default_startup_program', 'paddle.static.Program',
'paddle.static.name_scope', 'paddle.static.program_guard',
......@@ -104,9 +103,7 @@ class TestDirectory(unittest.TestCase):
'paddle.imperative.TracedLayer', 'paddle.imperative.declarative',
'paddle.imperative.ProgramTranslator',
'paddle.imperative.TranslatedLayer', 'paddle.imperative.jit.save',
'paddle.imperative.jit.load',
'paddle.imperative.jit.SaveLoadConfig',
'paddle.imperative.NoamDecay'
'paddle.imperative.jit.load', 'paddle.imperative.NoamDecay',
'paddle.imperative.PiecewiseDecay',
'paddle.imperative.NaturalExpDecay',
'paddle.imperative.ExponentialDecay',
......
......@@ -225,16 +225,13 @@ class TestJitSaveLoad(unittest.TestCase):
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def train_and_save_model(self, model_path=None, configs=None):
def train_and_save_model(self, model_path=None):
layer = LinearNet(784, 1)
example_inputs, layer, _ = train(layer)
final_model_path = model_path if model_path else self.model_path
orig_input_types = [type(x) for x in example_inputs]
fluid.dygraph.jit.save(
layer=layer,
model_path=final_model_path,
input_spec=example_inputs,
configs=configs)
layer=layer, model_path=final_model_path, input_spec=example_inputs)
new_input_types = [type(x) for x in example_inputs]
self.assertEqual(orig_input_types, new_input_types)
return layer
......@@ -314,7 +311,6 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
[None, 8], name='x')])
model_path = "model.input_spec.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
# check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 1)
input_x = net.forward.inputs[0]
......@@ -322,11 +318,11 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
self.assertTrue(input_x.name == 'x')
# 1. prune loss
configs.output_spec = net.forward.outputs[:1]
fluid.dygraph.jit.save(net, model_path, configs=configs)
output_spec = net.forward.outputs[:1]
fluid.dygraph.jit.save(net, model_path, output_spec=output_spec)
# 2. load to infer
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
pred = infer_layer(x)
......@@ -335,7 +331,6 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
net = LinearNetMultiInput(8, 8)
model_path = "model.multi_inout.output_spec1"
configs = fluid.dygraph.jit.SaveLoadConfig()
# 1. check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 2)
input_x = net.forward.inputs[0]
......@@ -344,11 +339,11 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
self.assertTrue(input_y.shape == (-1, 8))
# 2. prune loss
configs.output_spec = net.forward.outputs[:2]
fluid.dygraph.jit.save(net, model_path, configs=configs)
output_spec = net.forward.outputs[:2]
fluid.dygraph.jit.save(net, model_path, output_spec=output_spec)
# 3. load to infer
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
y = fluid.dygraph.to_variable(
......@@ -358,10 +353,11 @@ class TestSaveLoadWithInputSpec(unittest.TestCase):
# 1. prune y and loss
model_path = "model.multi_inout.output_spec2"
configs.output_spec = net.forward.outputs[:1]
fluid.dygraph.jit.save(net, model_path, [input_x], configs)
output_spec = net.forward.outputs[:1]
fluid.dygraph.jit.save(
net, model_path, [input_x], output_spec=output_spec)
# 2. load again
infer_layer2 = fluid.dygraph.jit.load(model_path, configs=configs)
infer_layer2 = fluid.dygraph.jit.load(model_path)
# 3. predict
pred_xx = infer_layer2(x)
......@@ -377,16 +373,16 @@ class TestJitSaveLoadConfig(unittest.TestCase):
paddle.manual_seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def basic_save_load(self, layer, model_path, configs):
def basic_save_load(self, layer, model_path, **configs):
# 1. train & save
example_inputs, train_layer, _ = train(layer)
fluid.dygraph.jit.save(
layer=train_layer,
model_path=model_path,
input_spec=example_inputs,
configs=configs)
**configs)
# 2. load
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path, **configs)
train_layer.eval()
# 3. inference & compare
x = fluid.dygraph.to_variable(
......@@ -397,23 +393,18 @@ class TestJitSaveLoadConfig(unittest.TestCase):
def test_model_filename(self):
layer = LinearNet(784, 1)
model_path = "model.save_load_config.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.model_filename = "__simplenet__"
self.basic_save_load(layer, model_path, configs)
self.basic_save_load(layer, model_path, model_filename="__simplenet__")
def test_params_filename(self):
layer = LinearNet(784, 1)
model_path = "model.save_load_config.params_filename"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.params_filename = "__params__"
self.basic_save_load(layer, model_path, configs)
self.basic_save_load(layer, model_path, params_filename="__params__")
def test_separate_params(self):
layer = LinearNet(784, 1)
model_path = "model.save_load_config.separate_params"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.separate_params = True
self.basic_save_load(layer, model_path, configs)
self.basic_save_load(layer, model_path, separate_params=True)
def test_output_spec(self):
train_layer = LinearNetReturnLoss(8, 8)
......@@ -428,16 +419,15 @@ class TestJitSaveLoadConfig(unittest.TestCase):
train_layer.clear_gradients()
model_path = "model.save_load_config.output_spec"
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [out]
output_spec = [out]
fluid.dygraph.jit.save(
layer=train_layer,
model_path=model_path,
input_spec=[x],
configs=configs)
output_spec=output_spec)
train_layer.eval()
infer_layer = fluid.dygraph.jit.load(model_path, configs=configs)
infer_layer = fluid.dygraph.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32'))
self.assertTrue(
......@@ -494,13 +484,12 @@ class TestJitPruneModelAndLoad(unittest.TestCase):
adam.minimize(loss)
train_layer.clear_gradients()
configs = fluid.dygraph.jit.SaveLoadConfig()
configs.output_spec = [hidden]
output_spec = [hidden]
fluid.dygraph.jit.save(
layer=train_layer,
model_path=self.model_path,
input_spec=[x],
configs=configs)
output_spec=output_spec)
return train_layer
......@@ -617,8 +606,6 @@ class TestJitSaveMultiCases(unittest.TestCase):
out = train_with_label(layer)
model_path = "test_prune_to_static_after_train"
configs = paddle.SaveLoadConfig()
configs.output_spec = [out]
paddle.jit.save(
layer,
model_path,
......@@ -626,7 +613,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
InputSpec(
shape=[None, 784], dtype='float32', name="image")
],
configs=configs)
output_spec=[out])
self.verify_inference_correctness(layer, model_path, True)
......@@ -634,10 +621,9 @@ class TestJitSaveMultiCases(unittest.TestCase):
layer = LinerNetWithLabel(784, 1)
model_path = "test_prune_to_static_no_train"
configs = paddle.SaveLoadConfig()
# TODO: no train, cannot get output_spec var here
# now only can use index
configs.output_spec = layer.forward.outputs[:1]
output_spec = layer.forward.outputs[:1]
paddle.jit.save(
layer,
model_path,
......@@ -645,7 +631,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
InputSpec(
shape=[None, 784], dtype='float32', name="image")
],
configs=configs)
output_spec=output_spec)
self.verify_inference_correctness(layer, model_path, True)
......@@ -676,10 +662,8 @@ class TestJitSaveMultiCases(unittest.TestCase):
train(layer)
model_path = "test_not_prune_output_spec_name_warning"
configs = paddle.SaveLoadConfig()
out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
configs.output_spec = [out]
paddle.jit.save(layer, model_path, configs=configs)
paddle.jit.save(layer, model_path, output_spec=[out])
self.verify_inference_correctness(layer, model_path)
......@@ -708,9 +692,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
train_with_label(layer)
model_path = "test_prune_to_static_after_train"
configs = paddle.SaveLoadConfig()
out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
configs.output_spec = [out]
with self.assertRaises(ValueError):
paddle.jit.save(
layer,
......@@ -719,7 +701,7 @@ class TestJitSaveMultiCases(unittest.TestCase):
InputSpec(
shape=[None, 784], dtype='float32', name="image")
],
configs=configs)
output_spec=[out])
class TestJitSaveLoadEmptyLayer(unittest.TestCase):
......
......@@ -63,6 +63,8 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
self.epoch_num = 1
self.batch_size = 128
self.batch_num = 10
# enable static mode
paddle.enable_static()
def train_and_save_model(self, only_params=False):
with new_program_scope():
......@@ -136,13 +138,12 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
self.params_filename = None
orig_param_dict = self.train_and_save_model()
config = paddle.SaveLoadConfig()
config.separate_params = True
config.model_filename = self.model_filename
load_param_dict, _ = fluid.load_dygraph(self.save_dirname, config)
load_param_dict, _ = fluid.load_dygraph(
self.save_dirname, model_filename=self.model_filename)
self.check_load_state_dict(orig_param_dict, load_param_dict)
new_load_param_dict = paddle.load(self.save_dirname, config)
new_load_param_dict = paddle.load(
self.save_dirname, model_filename=self.model_filename)
self.check_load_state_dict(orig_param_dict, new_load_param_dict)
def test_load_with_param_filename(self):
......@@ -151,12 +152,12 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
self.params_filename = "static_mnist.params"
orig_param_dict = self.train_and_save_model()
config = paddle.SaveLoadConfig()
config.params_filename = self.params_filename
load_param_dict, _ = fluid.load_dygraph(self.save_dirname, config)
load_param_dict, _ = fluid.load_dygraph(
self.save_dirname, params_filename=self.params_filename)
self.check_load_state_dict(orig_param_dict, load_param_dict)
new_load_param_dict = paddle.load(self.save_dirname, config)
new_load_param_dict = paddle.load(
self.save_dirname, params_filename=self.params_filename)
self.check_load_state_dict(orig_param_dict, new_load_param_dict)
def test_load_with_model_and_param_filename(self):
......@@ -165,13 +166,16 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
self.params_filename = "static_mnist.params"
orig_param_dict = self.train_and_save_model()
config = paddle.SaveLoadConfig()
config.params_filename = self.params_filename
config.model_filename = self.model_filename
load_param_dict, _ = fluid.load_dygraph(self.save_dirname, config)
load_param_dict, _ = fluid.load_dygraph(
self.save_dirname,
params_filename=self.params_filename,
model_filename=self.model_filename)
self.check_load_state_dict(orig_param_dict, load_param_dict)
new_load_param_dict = paddle.load(self.save_dirname, config)
new_load_param_dict = paddle.load(
self.save_dirname,
params_filename=self.params_filename,
model_filename=self.model_filename)
self.check_load_state_dict(orig_param_dict, new_load_param_dict)
def test_load_state_dict_from_save_params(self):
......
......@@ -20,8 +20,8 @@ __all__ = [
]
__all__ += [
'grad', 'LayerList', 'load', 'save', 'SaveLoadConfig', 'to_variable',
'no_grad', 'DataParallel'
'grad', 'LayerList', 'load', 'save', 'to_variable', 'no_grad',
'DataParallel'
]
__all__ += [
......@@ -50,7 +50,6 @@ from ..fluid.dygraph.base import to_variable #DEFINE_ALIAS
from ..fluid.dygraph.base import grad #DEFINE_ALIAS
from .io import save
from .io import load
from ..fluid.dygraph.jit import SaveLoadConfig #DEFINE_ALIAS
from ..fluid.dygraph.parallel import DataParallel #DEFINE_ALIAS
from ..fluid.dygraph.learning_rate_scheduler import NoamDecay #DEFINE_ALIAS
......
......@@ -26,6 +26,7 @@ import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.framework import Variable, _varbase_creator, _dygraph_tracer
from paddle.fluid.dygraph.jit import _SaveLoadConfig
from paddle.fluid.dygraph.io import _construct_program_holders, _construct_params_and_buffers, EXTRA_VAR_INFO_FILENAME
__all__ = [
......@@ -116,6 +117,29 @@ def _load_state_dict_from_save_params(model_path):
return load_param_dict
def _parse_load_config(configs):
supported_configs = [
'model_filename', 'params_filename', 'separate_params',
'keep_name_table'
]
# input check
for key in configs:
if key not in supported_configs:
raise ValueError(
"The additional config (%s) of `paddle.load` is not supported."
% key)
# construct inner config
inner_config = _SaveLoadConfig()
inner_config.model_filename = configs.get('model_filename', None)
inner_config.params_filename = configs.get('params_filename', None)
inner_config.separate_params = configs.get('separate_params', None)
inner_config.keep_name_table = configs.get('keep_name_table', None)
return inner_config
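For reference, the keyword sets accepted by each API, as collected from the parsing helpers in this commit:

# Supported keyword configs per API (from the parsers in this commit).
SUPPORTED_CONFIGS = {
    'paddle.jit.save': [
        'output_spec', 'model_filename', 'params_filename', 'separate_params'
    ],
    'paddle.jit.load': [
        'model_filename', 'params_filename', 'separate_params'
    ],
    'paddle.fluid.load_dygraph': [
        'model_filename', 'params_filename', 'separate_params',
        'keep_name_table'
    ],
    'paddle.load': [
        'model_filename', 'params_filename', 'separate_params',
        'keep_name_table'
    ],
}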
def save(obj, path):
'''
Save an object to the specified path.
......@@ -178,7 +202,7 @@ def save(obj, path):
pickle.dump(saved_obj, f, protocol=2)
def load(path, config=None):
def load(path, **configs):
'''
Load an object that can be used in paddle from the specified path.
......@@ -197,10 +221,20 @@ def load(path, config=None):
path(str) : The path to load the target object. Generally, the path is the target
file path; when loading results saved by
``paddle.jit.save/paddle.static.save_inference_model`` , the path is a directory.
config (SaveLoadConfig, optional): :ref:`api_imperative_jit_saveLoadConfig`
object that specifies additional configuration options; these options
are for compatibility with ``paddle.jit.save/paddle.static.save_inference_model``
formats. Default None.
configs (dict, optional): other load configuration options for compatibility. We do not
recommend using these configurations; if not necessary, DO NOT use them. Default None.
The following options are currently supported:
(1) model_filename (string): The filename to load the translated program of the target Layer.
Default filename is :code:`__model__` .
(2) params_filename (string): The filename to load all persistable variables in the target Layer.
Default file name is :code:`__variables__` .
(3) separate_params (bool): Configure whether to load the Layer parameters from separate files.
If True, each parameter will be loaded from a file separately, the file name is the parameter name,
and the params_filename configuration will not take effect. Default False.
(4) keep_name_table (bool): Configures whether to keep the ``structured_name -> parameter_name`` dict in the
loaded state dict. This dict is the debugging information saved when calling ``paddle.save`` .
It is generally only used for debugging and does not affect the actual training or inference.
By default, it will not be retained in the ``paddle.load`` result. Default: False.
Returns:
Object(Object): a target object that can be used in paddle
......@@ -242,8 +276,7 @@ def load(path, config=None):
"`paddle.load('model')`."
raise ValueError(error_msg % path)
if config is None:
config = paddle.SaveLoadConfig()
config = _parse_load_config(configs)
# 2. load target
load_result = None
......
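Finally, the `paddle.save/load` round trip from the removed `keep_name_table` docstring, restated against the new keyword surface (a sketch; printed values are illustrative):

import paddle

paddle.disable_static()
linear = paddle.nn.Linear(5, 1)
paddle.save(linear.state_dict(), "paddle_dy.pdparams")

# keyword form of the removed `config.keep_name_table = True`
para_state_dict = paddle.load("paddle_dy.pdparams", keep_name_table=True)

print(para_state_dict)
# the extra entry is 'StructuredToParameterName@@':
# {'bias': u'linear_0.b_0', 'weight': u'linear_0.w_0'}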