Unverified · Commit bfc64801 authored by Difer, committed by GitHub

replace fluid.io.load_inference_model, fluid.io.save_inference_model in fluid...

replace fluid.io.load_inference_model, fluid.io.save_inference_model in fluid with 2.0 version (#55345)

* replace fluid.io.load_inference_model

* replace fluid.io.save_inference_model

* fix some bugs

* fix some bugs of load & save model

* fix some bugs

* fix test_inference_model_io bug

* fix word2vec_inference_model bug

* fix some bugs

* fix ValueError bug

* fix some bugs

* fix a warning error

* for debug

* for debug

* fix io error

* fix test_wordvec_book error

* remove debug print

* fix load_var bug

* for debug cinn test

* revert cinn & fix inference_pass_test in windows

* fix some bugs

* revert cinn & fix inference_pass_test in windows

* for debug vars

* for debug

* fix quant_dequant_test

* fix some path errors

* remove fluid save/load

* fix incubate-fleet save

* move some APIs from fluid.io to static.io
Parent eafc9889
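
For context, the migration this PR performs is summarized below as a minimal sketch: the toy network, variable names, and paths are illustrative and not taken from the PR; only the save/load calls mirror the old and new APIs that appear in the diff.

import paddle

paddle.enable_static()

# Illustrative network (not from the PR): a single softmax layer over a flat input.
image = paddle.static.data(name='img', shape=[None, 784], dtype='float32')
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())

path_prefix = "./infer_model/model"

# Old API removed by this PR: takes a directory and variable *names*,
# writes "__model__" plus parameter files under that directory.
# paddle.fluid.io.save_inference_model(
#     dirname="./infer_model", feeded_var_names=['img'],
#     target_vars=[predict], executor=exe)

# New static API: takes a path prefix and the feed *variables* themselves,
# writes "<prefix>.pdmodel" and "<prefix>.pdiparams".
paddle.static.save_inference_model(
    path_prefix, [image], [predict], exe,
    program=paddle.static.default_main_program())

[inference_program, feed_target_names, fetch_targets] = (
    paddle.static.load_inference_model(path_prefix, exe))

Note the keyword change from main_program= to program= and that feed inputs are now passed as variables rather than names; this accounts for most of the mechanical changes in the diff below.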
...@@ -714,7 +714,7 @@ class ParameterServerRuntime(RuntimeBase): ...@@ -714,7 +714,7 @@ class ParameterServerRuntime(RuntimeBase):
self, self,
executor, executor,
dirname, dirname,
feeded_var_names, feeded_vars,
target_vars, target_vars,
main_program=None, main_program=None,
export_for_deployment=True, export_for_deployment=True,
...@@ -735,28 +735,21 @@ class ParameterServerRuntime(RuntimeBase): ...@@ -735,28 +735,21 @@ class ParameterServerRuntime(RuntimeBase):
raise TypeError( raise TypeError(
"in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed" "in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed"
) )
paddle.fluid.io.save_inference_model( paddle.static.io.save_inference_model(
dirname, dirname,
feeded_var_names, feeded_vars,
target_vars, target_vars,
executor, executor,
main_program, program=main_program,
None,
None,
export_for_deployment,
legacy_format=legacy_format, legacy_format=legacy_format,
) )
else: else:
paddle.fluid.io.save_inference_model( paddle.static.save_inference_model(
dirname, dirname,
feeded_var_names, feeded_vars,
target_vars, target_vars,
executor, executor,
self.origin_main_program, program=self.origin_main_program,
None,
None,
export_for_deployment,
True,
legacy_format=legacy_format, legacy_format=legacy_format,
) )
......
...@@ -59,448 +59,9 @@ from . import core ...@@ -59,448 +59,9 @@ from . import core
from paddle.utils import deprecated from paddle.utils import deprecated
from paddle.fluid.framework import static_only from paddle.fluid.framework import static_only
__all__ = [ __all__ = reader.__all__
'save_inference_model',
'load_inference_model',
] + reader.__all__
_logger = get_logger( _logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'
) )
def prepend_feed_ops(
inference_program, feed_target_names, feed_holder_name='feed'
):
if len(feed_target_names) == 0:
return
global_block = inference_program.global_block()
feed_var = global_block.create_var(
name=feed_holder_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True,
)
for i, name in enumerate(feed_target_names):
if not global_block.has_var(name):
raise ValueError(
"The feeded_var_names[{i}]: '{name}' doesn't exist in pruned inference program. "
"Please check whether '{name}' is a valid feed_var name, or remove it from feeded_var_names "
"if '{name}' is not involved in the target_vars calculation.".format(
i=i, name=name
)
)
out = global_block.var(name)
global_block._prepend_op(
type='feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i},
)
def append_fetch_ops(
inference_program, fetch_target_names, fetch_holder_name='fetch'
):
global_block = inference_program.global_block()
fetch_var = global_block.create_var(
name=fetch_holder_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True,
)
for i, name in enumerate(fetch_target_names):
global_block.append_op(
type='fetch',
inputs={'X': [name]},
outputs={'Out': [fetch_var]},
attrs={'col': i},
)
@static_only
@deprecated(since="2.0.0", update_to="paddle.static.save_inference_model")
def save_inference_model(
dirname,
feeded_var_names,
target_vars,
executor,
main_program=None,
model_filename=None,
params_filename=None,
export_for_deployment=True,
program_only=False,
clip_extra=True,
legacy_format=False,
):
"""
Prune the given `main_program` to build a new program especially for inference,
and then save it and all related parameters to given `dirname` .
If you just want to save parameters of your trained model, please use the
:ref:`api_fluid_io_save_params` . You can refer to :ref:`api_guide_model_save_reader_en`
for more details.
Note:
The :code:`dirname` is used to specify the folder where inference model
structure and parameters are going to be saved. If you would like to save params of
Program in separate files, set `params_filename` None; if you would like to save all
params of Program in a single file, use `params_filename` to specify the file name.
Args:
dirname(str): The directory path to save the inference model.
feeded_var_names(list[str]): list of string. Names of variables that need to be fed
data during inference.
target_vars(list[Variable]): list of Variable. Variables from which we can get
inference results.
executor(Executor): The executor that saves the inference model. You can refer
to :ref:`api_guide_executor_en` for more details.
main_program(Program, optional): The original program, which will be pruned to
build the inference model. If is set None,
the global default :code:`_main_program_` will be used.
Default: None.
model_filename(str, optional): The name of file to save the inference program
itself. If is set None, a default filename
:code:`__model__` will be used.
params_filename(str, optional): The name of file to save all related parameters.
If it is set None, parameters will be saved
in separate files .
export_for_deployment(bool, optional): If True, programs are modified to only support
direct inference deployment. Otherwise,
more information will be stored for flexible
optimization and re-training. Currently, only
True is supported.
Default: True.
program_only(bool, optional): If True, It will save inference program only, and do not
save params of Program.
Default: False.
legacy_format(bool, optional): Whether to save program in legacy format.
Default: False.
Returns:
list, The fetch variables' name list.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
paddle.enable_static()
path = "./infer_model"
# User defined network, here a softmax regression example
image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(
input=predict, label=label,
reduction='none', use_softmax=False
)
avg_loss = paddle.mean(loss)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
# Feed data and train process
# Save inference model. Note we don't save label and loss in this example
fluid.io.save_inference_model(dirname=path,
feeded_var_names=['img'],
target_vars=[predict],
executor=exe)
# In this example, save_inference_model will prune the default
# main program according to the network's input node (img) and output node(predict).
# The pruned inference program is going to be saved in the "./infer_model/__model__"
# and parameters are going to be saved in separate files under folder
# "./infer_model".
"""
if isinstance(feeded_var_names, str):
feeded_var_names = [feeded_var_names]
elif export_for_deployment:
if len(feeded_var_names) > 0:
# TODO(paddle-dev): polish these code blocks
if not (
bool(feeded_var_names)
and all(isinstance(name, str) for name in feeded_var_names)
):
raise ValueError("'feed_var_names' should be a list of str.")
if isinstance(target_vars, Variable):
target_vars = [target_vars]
elif export_for_deployment:
if not (
bool(target_vars)
and all(isinstance(var, Variable) for var in target_vars)
):
raise ValueError("'target_vars' should be a list of Variable.")
main_program = paddle.static.io._get_valid_program(main_program)
# remind user to set auc_states to zeros if the program contains auc op
all_ops = main_program.global_block().ops
for op in all_ops:
# clear device of Op
device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
op._set_attr(device_attr_name, "")
if op.type == 'auc':
warnings.warn(
"please ensure that you have set the auc states to zeros before saving inference model"
)
break
with program_guard(main_program):
uniq_target_vars = []
for i, var in enumerate(target_vars):
uniq_target_vars.append(var)
target_vars = uniq_target_vars
target_var_name_list = [var.name for var in target_vars]
# when a pserver and a trainer running on the same machine, mkdir may conflict
save_dirname = dirname
try:
save_dirname = os.path.normpath(dirname)
os.makedirs(save_dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if model_filename is not None:
model_basename = os.path.basename(model_filename)
else:
model_basename = "__model__"
model_basename = os.path.join(save_dirname, model_basename)
# When export_for_deployment is true, we modify the program online so that
# it can only be loaded for inference directly. If it's false, the whole
# original program and related meta are saved so that future usage can be
# more flexible.
origin_program = main_program.clone()
if export_for_deployment:
main_program = main_program.clone()
global_block = main_program.global_block()
need_to_remove_op_index = []
for i, op in enumerate(global_block.ops):
op.desc.set_is_target(False)
if op.type == "feed" or op.type == "fetch":
need_to_remove_op_index.append(i)
for index in need_to_remove_op_index[::-1]:
global_block._remove_op(index)
main_program.desc.flush()
main_program = main_program._prune_with_input(
feeded_var_names=feeded_var_names, targets=target_vars
)
main_program = main_program._inference_optimize(prune_read_op=True)
fetch_var_names = [v.name for v in target_vars]
for target_v in target_vars:
if not main_program.global_block().has_var(target_v.name):
main_program.global_block().create_var(
name=target_v.name,
shape=target_v.shape,
dtype=target_v.dtype,
persistable=target_v.persistable,
)
prepend_feed_ops(main_program, feeded_var_names)
append_fetch_ops(main_program, fetch_var_names)
with open(model_basename, "wb") as f:
f.write(
main_program._remove_training_info(
clip_extra=clip_extra
).desc.serialize_to_string()
)
else:
# TODO(panyx0718): Save more information so that it can also be used
# for training and more flexible post-processing.
with open(model_basename + ".main_program", "wb") as f:
f.write(
main_program._remove_training_info(
clip_extra=clip_extra
).desc.serialize_to_string()
)
if program_only:
warnings.warn(
"save_inference_model specified the param `program_only` to True, It will not save params of Program."
)
return target_var_name_list
main_program._copy_dist_param_info_from(origin_program)
if params_filename is not None:
params_filename = os.path.basename(params_filename)
paddle.distributed.io.save_persistables(
executor, save_dirname, main_program, params_filename
)
return target_var_name_list
@static_only
@deprecated(since="2.0.0", update_to="paddle.static.load_inference_model")
def load_inference_model(
dirname,
executor,
model_filename=None,
params_filename=None,
pserver_endpoints=None,
):
"""
Load the inference model from a given directory. By this API, you can get the model
structure(Inference Program) and model parameters. If you just want to load
parameters of the pre-trained model, please use the :ref:`api_fluid_io_load_params` API.
You can refer to :ref:`api_guide_model_save_reader_en` for more details.
Args:
dirname(str): One of the following:
- The given directory path.
- Set to None when reading the model from memory.
executor(Executor): The executor to run for loading inference model.
See :ref:`api_guide_executor_en` for more details about it.
model_filename(str, optional): One of the following:
- The name of file to load the inference program.
- If it is None, the default filename ``__model__`` will be used.
- When ``dirname`` is ``None``, it must be set to a string containing model.
Default: ``None``.
params_filename(str, optional): It is only used for the case that all
parameters were saved in a single binary file. One of the following:
- The name of file to load all parameters.
- When ``dirname`` is ``None``, it must be set to a string containing all the parameters.
- If parameters were saved in separate files, set it as ``None``.
Default: ``None``.
pserver_endpoints(list, optional): It is only needed by the distributed inference.
If using a distributed look up table during the training,
this table is also needed by the inference process. Its value is
a list of pserver endpoints.
Returns:
list: The return of this API is a list with three elements:
(program, feed_target_names, fetch_targets). The `program` is a
``Program`` (refer to :ref:`api_guide_Program_en`), which is used for inference.
The `feed_target_names` is a list of ``str``, which contains names of variables
that need to feed data in the inference program. The `fetch_targets` is a list of
``Variable`` (refer to :ref:`api_guide_Program_en`). It contains variables from which
we can get inference results.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
paddle.enable_static()
# Build the model
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
data = paddle.static.data(name="img", shape=[-1, 64, 784])
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=data, y=w)
hidden_b = paddle.add(hidden_w, b)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
# Save the inference model
path = "./infer_model"
fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
target_vars=[hidden_b], executor=exe, main_program=main_prog)
# Demo one. No need to set the distributed look up table, because the
# training doesn't use a distributed look up table.
[inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(dirname=path, executor=exe))
tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
# Demo two. If the training uses a distributed look up table, the pserver
# endpoints list should be supported when loading the inference model.
# The below is just an example.
endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
[dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
fluid.io.load_inference_model(dirname=path,
executor=exe,
pserver_endpoints=endpoints))
# In this example, the inference program was saved in the file
# "./infer_model/__model__" and parameters were saved in
# separate files under the directory "./infer_model".
# By the inference program, feed_target_names and
# fetch_targets, we can use an executor to run the inference
# program for getting the inference result.
"""
load_from_memory = False
if dirname is not None:
load_dirname = os.path.normpath(dirname)
if not os.path.isdir(load_dirname):
raise ValueError("There is no directory named '%s'" % dirname)
if model_filename is None:
model_filename = '__model__'
model_filename = os.path.join(
load_dirname, os.path.basename(model_filename)
)
if params_filename is not None:
params_filename = os.path.basename(params_filename)
with open(model_filename, "rb") as f:
program_desc_str = f.read()
else:
load_from_memory = True
if params_filename is None:
raise ValueError(
"The path of params cannot be None when the directory path is None."
)
load_dirname = dirname
program_desc_str = model_filename
params_filename = params_filename
program = Program.parse_from_string(program_desc_str)
if not core._is_program_version_supported(program._version()):
raise ValueError(
"Unsupported program version: %d\n" % program._version()
)
# Binary data also need versioning.
paddle.distributed.io.load_persistables(
executor, load_dirname, program, params_filename
)
if pserver_endpoints:
program = _endpoints_replacement(program, pserver_endpoints)
feed_target_names = program.desc.get_feed_target_names()
fetch_target_names = program.desc.get_fetch_target_names()
fetch_targets = [
program.global_block().var(name) for name in fetch_target_names
]
return [program, feed_target_names, fetch_targets]
def _endpoints_replacement(program, endpoints):
ENDPOINT_MAP = "epmap"
for op in program.global_block().ops:
if op.has_attr(ENDPOINT_MAP):
op.set_attr(ENDPOINT_MAP, endpoints)
program._sync_with_cpp()
return program
...@@ -2268,17 +2268,15 @@ class Model: ...@@ -2268,17 +2268,15 @@ class Model:
infer_prog = prog.clone(for_test=True) infer_prog = prog.clone(for_test=True)
input_names = [v.name for v in self._adapter._input_vars['test']] inputs = list(self._adapter._input_vars['test'])
endpoints = self._adapter._endpoints['test']['output'] endpoints = self._adapter._endpoints['test']['output']
fluid.io.save_inference_model( paddle.static.save_inference_model(
model_path, model_path,
input_names, inputs,
endpoints, endpoints,
self._adapter._executor, self._adapter._executor,
main_program=infer_prog, program=infer_prog,
model_filename=model_filename,
params_filename=params_filename,
) )
def _run_one_epoch( def _run_one_epoch(
......
...@@ -18,7 +18,6 @@ import paddle ...@@ -18,7 +18,6 @@ import paddle
import paddle.distributed.transpiler.distribute_transpiler as dist_transpiler import paddle.distributed.transpiler.distribute_transpiler as dist_transpiler
from paddle import fluid from paddle import fluid
from paddle.distributed.fleet.meta_optimizers import RawProgramOptimizer from paddle.distributed.fleet.meta_optimizers import RawProgramOptimizer
from paddle.fluid import io
from paddle.fluid.compiler import CompiledProgram from paddle.fluid.compiler import CompiledProgram
from paddle.fluid.executor import Executor from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program from paddle.fluid.framework import Program
...@@ -31,6 +30,7 @@ from paddle.incubate.distributed.fleet.base import ( ...@@ -31,6 +30,7 @@ from paddle.incubate.distributed.fleet.base import (
Fleet, Fleet,
Mode, Mode,
) )
from paddle.static import io
class Collective(Fleet): class Collective(Fleet):
...@@ -77,11 +77,10 @@ class Collective(Fleet): ...@@ -77,11 +77,10 @@ class Collective(Fleet):
def save_inference_model( def save_inference_model(
self, self,
executor, executor,
dirname, path_prefix,
feeded_var_names=None, feeded_vas=None,
target_vars=None, fetch_vars=None,
main_program=None, program=None,
export_for_deployment=True,
legacy_format=False, legacy_format=False,
): ):
""" """
...@@ -94,22 +93,19 @@ class Collective(Fleet): ...@@ -94,22 +93,19 @@ class Collective(Fleet):
" Executor type." " Executor type."
) )
if main_program is None: if program is None:
main_program = self._origin_program program = self._origin_program
assert isinstance(main_program, Program), ( assert isinstance(program, Program), (
"In fleet.save_inference_model() function, main_program " "In fleet.save_inference_model() function, main_program "
"must be as Program type." "must be as Program type."
) )
io.save_inference_model( io.save_inference_model(
dirname, path_prefix,
feeded_var_names, feeded_vas,
target_vars, fetch_vars,
executor, executor,
main_program, program=program,
None,
None,
export_for_deployment,
legacy_format=legacy_format, legacy_format=legacy_format,
) )
......
...@@ -965,28 +965,26 @@ class FleetUtil: ...@@ -965,28 +965,26 @@ class FleetUtil:
""" """
day = str(day) day = str(day)
pass_id = str(pass_id) pass_id = str(pass_id)
feeded_var_names = [i.name for i in feeded_vars]
model_name = "inference_model" model_name = "inference_model"
# pull dense before save # pull dense before save
self.pull_all_dense_params(scope, program) self.pull_all_dense_params(scope, program)
if fleet.worker_index() == 0: if fleet.worker_index() == 0:
with fluid.scope_guard(scope): with fluid.scope_guard(scope):
if save_combine: if save_combine:
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
dirname=model_name, model_name,
feeded_var_names=feeded_var_names, feeded_vars,
target_vars=target_vars, target_vars,
executor=executor, executor,
main_program=program.clone(), program=program.clone(),
params_filename="params",
) )
else: else:
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
dirname=model_name, model_name,
feeded_var_names=feeded_var_names, feeded_vars,
target_vars=target_vars, target_vars,
executor=executor, executor,
main_program=program.clone(), program=program.clone(),
) )
configs = { configs = {
......
...@@ -238,11 +238,12 @@ def try_load_model_vars( ...@@ -238,11 +238,12 @@ def try_load_model_vars(
dump_prog_fn = program_type_trans( dump_prog_fn = program_type_trans(
dump_dir, dump_prog_fn, is_text_dump_program dump_dir, dump_prog_fn, is_text_dump_program
) )
(
[
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
) = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
dump_dir, dump_dir,
exe, exe,
model_filename=dump_prog_fn, model_filename=dump_prog_fn,
......
...@@ -74,7 +74,7 @@ from paddle.fluid.framework import ( ...@@ -74,7 +74,7 @@ from paddle.fluid.framework import (
) )
from paddle.fluid.framework import dygraph_only from paddle.fluid.framework import dygraph_only
from paddle.fluid.wrapped_decorator import wrap_decorator from paddle.fluid.wrapped_decorator import wrap_decorator
from paddle.fluid.io import save_inference_model from paddle.static.io import save_inference_model
from paddle.framework import in_dynamic_mode from paddle.framework import in_dynamic_mode
...@@ -1222,23 +1222,25 @@ def save(layer, path, input_spec=None, **configs): ...@@ -1222,23 +1222,25 @@ def save(layer, path, input_spec=None, **configs):
if 'forward' == attr_func or not isinstance(layer, Layer): if 'forward' == attr_func or not isinstance(layer, Layer):
model_filename = file_prefix + INFER_MODEL_SUFFIX model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX params_filename = file_prefix + INFER_PARAMS_SUFFIX
path_prefix = file_prefix
else: else:
model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX model_filename = file_prefix + '.' + attr_func + INFER_MODEL_SUFFIX
params_filename = ( params_filename = (
file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX file_prefix + '.' + attr_func + INFER_PARAMS_SUFFIX
) )
file_prefix = file_prefix + '.' + attr_func
file_prefix = os.path.join(model_path, file_prefix)
with scope_guard(scope): with scope_guard(scope):
input_vars = []
for var in concrete_program.main_program.clone().list_vars():
if var.name in input_var_names:
input_vars.append(var)
save_inference_model( save_inference_model(
dirname=model_path, path_prefix=file_prefix,
feeded_var_names=input_var_names, feed_vars=input_vars,
target_vars=output_vars, fetch_vars=output_vars,
executor=Executor(_current_expected_place()), executor=Executor(_current_expected_place()),
main_program=concrete_program.main_program.clone(), program=concrete_program.main_program.clone(),
model_filename=model_filename,
params_filename=params_filename,
export_for_deployment=configs._export_for_deployment,
program_only=configs._program_only,
clip_extra=configs.clip_extra, clip_extra=configs.clip_extra,
) )
...@@ -1893,24 +1895,24 @@ class TracedLayer: ...@@ -1893,24 +1895,24 @@ class TracedLayer:
with scope_guard(self._scope): with scope_guard(self._scope):
feeded_var_names = get_feed_fetch(self._feed_names, feed) feeded_var_names = get_feed_fetch(self._feed_names, feed)
target_var_names = get_feed_fetch(self._fetch_names, fetch) target_var_names = get_feed_fetch(self._fetch_names, fetch)
feed_vars = []
for name in feeded_var_names:
feed_var = self._program.global_block().vars.get(name, None)
assert feed_var is not None, f"{name} cannot be found"
feed_vars.append(feed_var)
target_vars = [] target_vars = []
for name in target_var_names: for name in target_var_names:
target_var = self._program.global_block().vars.get(name, None) target_var = self._program.global_block().vars.get(name, None)
assert target_var is not None, f"{name} cannot be found" assert target_var is not None, f"{name} cannot be found"
target_vars.append(target_var) target_vars.append(target_var)
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
legacy_format = kwargs.get('legacy_format', False) legacy_format = kwargs.get('legacy_format', False)
file_prefix = os.path.join(dirname, file_prefix)
save_inference_model( save_inference_model(
dirname=dirname, path_prefix=file_prefix,
feeded_var_names=feeded_var_names, feed_vars=feed_vars,
target_vars=target_vars, fetch_vars=target_vars,
executor=self._exe, executor=self._exe,
main_program=self._program.clone(), program=self._program.clone(),
model_filename=model_filename,
params_filename=params_filename,
clip_extra=clip_extra, clip_extra=clip_extra,
legacy_format=legacy_format, legacy_format=legacy_format,
) )
......
...@@ -34,7 +34,6 @@ from paddle.fluid import ( ...@@ -34,7 +34,6 @@ from paddle.fluid import (
) )
from paddle.fluid.executor import Executor, global_scope from paddle.fluid.executor import Executor, global_scope
from paddle.fluid.framework import Parameter, dygraph_not_support, static_only from paddle.fluid.framework import Parameter, dygraph_not_support, static_only
from paddle.fluid.io import append_fetch_ops, prepend_feed_ops
from paddle.fluid.log_helper import get_logger from paddle.fluid.log_helper import get_logger
from paddle.framework.io_utils import ( from paddle.framework.io_utils import (
_clone_var_in_block_, _clone_var_in_block_,
...@@ -138,6 +137,56 @@ def _clone_var_in_block(block, var): ...@@ -138,6 +137,56 @@ def _clone_var_in_block(block, var):
) )
def prepend_feed_ops(
inference_program, feed_target_names, feed_holder_name='feed'
):
if len(feed_target_names) == 0:
return
global_block = inference_program.global_block()
feed_var = global_block.create_var(
name=feed_holder_name,
type=core.VarDesc.VarType.FEED_MINIBATCH,
persistable=True,
)
for i, name in enumerate(feed_target_names):
if not global_block.has_var(name):
raise ValueError(
"The feeded_var_names[{i}]: '{name}' doesn't exist in pruned inference program. "
"Please check whether '{name}' is a valid feed_var name, or remove it from feeded_var_names "
"if '{name}' is not involved in the target_vars calculation.".format(
i=i, name=name
)
)
out = global_block.var(name)
global_block._prepend_op(
type='feed',
inputs={'X': [feed_var]},
outputs={'Out': [out]},
attrs={'col': i},
)
def append_fetch_ops(
inference_program, fetch_target_names, fetch_holder_name='fetch'
):
global_block = inference_program.global_block()
fetch_var = global_block.create_var(
name=fetch_holder_name,
type=core.VarDesc.VarType.FETCH_LIST,
persistable=True,
)
for i, name in enumerate(fetch_target_names):
global_block.append_op(
type='fetch',
inputs={'X': [name]},
outputs={'Out': [fetch_var]},
attrs={'col': i},
)
def normalize_program(program, feed_vars, fetch_vars): def normalize_program(program, feed_vars, fetch_vars):
""" """
...@@ -200,8 +249,7 @@ def normalize_program(program, feed_vars, fetch_vars): ...@@ -200,8 +249,7 @@ def normalize_program(program, feed_vars, fetch_vars):
op._set_attr(device_attr_name, "") op._set_attr(device_attr_name, "")
if op.type == 'auc': if op.type == 'auc':
warnings.warn( warnings.warn(
"Be sure that you have set auc states to 0 " "Be sure that you have set auc states to 0 before saving inference model."
"before saving inference model."
) )
break break
...@@ -521,15 +569,23 @@ def save_inference_model( ...@@ -521,15 +569,23 @@ def save_inference_model(
program = _get_valid_program(kwargs.get('program', None)) program = _get_valid_program(kwargs.get('program', None))
clip_extra = kwargs.get('clip_extra', True) clip_extra = kwargs.get('clip_extra', True)
program = normalize_program(program, feed_vars, fetch_vars) program = normalize_program(program, feed_vars, fetch_vars)
# serialize and save program # serialize and save program
legacy_format = kwargs.get('legacy_format', False) legacy_format = kwargs.get('legacy_format', False)
program_bytes = _serialize_program( program_bytes = _serialize_program(
program._remove_training_info(clip_extra=clip_extra), program._remove_training_info(clip_extra=clip_extra),
legacy_format=legacy_format, legacy_format=legacy_format,
) )
save_to_file(model_path, program_bytes) save_to_file(model_path, program_bytes)
vars = list(filter(is_persistable, program.list_vars())) vars = list(filter(is_persistable, program.list_vars()))
if len(list(vars)) == 0:
warnings.warn(
"no variable in your model, please ensure there are any variables in your model to save"
)
if len(vars) > 0: if len(vars) > 0:
save_dirname = os.path.dirname(params_path) save_dirname = os.path.dirname(params_path)
params_filename = os.path.basename(params_path) params_filename = os.path.basename(params_path)
...@@ -832,7 +888,9 @@ def load_inference_model(path_prefix, executor, **kwargs): ...@@ -832,7 +888,9 @@ def load_inference_model(path_prefix, executor, **kwargs):
else: else:
# check and norm path_prefix # check and norm path_prefix
path_prefix = _normalize_path_prefix(path_prefix) path_prefix = _normalize_path_prefix(path_prefix)
dir_path = os.path.dirname(path_prefix)
if not os.path.isdir(dir_path):
raise ValueError(f"There is no directory named {dir_path}")
# set model_path and params_path in new way, # set model_path and params_path in new way,
# path_prefix represents a file path without suffix in this case. # path_prefix represents a file path without suffix in this case.
if not kwargs: if not kwargs:
...@@ -867,6 +925,7 @@ def load_inference_model(path_prefix, executor, **kwargs): ...@@ -867,6 +925,7 @@ def load_inference_model(path_prefix, executor, **kwargs):
model_path, params_path model_path, params_path
) )
) )
program_bytes = load_from_file(model_path) program_bytes = load_from_file(model_path)
# deserialize bytes to program # deserialize bytes to program
...@@ -876,6 +935,7 @@ def load_inference_model(path_prefix, executor, **kwargs): ...@@ -876,6 +935,7 @@ def load_inference_model(path_prefix, executor, **kwargs):
if len(vars) > 0: if len(vars) > 0:
load_dirname = os.path.dirname(params_path) load_dirname = os.path.dirname(params_path)
params_filename = os.path.basename(params_path) params_filename = os.path.basename(params_path)
load_vars( load_vars(
executor, executor,
dirname=load_dirname, dirname=load_dirname,
...@@ -1139,6 +1199,9 @@ def load_vars( ...@@ -1139,6 +1199,9 @@ def load_vars(
else: else:
vars_from_memory = True vars_from_memory = True
if filename == '':
filename = None
if vars is None: if vars is None:
if main_program is None: if main_program is None:
main_program = default_main_program() main_program = default_main_program()
......
...@@ -110,8 +110,8 @@ def train( ...@@ -110,8 +110,8 @@ def train(
print("cost=" + str(cost_val) + " acc=" + str(acc_val)) print("cost=" + str(cost_val) + " acc=" + str(acc_val))
if cost_val < 0.4 and acc_val > 0.8: if cost_val < 0.4 and acc_val > 0.8:
if save_dirname is not None: if save_dirname is not None:
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
save_dirname, ["words"], prediction, exe save_dirname, data, prediction, exe
) )
return return
if math.isnan(float(cost_val)): if math.isnan(float(cost_val)):
...@@ -153,7 +153,7 @@ def infer(word_dict, use_cuda, save_dirname=None): ...@@ -153,7 +153,7 @@ def infer(word_dict, use_cuda, save_dirname=None):
inference_scope = fluid.core.Scope() inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope): with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc, # Use paddle.static.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed # the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that # data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators). # we want to obtain data from using fetch operators).
...@@ -161,7 +161,7 @@ def infer(word_dict, use_cuda, save_dirname=None): ...@@ -161,7 +161,7 @@ def infer(word_dict, use_cuda, save_dirname=None):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model(save_dirname, exe) ] = paddle.static.io.load_inference_model(save_dirname, exe)
word_dict_len = len(word_dict) word_dict_len = len(word_dict)
......
...@@ -188,8 +188,8 @@ def train(net_type, use_cuda, save_dirname, is_local): ...@@ -188,8 +188,8 @@ def train(net_type, use_cuda, save_dirname, is_local):
) )
if acc_value > 0.01: # Low threshold for speeding up CI if acc_value > 0.01: # Low threshold for speeding up CI
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
save_dirname, ["pixel"], [predict], exe save_dirname, images, [predict], exe
) )
return return
...@@ -228,7 +228,7 @@ def infer(use_cuda, save_dirname=None): ...@@ -228,7 +228,7 @@ def infer(use_cuda, save_dirname=None):
inference_scope = fluid.core.Scope() inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope): with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc, # Use paddle.static.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed # the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that # data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators). # we want to obtain data from using fetch operators).
...@@ -236,7 +236,7 @@ def infer(use_cuda, save_dirname=None): ...@@ -236,7 +236,7 @@ def infer(use_cuda, save_dirname=None):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model(save_dirname, exe) ] = paddle.static.io.load_inference_model(save_dirname, exe)
# The input's dimension of conv should be 4-D or 5-D. # The input's dimension of conv should be 4-D or 5-D.
# Use normalized image pixels as input data, which should be in the range [0, 1.0]. # Use normalized image pixels as input data, which should be in the range [0, 1.0].
...@@ -252,13 +252,16 @@ def infer(use_cuda, save_dirname=None): ...@@ -252,13 +252,16 @@ def infer(use_cuda, save_dirname=None):
) )
print("infer results: ", results[0]) print("infer results: ", results[0])
feeded_vars = [
fluid.io.save_inference_model( inference_program.global_block().var(name)
for name in feed_target_names
]
paddle.static.io.save_inference_model(
save_dirname, save_dirname,
feed_target_names, feeded_vars,
fetch_targets, fetch_targets,
exe, exe,
inference_program, program=inference_program,
) )
...@@ -269,7 +272,7 @@ def main(net_type, use_cuda, is_local=True): ...@@ -269,7 +272,7 @@ def main(net_type, use_cuda, is_local=True):
# Directory for saving the trained model # Directory for saving the trained model
temp_dir = tempfile.TemporaryDirectory() temp_dir = tempfile.TemporaryDirectory()
save_dirname = os.path.join( save_dirname = os.path.join(
temp_dir.name, "image_classification_" + net_type + ".inference.model" temp_dir.name, "image_classification_" + net_type + "_inference_model"
) )
train(net_type, use_cuda, save_dirname, is_local) train(net_type, use_cuda, save_dirname, is_local)
......
...@@ -137,23 +137,18 @@ def train( ...@@ -137,23 +137,18 @@ def train(
if float(acc_val) > 0.2 or pass_id == (PASS_NUM - 1): if float(acc_val) > 0.2 or pass_id == (PASS_NUM - 1):
# Smaller value to increase CI speed # Smaller value to increase CI speed
if save_dirname is not None: if save_dirname is not None:
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
save_dirname, save_dirname,
["img"], img,
[prediction], [prediction],
exe, exe,
model_filename=model_filename,
params_filename=params_filename,
) )
if save_full_dirname is not None: if save_full_dirname is not None:
fluid.io.save_inference_model( paddle.static.save_inference_model(
save_full_dirname, save_full_dirname,
[], [],
[], [],
exe, exe,
model_filename=model_filename,
params_filename=params_filename,
export_for_deployment=False,
) )
return return
else: else:
...@@ -206,7 +201,7 @@ def infer( ...@@ -206,7 +201,7 @@ def infer(
inference_scope = fluid.core.Scope() inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope): with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc, # Use paddle.static.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be feeded # the feed_target_names (the names of variables that will be feeded
# data using feed operators), and the fetch_targets (variables that # data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators). # we want to obtain data from using fetch operators).
...@@ -214,8 +209,9 @@ def infer( ...@@ -214,8 +209,9 @@ def infer(
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
save_dirname, exe, model_filename, params_filename save_dirname,
exe,
) )
# The input's dimension of conv should be 4-D or 5-D. # The input's dimension of conv should be 4-D or 5-D.
...@@ -241,11 +237,13 @@ def main(use_cuda, parallel, nn_type, combine): ...@@ -241,11 +237,13 @@ def main(use_cuda, parallel, nn_type, combine):
model_filename = None model_filename = None
params_filename = None params_filename = None
if not use_cuda and not parallel: if not use_cuda and not parallel:
save_dirname = "recognize_digits_" + nn_type + ".inference.model" save_dirname = "recognize_digits_" + nn_type + "_inference_model"
save_full_dirname = "recognize_digits_" + nn_type + ".train.model" save_full_dirname = "recognize_digits_" + nn_type + "_train_model"
if combine: if combine:
model_filename = "__model_combined__" model_filename = "__model_combined__"
params_filename = "__params_combined__" params_filename = "__params_combined__"
save_dirname = save_dirname + model_filename
save_full_dirname = params_filename + params_filename
# call train() with is_local argument to run distributed train # call train() with is_local argument to run distributed train
train( train(
......
...@@ -213,6 +213,15 @@ def train(use_cuda, save_dirname, is_local=True): ...@@ -213,6 +213,15 @@ def train(use_cuda, save_dirname, is_local=True):
'movie_title', 'movie_title',
'score', 'score',
] ]
feed_infer_order = [
'user_id',
'gender_id',
'age_id',
'job_id',
'movie_id',
'category_id',
'movie_title',
]
def train_loop(main_program): def train_loop(main_program):
exe.run(framework.default_startup_program()) exe.run(framework.default_startup_program())
...@@ -220,6 +229,10 @@ def train(use_cuda, save_dirname, is_local=True): ...@@ -220,6 +229,10 @@ def train(use_cuda, save_dirname, is_local=True):
feed_list = [ feed_list = [
main_program.global_block().var(var_name) for var_name in feed_order main_program.global_block().var(var_name) for var_name in feed_order
] ]
feed_infer_list = [
main_program.global_block().var(var_name)
for var_name in feed_infer_order
]
feeder = fluid.DataFeeder(feed_list, place) feeder = fluid.DataFeeder(feed_list, place)
PASS_NUM = 100 PASS_NUM = 100
...@@ -248,17 +261,9 @@ def train(use_cuda, save_dirname, is_local=True): ...@@ -248,17 +261,9 @@ def train(use_cuda, save_dirname, is_local=True):
if test_avg_cost < 6.0: if test_avg_cost < 6.0:
# if avg_cost less than 6.0, we think our code is good. # if avg_cost less than 6.0, we think our code is good.
if save_dirname is not None: if save_dirname is not None:
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
save_dirname, save_dirname,
[ feed_infer_list,
"user_id",
"gender_id",
"age_id",
"job_id",
"movie_id",
"category_id",
"movie_title",
],
[scale_infer], [scale_infer],
exe, exe,
) )
...@@ -302,7 +307,7 @@ def infer(use_cuda, save_dirname=None): ...@@ -302,7 +307,7 @@ def infer(use_cuda, save_dirname=None):
inference_scope = fluid.core.Scope() inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope): with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc, # Use paddle.static.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed # the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that # data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators). # we want to obtain data from using fetch operators).
...@@ -310,7 +315,7 @@ def infer(use_cuda, save_dirname=None): ...@@ -310,7 +315,7 @@ def infer(use_cuda, save_dirname=None):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model(save_dirname, exe) ] = paddle.static.io.load_inference_model(save_dirname, exe)
# Use the first data from paddle.dataset.movielens.test() as input # Use the first data from paddle.dataset.movielens.test() as input
assert feed_target_names[0] == "user_id" assert feed_target_names[0] == "user_id"
......
...@@ -159,9 +159,9 @@ def train( ...@@ -159,9 +159,9 @@ def train(
) )
if avg_cost_np[0] < 5.0: if avg_cost_np[0] < 5.0:
if save_dirname is not None and not pure_bf16: if save_dirname is not None and not pure_bf16:
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
save_dirname, save_dirname,
['firstw', 'secondw', 'thirdw', 'forthw'], [first_word, second_word, third_word, forth_word],
[predict_word], [predict_word],
exe, exe,
) )
...@@ -205,15 +205,16 @@ def infer(target, save_dirname=None): ...@@ -205,15 +205,16 @@ def infer(target, save_dirname=None):
exe = fluid.Executor(place) exe = fluid.Executor(place)
inference_scope = fluid.core.Scope() inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope): with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc, # Use paddle.static.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed # the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that # data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators). # we want to obtain data from using fetch operators).
[ [
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model(save_dirname, exe) ] = paddle.static.io.load_inference_model(save_dirname, exe)
word_dict = paddle.dataset.imikolov.build_dict() word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict) dict_size = len(word_dict)
...@@ -272,7 +273,8 @@ def infer(target, save_dirname=None): ...@@ -272,7 +273,8 @@ def infer(target, save_dirname=None):
infer_inputs = [to_infer_tensor(t) for t in infer_inputs] infer_inputs = [to_infer_tensor(t) for t in infer_inputs]
infer_config = fluid.core.NativeConfig() infer_config = fluid.core.NativeConfig()
infer_config.model_dir = save_dirname infer_config.prog_file = save_dirname + ".pdmodel"
infer_config.param_file = save_dirname + ".pdiparams"
if target == "cuda": if target == "cuda":
infer_config.use_gpu = True infer_config.use_gpu = True
infer_config.device = 0 infer_config.device = 0
...@@ -300,7 +302,7 @@ def main(target, is_sparse, is_parallel, use_bf16, pure_bf16): ...@@ -300,7 +302,7 @@ def main(target, is_sparse, is_parallel, use_bf16, pure_bf16):
temp_dir = tempfile.TemporaryDirectory() temp_dir = tempfile.TemporaryDirectory()
if not is_parallel: if not is_parallel:
save_dirname = os.path.join(temp_dir.name, "word2vec.inference.model") save_dirname = os.path.join(temp_dir.name, "word2vec_inference_model")
else: else:
save_dirname = None save_dirname = None
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
import paddle import paddle
from paddle import fluid, static from paddle import static
paddle.enable_static() paddle.enable_static()
...@@ -48,7 +48,5 @@ exe = static.Executor(cpu) ...@@ -48,7 +48,5 @@ exe = static.Executor(cpu)
exe.run(static.default_startup_program()) exe.run(static.default_startup_program())
static.io.save_inference_model("./resnet_model", [resnet_input], [temp7], exe) static.io.save_inference_model("./resnet_model", [resnet_input], [temp7], exe)
fluid.io.save_inference_model( static.io.save_inference_model("./resnet_model_1", [resnet_input], [temp7], exe)
"./resnet_model_1", [resnet_input.name], [temp7], exe
)
print('res', temp7.name) print('res', temp7.name)
...@@ -81,7 +81,7 @@ paddle.enable_static() ...@@ -81,7 +81,7 @@ paddle.enable_static()
# exe.run(paddle.static.default_startup_program()) # exe.run(paddle.static.default_startup_program())
# prog = paddle.static.default_main_program() # prog = paddle.static.default_main_program()
# paddle.fluid.io.save_inference_model("./stack", [x.name, y.name], [prediction], exe, prog) # paddle.static.io.save_inference_model("./stack", [x.name, y.name], [prediction], exe, prog)
# ``` # ```
# Second load and run model like: # Second load and run model like:
# ``` # ```
...@@ -150,11 +150,9 @@ class TestPaddleModel(OpMapperTest): ...@@ -150,11 +150,9 @@ class TestPaddleModel(OpMapperTest):
self.inference_program, self.inference_program,
self.feed_names, self.feed_names,
self.fetch_targets, self.fetch_targets,
] = paddle.fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
dirname=self.model_dir, path_prefix=self.model_dir,
executor=self.exe, executor=self.exe,
model_filename=self.model_filename,
params_filename=self.params_filename,
) )
self.param_vars = paddle.load( self.param_vars = paddle.load(
......
...@@ -217,12 +217,12 @@ def train(net_type, use_cuda, save_dirname, is_local): ...@@ -217,12 +217,12 @@ def train(net_type, use_cuda, save_dirname, is_local):
) )
if acc_value > 0.08: # Low threshold for speeding up CI if acc_value > 0.08: # Low threshold for speeding up CI
fluid.io.save_inference_model( paddle.static.io.save_inference_model(
save_dirname, save_dirname,
["pixel"], images,
[predict], [predict],
exe, exe,
main_program=train_program, program=train_program,
clip_extra=True, clip_extra=True,
) )
return return
...@@ -262,7 +262,7 @@ def infer(use_cuda, save_dirname=None): ...@@ -262,7 +262,7 @@ def infer(use_cuda, save_dirname=None):
inference_scope = fluid.core.Scope() inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope): with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc, # Use paddle.static.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed # the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that # data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators). # we want to obtain data from using fetch operators).
...@@ -270,7 +270,7 @@ def infer(use_cuda, save_dirname=None): ...@@ -270,7 +270,7 @@ def infer(use_cuda, save_dirname=None):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model(save_dirname, exe) ] = paddle.static.io.load_inference_model(save_dirname, exe)
# The input's dimension of conv should be 4-D or 5-D. # The input's dimension of conv should be 4-D or 5-D.
# Use normalized image pixels as input data, which should be in the range [0, 1.0]. # Use normalized image pixels as input data, which should be in the range [0, 1.0].
...@@ -287,12 +287,12 @@ def infer(use_cuda, save_dirname=None): ...@@ -287,12 +287,12 @@ def infer(use_cuda, save_dirname=None):
print("infer results: ", results[0]) print("infer results: ", results[0])
fluid.io.save_inference_model( paddle.static.save_inference_model(
save_dirname, save_dirname,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
exe, exe,
inference_program, program=inference_program,
clip_extra=True, clip_extra=True,
) )
......
...@@ -181,7 +181,7 @@ class TestBert(unittest.TestCase): ...@@ -181,7 +181,7 @@ class TestBert(unittest.TestCase):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
self.model_save_dir, self.model_save_dir,
executor=exe, executor=exe,
model_filename=self.model_filename, model_filename=self.model_filename,
......
...@@ -855,7 +855,7 @@ class TestTrain(unittest.TestCase): ...@@ -855,7 +855,7 @@ class TestTrain(unittest.TestCase):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
self.model_save_dir, self.model_save_dir,
executor=exe, executor=exe,
model_filename=self.model_filename, model_filename=self.model_filename,
......
...@@ -667,7 +667,7 @@ class TestLACModel(unittest.TestCase): ...@@ -667,7 +667,7 @@ class TestLACModel(unittest.TestCase):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
self.model_save_dir, self.model_save_dir,
executor=exe, executor=exe,
model_filename=self.model_filename, model_filename=self.model_filename,
......
...@@ -292,8 +292,8 @@ class TestMNISTWithToStatic(TestMNIST): ...@@ -292,8 +292,8 @@ class TestMNISTWithToStatic(TestMNIST):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
dirname=model_path, path_prefix=model_path,
executor=exe, executor=exe,
model_filename=model_filename, model_filename=model_filename,
params_filename=params_filename, params_filename=params_filename,
......
...@@ -608,7 +608,7 @@ def predict_static(args, data): ...@@ -608,7 +608,7 @@ def predict_static(args, data):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
args.model_save_dir, args.model_save_dir,
executor=exe, executor=exe,
model_filename=args.model_filename, model_filename=args.model_filename,
......
...@@ -355,7 +355,7 @@ class ResNetHelper: ...@@ -355,7 +355,7 @@ class ResNetHelper:
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
self.model_save_dir, self.model_save_dir,
executor=exe, executor=exe,
model_filename=self.model_filename, model_filename=self.model_filename,
......
...@@ -130,8 +130,8 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase): ...@@ -130,8 +130,8 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
dirname=model_path, path_prefix=model_path,
executor=exe, executor=exe,
model_filename=model_filename, model_filename=model_filename,
params_filename=params_filename, params_filename=params_filename,
......
...@@ -493,7 +493,7 @@ class TestSeResnet(unittest.TestCase): ...@@ -493,7 +493,7 @@ class TestSeResnet(unittest.TestCase):
inference_program, inference_program,
feed_target_names, feed_target_names,
fetch_targets, fetch_targets,
] = fluid.io.load_inference_model( ] = paddle.static.io.load_inference_model(
self.model_save_dir, self.model_save_dir,
executor=exe, executor=exe,
model_filename=self.model_filename, model_filename=self.model_filename,
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import errno
import os import os
import random import random
import tempfile import tempfile
...@@ -57,13 +58,36 @@ class InferencePassTest(unittest.TestCase): ...@@ -57,13 +58,36 @@ class InferencePassTest(unittest.TestCase):
self, dirname, feeded_var_names, target_vars, executor, program, scope self, dirname, feeded_var_names, target_vars, executor, program, scope
): ):
with fluid.scope_guard(scope): with fluid.scope_guard(scope):
# save models as combined to ensure that # save models as combined but sometimes params is null
# there won't be too many useless files # To adapt to this situation, the path needs to be adjusted to the old version format.
# after finishing a couple of tests. feeded_vars = []
fluid.io.save_inference_model( for var in program.list_vars():
dirname, feeded_var_names, target_vars, executor, program if var.name in feeded_var_names:
feeded_vars.append(var)
paddle.static.io.save_inference_model(
dirname,
feeded_vars,
target_vars,
executor,
program=program,
) )
# if the param save is null
# replace model_path to old version
param_file = dirname + ".pdiparams"
if not os.path.exists(param_file):
model_path = dirname + ".pdmodel"
try:
save_dirname = os.path.normpath(dirname)
os.makedirs(save_dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
model_path_old = os.path.join(save_dirname, "__model__")
if not os.path.exists(model_path_old):
os.rename(model_path, model_path_old)
def _get_paddle_outs(self, executor, program, scope): def _get_paddle_outs(self, executor, program, scope):
''' '''
Return PaddlePaddle outputs. Return PaddlePaddle outputs.
...@@ -109,7 +133,14 @@ class InferencePassTest(unittest.TestCase): ...@@ -109,7 +133,14 @@ class InferencePassTest(unittest.TestCase):
''' '''
Return a new object of AnalysisConfig. Return a new object of AnalysisConfig.
''' '''
# To adapt to save_inference_model
param_file = self.path + ".pdiparams"
if not os.path.exists(param_file):
config = AnalysisConfig(self.path) config = AnalysisConfig(self.path)
else:
config = AnalysisConfig(
self.path + ".pdmodel", self.path + ".pdiparams"
)
config.disable_gpu() config.disable_gpu()
config.switch_specify_input_names(True) config.switch_specify_input_names(True)
config.switch_ir_optim(True) config.switch_ir_optim(True)
......
@@ -432,7 +432,7 @@ def create_quant_model(
         inference_program,
         feed_target_names,
         fetch_targets,
-    ] = paddle.static.load_inference_model(
+    ] = paddle.static.io.load_inference_model(
         path_prefix=None,
         executor=exe,
         model_filename=model,
@@ -596,18 +596,19 @@ def create_quant_model(
             tensor = scope.var(var_name).get_tensor()
             tensor.set(np.ones(tensor.shape(), dtype=np.float32), place)

+    feed_vars = [
+        main_program.global_block().var(name) for name in feed_target_names
+    ]
     if save:
-        fluid.io.save_inference_model(
+        paddle.static.io.save_inference_model(
             'test_inference_model',
-            feed_target_names,
+            feed_vars,
             fetch_targets,
             exe,
-            main_program=main_program,
+            program=main_program,
         )
-    feed_vars = [
-        main_program.global_block().var(name) for name in feed_target_names
-    ]
     serialized_program = paddle.static.serialize_program(
         feed_vars, fetch_targets, program=main_program
     )
......
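`create_quant_model` above loads the program from in-memory bytes by passing `path_prefix=None`. A hedged sketch of that mode, assuming the buffers came from a previously saved model (paths illustrative):

import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

# Assumed to exist from an earlier save.
with open('./saved/infer.pdmodel', 'rb') as f:
    model_bytes = f.read()
with open('./saved/infer.pdiparams', 'rb') as f:
    params_bytes = f.read()

[inference_program, feed_target_names, fetch_targets] = (
    paddle.static.io.load_inference_model(
        path_prefix=None,              # None selects the in-memory mode
        executor=exe,
        model_filename=model_bytes,    # serialized program bytes
        params_filename=params_bytes,  # serialized parameter bytes
    )
)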
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import errno
+import os
 import random
 import unittest
 import warnings
@@ -23,7 +25,7 @@ from paddle import fluid
 from paddle.fluid import Program, Variable, core
 from paddle.fluid.core import AnalysisConfig, create_paddle_predictor
 from paddle.fluid.framework import IrGraph
-from paddle.fluid.io import append_fetch_ops, prepend_feed_ops
+from paddle.static.io import append_fetch_ops, prepend_feed_ops
 from paddle.static.quantization import (
     AddQuantDequantPass,
     OutScaleForInferencePass,
@@ -51,7 +53,7 @@ class QuantDequantTest(unittest.TestCase):
         self.dynamic_shape_params = None
         self.enable_lite = False
         self.lite_parameters = None
-        self.path = "./inference_pass/" + self.__class__.__name__ + "/"
+        self.path = "./inference_pass/" + self.__class__.__name__
         self.data = None
         self.label = None
         self.result = None
@@ -118,15 +120,36 @@ class QuantDequantTest(unittest.TestCase):
     def _save_models(
         self, dirname, feeded_var_names, target_vars, executor, program, scope
     ):
+        # Save models as combined, but the params file may be empty;
+        # in that case the path is adjusted to the old-version format below.
+        feeded_vars = []
+        for var in program.list_vars():
+            if var.name in feeded_var_names:
+                feeded_vars.append(var)
         with fluid.scope_guard(scope):
-            fluid.io.save_inference_model(
+            paddle.static.io.save_inference_model(
                 dirname,
-                feeded_var_names,
+                feeded_vars,
                 target_vars,
                 executor,
-                program,
+                program=program,
                 clip_extra=True,
             )
+        # If no params file was saved, rename the model file to the
+        # old-version layout ("__model__" inside the directory).
+        param_file = dirname + ".pdiparams"
+        if not os.path.exists(param_file):
+            model_path = dirname + ".pdmodel"
+            try:
+                save_dirname = os.path.normpath(dirname)
+                os.makedirs(save_dirname)
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    raise
+            model_path_old = os.path.join(save_dirname, "__model__")
+            if not os.path.exists(model_path_old):
+                os.rename(model_path, model_path_old)
     def _get_paddle_outs(self, feed, fetch_list, executor, program, scope):
         '''
@@ -172,7 +195,14 @@ class QuantDequantTest(unittest.TestCase):
         '''
         Return a new object of AnalysisConfig.
         '''
-        config = AnalysisConfig(self.path)
+        # Adapt to the files written by save_inference_model: use the
+        # (model, params) constructor only when a params file exists.
+        param_file = self.path + ".pdiparams"
+        if not os.path.exists(param_file):
+            config = AnalysisConfig(self.path)
+        else:
+            config = AnalysisConfig(
+                self.path + ".pdmodel", self.path + ".pdiparams"
+            )
         config.disable_gpu()
         config.switch_specify_input_names(True)
         config.switch_ir_optim(True)
......
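Both `_get_analysis_config` hunks repeat the same dispatch between the new and old on-disk layouts. Factored out, the pattern looks like this (a sketch only; `path` is an illustrative prefix such as "./inference_pass/SomeTest"):

import os

from paddle.fluid.core import AnalysisConfig


def make_config(path):
    # New layout: <path>.pdmodel + <path>.pdiparams.
    if os.path.exists(path + ".pdiparams"):
        return AnalysisConfig(path + ".pdmodel", path + ".pdiparams")
    # Old layout: a directory containing "__model__" and parameter files.
    return AnalysisConfig(path)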
@@ -435,10 +435,10 @@ class TestDistRunnerBase:
                 model_save_dir, "fleet_persistables"
             )
             infer_save_dir_fluid = os.path.join(
-                model_save_dir, "fluid_infer"
+                model_save_dir, "fluid_infer/infer"
             )
             infer_save_dir_fleet = os.path.join(
-                model_save_dir, "fleet_infer"
+                model_save_dir, "fleet_infer/infer"
             )
         else:
             model_save_dir_fluid = os.path.join(
@@ -448,25 +448,24 @@ class TestDistRunnerBase:
                 model_save_dir, "fleet_persistables_2"
             )
             infer_save_dir_fluid = os.path.join(
-                model_save_dir, "fluid_infer_2"
+                model_save_dir, "fluid_infer_2/infer_2"
             )
             infer_save_dir_fleet = os.path.join(
-                model_save_dir, "fleet_infer_2"
+                model_save_dir, "fleet_infer_2/infer_2"
             )
         paddle.distributed.io.save_persistables(
             exe, model_save_dir_fluid, fleet._origin_program
         )
         fleet.save_persistables(executor=exe, dirname=model_save_dir_fleet)
-        feeded_var_names = [var.name for var in feed_var_list]
-        fluid.io.save_inference_model(
-            infer_save_dir_fluid,
-            feeded_var_names,
-            [avg_cost],
-            exe,
-            fleet._origin_program,
+        paddle.static.io.save_inference_model(
+            path_prefix=infer_save_dir_fluid,
+            feed_vars=feed_var_list,
+            fetch_vars=[avg_cost],
+            executor=exe,
+            program=fleet._origin_program,
         )
         fleet.save_inference_model(
-            exe, infer_save_dir_fleet, feeded_var_names, [avg_cost]
+            exe, infer_save_dir_fleet, feed_var_list, [avg_cost]
         )

     def run_trainer(self, args):
......
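The "fluid_infer" → "fluid_infer/infer" renames above follow from the same prefix semantics: the first argument is a file prefix, not a directory, so a prefix is nested inside the old directory to keep one artifact folder per saver. Illustrative sketch (the scratch directory is an assumption):

import os

model_save_dir = '/tmp/dist_save'  # illustrative scratch directory
infer_save_dir_fluid = os.path.join(model_save_dir, 'fluid_infer/infer')
# paddle.static.save_inference_model(infer_save_dir_fluid, ...) then writes
#   /tmp/dist_save/fluid_infer/infer.pdmodel
#   /tmp/dist_save/fluid_infer/infer.pdiparams
# so the test still finds all inference files under fluid_infer/, as before.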
@@ -68,7 +68,6 @@ class TestDistMnistFleetSave(TestDistBase):
             if fluid_persistables[i] != fleet_persistables[i]:
                 self._rm_temp_files(dirname)
                 raise ValueError("Test Failed.")
-
         if len(fluid_infer_files) != len(fleet_infer_files):
             self._rm_temp_files(dirname)
             raise ValueError("Test Failed.")
......
@@ -314,7 +314,7 @@ def lm_model(
     paddle.assign(last_cell, output=init_cell)
     paddle.assign(last_hidden, output=init_hidden)
-    feeding_list = ['x', 'y', 'init_hidden', 'init_cell']
+    feeding_list = [x, y, init_hidden, init_cell]
     return loss, last_hidden, last_cell, feeding_list
@@ -365,7 +365,7 @@ class PaddingRNNTestBase(unittest.TestCase):
             self.loss,
             self.last_hidden,
             self.last_cell,
-            self.feed_order,
+            self.feed_list,
         ) = res_vars
         paddle.nn.clip.set_gradient_clip(
......
@@ -88,13 +88,12 @@ class ExecutorPaddingRNNTest(PaddingRNNTestBase):
         config = RNNConfig("test", rnn_model)
         with fluid.scope_guard(fluid.Scope()):
             self.train(config, use_program_cache)
-            fluid.io.save_inference_model(
-                main_program=self.main_program,
-                feeded_var_names=self.feed_order,
-                target_vars=[self.loss, self.last_hidden, self.last_cell],
+            paddle.static.io.save_inference_model(
+                path_prefix="padding_rnn." + rnn_model + ".inference_model",
+                feed_vars=self.feed_list,
+                fetch_vars=[self.loss, self.last_hidden, self.last_cell],
                 executor=self.exe,
-                dirname="padding_rnn." + rnn_model + ".inference_model",
-                params_filename="__params__",
+                program=self.main_program,
             )

     def test_inference_output(self):
@@ -134,8 +133,8 @@ class ExecutorPaddingRNNTest(PaddingRNNTestBase):
                 inference_program,
                 feed_target_names,
                 fetch_targets,
-            ] = fluid.io.load_inference_model(
-                save_dirname, self.exe, params_filename="__params__"
+            ] = paddle.static.io.load_inference_model(
+                save_dirname, self.exe
             )
             results = self.exe.run(
......
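After the rewrite, the PaddingRNN test loads without a `params_filename`, relying on the default `<prefix>.pdiparams`. The general round trip looks like this (a sketch; the prefix reuses the earlier illustrative save):

import numpy as np
import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

[inference_program, feed_target_names, fetch_targets] = (
    paddle.static.load_inference_model('./saved/infer', exe)
)
# The returned names key the feed dict; the targets go to fetch_list.
results = exe.run(
    inference_program,
    feed={feed_target_names[0]: np.random.rand(1, 4).astype('float32')},
    fetch_list=fetch_targets,
)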
@@ -29,7 +29,7 @@ from paddle.distributed.io import (
 from paddle.fluid import core, executor
 from paddle.fluid.compiler import CompiledProgram
 from paddle.fluid.framework import Program, program_guard
-from paddle.fluid.io import load_inference_model, save_inference_model
+from paddle.static.io import load_inference_model, save_inference_model

 paddle.enable_static()
@@ -82,15 +82,15 @@ class TestBook(unittest.TestCase):
         )

         # Separated model and unified model
-        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
+        save_inference_model(
+            MODEL_DIR, [x, y], [avg_cost], exe, program=program
+        )
         save_inference_model(
             UNI_MODEL_DIR,
-            ["x", "y"],
+            [x, y],
             [avg_cost],
             exe,
-            program,
-            'model',
-            'params',
+            program=program,
         )
         main_program = program.clone()._prune_with_input(
             feeded_var_names=["x", "y"], targets=[avg_cost]
@@ -104,12 +104,22 @@ class TestBook(unittest.TestCase):
         importlib.reload(executor)  # reload to build a new scope

         model_0 = InferModel(load_inference_model(MODEL_DIR, exe))
-        with open(os.path.join(UNI_MODEL_DIR, 'model'), "rb") as f:
+        with open((UNI_MODEL_DIR + '.pdmodel'), "rb") as f:
             model_str = f.read()
-        model_1 = InferModel(
-            load_inference_model(None, exe, model_str, params_str)
+        model_1 = InferModel(load_inference_model(UNI_MODEL_DIR, exe))
+        # To be compatible with the load_inference_model_distributed function
+        tmp_model_filename = MODEL_DIR + '.pdmodel'
+        tmp_params_filename = MODEL_DIR + '.pdiparams'
+        model_2 = InferModel(
+            load_inference_model_distributed(
+                root_path.name,
+                exe,
+                model_filename=tmp_model_filename,
+                params_filename=tmp_params_filename,
+            )
         )
-        model_2 = InferModel(load_inference_model_distributed(MODEL_DIR, exe))
         model_3 = InferModel(
             load_inference_model_distributed(None, exe, model_str, params_str)
         )
@@ -134,11 +144,11 @@ class TestBook(unittest.TestCase):
         self.assertRaises(
             ValueError,
-            fluid.io.load_inference_model,
+            paddle.static.io.load_inference_model,
             None,
             exe,
-            model_str,
-            None,
+            model_filename=model_str,
+            params_filename=None,
         )
         self.assertRaises(
             ValueError,
@@ -173,7 +183,9 @@ class TestSaveInferenceModel(unittest.TestCase):
         exe = executor.Executor(place)
         exe.run(init_program, feed={}, fetch_list=[])

-        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
+        save_inference_model(
+            MODEL_DIR, [x, y], [avg_cost], exe, program=program
+        )
         root_path.cleanup()

     def test_save_inference_model_with_auc(self):
@@ -202,10 +214,10 @@ class TestSaveInferenceModel(unittest.TestCase):
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter("always")
             save_inference_model(
-                MODEL_DIR, ["x", "y"], [avg_cost], exe, program
+                MODEL_DIR, [x, y], [avg_cost], exe, program=program
             )
             root_path.cleanup()
-            expected_warn = "please ensure that you have set the auc states to zeros before saving inference model"
+            expected_warn = "Be sure that you have set auc states to 0 before saving inference model."
             self.assertTrue(len(w) > 0)
             self.assertTrue(expected_warn == str(w[0].message))
@@ -237,11 +249,13 @@ class TestInstance(unittest.TestCase):
         cp_prog = CompiledProgram(program)

-        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, cp_prog)
+        save_inference_model(
+            MODEL_DIR, [x, y], [avg_cost], exe, program=cp_prog
+        )
         self.assertRaises(
             TypeError,
             save_inference_model,
-            [MODEL_DIR, ["x", "y"], [avg_cost], [], cp_prog],
+            [MODEL_DIR, [x, y], [avg_cost], [], cp_prog],
         )
         root_path.cleanup()
@@ -535,7 +549,7 @@ class TestLoadInferenceModelError(unittest.TestCase):
         place = core.CPUPlace()
         exe = executor.Executor(place)
         self.assertRaises(
-            ValueError, load_inference_model, './test_not_exist_dir', exe
+            ValueError, load_inference_model, './test_not_exist_dir/model', exe
         )
         self.assertRaises(
             ValueError,
......
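`TestLoadInferenceModelError` now points at a prefix ('./test_not_exist_dir/model') rather than a bare directory, since the new loader resolves `<prefix>.pdmodel` and raises ValueError when nothing is there. A small sketch of the failure mode being tested (path is illustrative):

import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

try:
    paddle.static.load_inference_model('./test_not_exist_dir/model', exe)
except ValueError as err:
    # Expected: no ./test_not_exist_dir/model.pdmodel exists.
    print('load failed as expected:', err)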
@@ -79,12 +79,12 @@ class TestSaveInferenceModelAPIError(unittest.TestCase):
             with self.assertRaisesRegex(
                 ValueError, "not involved in the target_vars calculation"
             ):
-                fluid.io.save_inference_model(
-                    dirname=os.path.join(self.temp_dir.name, 'model'),
-                    feeded_var_names=['x', 'y'],
-                    target_vars=[z],
+                paddle.static.io.save_inference_model(
+                    path_prefix=os.path.join(self.temp_dir.name, 'model'),
+                    feed_vars=[x, y],
+                    fetch_vars=[z],
                     executor=exe,
-                    main_program=main_prog,
+                    program=main_prog,
                 )
......
@@ -125,13 +125,11 @@ class TestLoadStateDictFromSaveInferenceModel(unittest.TestCase):
                 param.name
             )
-        fluid.io.save_inference_model(
+        paddle.static.io.save_inference_model(
             self.save_dirname,
-            ["img"],
+            [img],
             [prediction],
             exe,
-            model_filename=self.model_filename,
-            params_filename=self.params_filename,
         )
         return static_param_dict
......
@@ -35,13 +35,11 @@ class TestSaveModelWithoutVar(unittest.TestCase):
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter("always")
-            fluid.io.save_inference_model(
-                dirname='test',
-                feeded_var_names=['data'],
-                target_vars=[data_plus],
-                executor=exe,
-                model_filename='model',
-                params_filename='params',
+            paddle.static.io.save_inference_model(
+                'test',
+                data,
+                [data_plus],
+                exe,
             )
             expected_warn = "no variable in your model, please ensure there are any variables in your model to save"
             self.assertTrue(len(w) > 0)
......
@@ -56,7 +56,9 @@ def generate_dot_for_model(model_path, save_graph_dir, save_graph_name):
             inference_program,
             feed_target_names,
             fetch_targets,
-        ] = paddle.fluid.io.load_inference_model(model_path, exe)
+        ] = paddle.static.io.load_inference_model(
+            model_path, exe, model_filename='__model__'
+        )
     else:
         [
             inference_program,
......
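`generate_dot_for_model` keeps reading old-format directories by naming the legacy file explicitly; with `model_filename` given, the first argument is treated as the directory that holds it. Hedged sketch (the directory path is illustrative):

import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

# Old-format model: a directory with "__model__" plus per-variable params.
[program, feed_names, fetch_targets] = paddle.static.io.load_inference_model(
    './legacy_model_dir', exe, model_filename='__model__'
)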
@@ -195,7 +195,9 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase):
             inference_program,
             feed_target_names,
             fetch_targets,
-        ] = paddle.fluid.io.load_inference_model(model_path, exe)
+        ] = paddle.static.io.load_inference_model(
+            model_path, exe, model_filename=None, params_filename=None
+        )
     else:
         [
             inference_program,
......
@@ -173,7 +173,9 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase):
             inference_program,
             feed_target_names,
             fetch_targets,
-        ] = paddle.fluid.io.load_inference_model(model_path, exe)
+        ] = paddle.static.io.load_inference_model(
+            model_path, exe, model_filename=None, params_filename=None
+        )
     else:
         [
             inference_program,
......
@@ -13,7 +13,7 @@
 # limitations under the License.

 import paddle
-from paddle import fluid, static
+from paddle import static

 # For paddlepaddle version >=2.0rc, we need to set paddle.enable_static()
 paddle.enable_static()
@@ -30,7 +30,7 @@
 exe = static.Executor(cpu)
 exe.run(static.default_startup_program())

-fluid.io.save_inference_model(
-    "./elementwise_add_model", [a.name, b.name], [a1], exe
+paddle.static.io.save_inference_model(
+    "./elementwise_add_model", [a, b], [a1], exe
 )
 print('input and output names are: ', a.name, b.name, a1.name)
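The hunk above elides the definitions of a, b, and a1; a runnable version under the 2.0 API might look like the following (the shapes and variable definitions are reconstructions, not taken from the PR):

import paddle
from paddle import static

paddle.enable_static()

a = static.data(name='a', shape=[None, 1], dtype='float32')
b = static.data(name='b', shape=[None, 1], dtype='float32')
a1 = paddle.add(a, b)  # the elementwise add being exported

cpu = paddle.CPUPlace()
exe = static.Executor(cpu)
exe.run(static.default_startup_program())

paddle.static.io.save_inference_model(
    "./elementwise_add_model", [a, b], [a1], exe
)
print('input and output names are: ', a.name, b.name, a1.name)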
@@ -17,7 +17,7 @@ import time
 import numpy as np

 import paddle
-from paddle import fluid, static
+from paddle import static
 from paddle.fluid.core import AnalysisConfig, create_paddle_predictor
@@ -84,7 +84,7 @@ def create_model(input_names, input_shapes, input_dtypes, fn, attrs=None):
         model_name += "_" + str(input_shapes[0][i])
     print("save model:", model_name)
-    fluid.io.save_inference_model(model_name, input_args_names, [res], exe)
+    paddle.static.io.save_inference_model(model_name, input_args, [res], exe)
     print('output name is: ', res.name)
......