Unverified commit 8c8b42f2, authored by LiuChiachi, committed by GitHub

Update path name of saving in hapi (#28462)

* update hapi save_inference_model output pathname

* update hapi save_inference_model output pathname

* use new 2.0-api paddle.static.io.load_inference_model

* add unittests to increase coverage rate
Parent 0073f9bd
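As context for the diff below, the high-level `Model.save(path, training=False)` entry point now takes a single `dirname/file_prefix` path prefix (the same convention as `paddle.jit.save`) instead of a save directory plus separate model/params filenames. A minimal usage sketch follows; the `output/mnist` prefix and the exact suffix names are illustrative assumptions, while the LeNet network and InputSpec shape are taken from the updated tests.

```python
# Hedged sketch of the new save-path semantics; the "output/mnist" prefix
# and the suffix values are illustrative assumptions.
import paddle
from paddle.static import InputSpec
from paddle.vision.models import LeNet

model = paddle.Model(LeNet(), inputs=[InputSpec([None, 1, 28, 28], 'float32', 'x')])
model.prepare()

# "output/mnist" is a dirname/file_prefix pair: the inference artifacts are
# written into "output/" with the file prefix "mnist" plus the inference
# suffixes (e.g. mnist.pdmodel and mnist.pdiparams).
model.save('output/mnist', training=False)
```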
@@ -38,6 +38,7 @@ from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator, FunctionSpec
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.fluid.layers.utils import flatten
from paddle.fluid.layers import collective
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
@@ -1708,38 +1709,17 @@ class Model(object):
cbks.on_end('test', logs)
return outputs
def _save_inference_model(self,
save_dir,
model_filename=None,
params_filename=None,
model_only=False):
def _save_inference_model(self, path):
"""
Save inference model can be in static or dynamic mode.
Save the inference model; it can be used in static or dynamic mode.
Args:
save_dir (str): The directory path to save the inference model.
model_filename (str|None): The name of the file to save the inference
model itself. If it is set to None, a default filename
:code:`__model__` will be used.
params_filename (str|None): The name of the file to save all related
parameters. If it is set to None, parameters will be saved
in separate files.
model_only (bool): If True, only the inference model will be saved,
without the parameters. Default: False.
path (str): The path prefix for saving the model. The format is
``dirname/file_prefix`` or ``file_prefix``.
Returns:
list: The fetch variables' name list
None
"""
def get_inout_spec(all_vars, return_name=False):
result_list = []
valid_vars = [var for var in all_vars if isinstance(var, Variable)]
result_list = valid_vars
if return_name:
result_list = [var.name for var in result_list]
return result_list
if fluid.in_dygraph_mode():
with fluid.framework._dygraph_guard(None):
layer = self.network
@@ -1752,68 +1732,25 @@ class Model(object):
"'inputs' was not specified when Model initialization, so the input shape to be saved will be the shape derived from the user's actual inputs. The input shape to be saved is %s. For saving correct input shapes, please provide 'inputs' for Model initialization."
% self._input_info[0])
layer.forward = paddle.jit.to_static(
layer.forward, input_spec=self._inputs)
# 1. input check
prog_translator = ProgramTranslator()
if not prog_translator.enable_to_static:
raise RuntimeError(
"save_inference_model doesn't work when setting ProgramTranslator.enable to False."
)
if not isinstance(layer, Layer):
raise TypeError(
"The input layer should be 'Layer', but received layer type is %s."
% type(layer))
# 2. get program of declarative Layer.forward
concrete_program = layer.forward.concrete_program
# NOTE: we maintain the mapping of variable name to
# structured name; the buffer variables (non-persistable)
# saved to the inference program may not be needed by the
# dygraph Layer, so we only record the structured names of
# state_dict variables
state_names_dict = dict()
for structured_name, var in layer.state_dict().items():
state_names_dict[var.name] = structured_name
# 3. share parameters from Layer to scope & record var info
scope = core.Scope()
extra_var_info = dict()
for param_or_buffer in concrete_program.parameters:
# share to scope
param_or_buffer_tensor = scope.var(
param_or_buffer.name).get_tensor()
src_tensor = param_or_buffer.value().get_tensor()
param_or_buffer_tensor._share_data_with(src_tensor)
# record var info
extra_info_dict = dict()
if param_or_buffer.name in state_names_dict:
extra_info_dict['structured_name'] = state_names_dict[
param_or_buffer.name]
extra_info_dict[
'stop_gradient'] = param_or_buffer.stop_gradient
if isinstance(param_or_buffer, ParamBase):
extra_info_dict['trainable'] = param_or_buffer.trainable
extra_var_info[param_or_buffer.name] = extra_info_dict
# 4. build input & output spec
input_var_names = get_inout_spec(concrete_program.inputs, True)
output_vars = get_inout_spec(concrete_program.outputs)
# 5. save inference model
with scope_guard(scope):
return fluid.io.save_inference_model(
dirname=save_dir,
feeded_var_names=input_var_names,
target_vars=output_vars,
executor=Executor(_current_expected_place()),
main_program=concrete_program.main_program.clone(),
model_filename=model_filename,
params_filename=params_filename,
program_only=model_only)
paddle.jit.save(layer, path, input_spec=self._inputs)
else:
# path check
file_prefix = os.path.basename(path)
if file_prefix == "":
raise ValueError(
"The input path MUST be format of dirname/file_prefix "
"[dirname\\file_prefix in Windows system], but received "
"file_prefix is empty string.")
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
model_path = dirname
model_filename = file_prefix + INFER_MODEL_SUFFIX
params_filename = file_prefix + INFER_PARAMS_SUFFIX
prog = self._adapter._progs.get('test', None)
assert prog, \
"Model is not ready, please call `model.prepare()` first"
@@ -1823,15 +1760,14 @@ class Model(object):
input_names = [v.name for v in self._adapter._input_vars['test']]
endpoints = self._adapter._endpoints['test']['output']
return fluid.io.save_inference_model(
save_dir,
fluid.io.save_inference_model(
model_path,
input_names,
endpoints,
self._adapter._executor,
main_program=infer_prog,
model_filename=model_filename,
params_filename=params_filename,
program_only=model_only)
params_filename=params_filename)
def _run_one_epoch(self, data_loader, callbacks, mode, logs={}):
outputs = []
......
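For the static-graph branch above, the new prefix is split back into the directory and per-file names that `fluid.io.save_inference_model` still expects. A small sketch of that decomposition, mirroring the added code (the `output/mnist` value is a hypothetical example):

```python
# How the static branch derives dirname / filenames from the path prefix;
# "output/mnist" is a hypothetical input, not a value from the test suite.
import os
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX

path = "output/mnist"
file_prefix = os.path.basename(path)                  # "mnist"
dirname = os.path.dirname(path)                       # "output"
model_filename = file_prefix + INFER_MODEL_SUFFIX     # inference model file name
params_filename = file_prefix + INFER_PARAMS_SUFFIX   # merged parameters file name
```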
@@ -591,8 +591,8 @@ class TestModelFunction(unittest.TestCase):
with fluid.scope_guard(new_scope):
exe = fluid.Executor(place)
[inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=save_dir, executor=exe))
paddle.static.io.load_inference_model(
path_prefix=save_dir, executor=exe))
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
@@ -787,7 +787,6 @@ class TestModelWithLRScheduler(unittest.TestCase):
class TestRaiseError(unittest.TestCase):
def test_input_without_name(self):
net = MyModel()
inputs = [InputSpec([None, 10], 'float32')]
labels = [InputSpec([None, 1], 'int64', 'label')]
with self.assertRaises(ValueError):
@@ -810,6 +809,18 @@ class TestRaiseError(unittest.TestCase):
model.save(save_dir, training=False)
paddle.enable_static()
def test_save_infer_model_without_file_prefix(self):
paddle.enable_static()
net = LeNet()
inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
model = Model(net, inputs)
model.prepare()
path = ""
tensor_img = np.array(
np.random.random((1, 1, 28, 28)), dtype=np.float32)
with self.assertRaises(ValueError):
model.save(path, training=False)
if __name__ == '__main__':
unittest.main()
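The updated tests load the saved artifacts through the 2.0 API, which takes a `path_prefix` instead of a `dirname`. A rough round-trip sketch under the same assumptions as the example at the top (`output/mnist` is hypothetical):

```python
# Loading the inference model saved under a path prefix; the prefix must
# match whatever was passed to model.save(..., training=False).
import numpy as np
import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
program, feed_names, fetch_targets = paddle.static.io.load_inference_model(
    path_prefix='output/mnist', executor=exe)

img = np.random.random((1, 1, 28, 28)).astype('float32')
outs = exe.run(program, feed={feed_names[0]: img}, fetch_list=fetch_targets)
```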