Unverified commit 23d1228c authored by Chen Weihang, committed by GitHub

remove ProgramTranslator.save_inference_model (#25740)

* remove ProgramTranslator.save_inference_model

* adapt save_quantized_model

* revert buffer check implementation

* remove useless import function
Parent 1b3081b1
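
For context, this commit removes the deprecated ProgramTranslator.save_inference_model in favor of the fluid.dygraph.jit.save API. The sketch below illustrates the migration; it is a minimal example assuming the fluid dygraph APIs of this Paddle version, and SimpleNet and the "./infer_model" path are illustrative only, not part of the commit.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear, declarative


class SimpleNet(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(SimpleNet, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative
    def forward(self, x):
        y = self._linear(x)
        loss = fluid.layers.mean(y)
        return y, loss


with fluid.dygraph.guard(fluid.CPUPlace()):
    net = SimpleNet(8, 8)
    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
    y, loss = net(x)

    # Old API removed by this commit:
    #     prog_trans = fluid.dygraph.ProgramTranslator()
    #     prog_trans.save_inference_model("./infer_model", fetch=[0])

    # New API: prune to the desired output via SaveLoadConfig.output_spec
    # and save with the jit interface instead.
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.output_spec = [y]
    fluid.dygraph.jit.save(
        layer=net,
        model_path="./infer_model",
        input_spec=[x],
        configs=configs)
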
@@ -74,8 +74,12 @@ class SaveCombineOpKernel : public framework::OpKernel<T> {
                             inp_var_names[i]));

       auto &tensor = inp_vars[i]->Get<framework::LoDTensor>();
+      PADDLE_ENFORCE_EQ(
+          tensor.IsInitialized(), true,
+          platform::errors::InvalidArgument(
+              "The Tensor of Variable(%s) to be saved is not initialized.",
+              inp_var_names[i]));
       // Serialize tensors one by one
       // Check types to see if a fp16 transformation is required
       auto in_dtype = tensor.type();
       auto out_dtype =
...
@@ -191,9 +191,6 @@ class ImperativeQuantAware(object):
         assert len(input_dtype) == len(
             feed), "The length of input_shape should be equal to feed's."

-        def _convert(model, *args):
-            return model(*args)
-
         prog_trans = dygraph.ProgramTranslator()
         with dygraph.guard():
             model.eval()
@@ -204,8 +201,18 @@ class ImperativeQuantAware(object):
                     dtype) if append_batch_size else raw_data.astype(dtype)
                 input_var = dygraph.to_variable(input_data)
                 input_vars.append(input_var)
-            prog_trans.get_output(_convert, model, *input_vars)
-            prog_trans.save_inference_model(dirname, feed, fetch)
+            outputs = prog_trans.get_output(model.forward, model, *input_vars)
+            input_spec = [input_vars[i] for i in feed]
+            configs = dygraph.jit.SaveLoadConfig()
+            configs.separate_params = True
+            if not isinstance(outputs, (tuple, list)):
+                outputs = [outputs]
+            configs.output_spec = [outputs[i] for i in fetch]
+            dygraph.jit.save(
+                layer=model,
+                model_path=dirname,
+                input_spec=input_spec,
+                configs=configs)

     def _get_quantized_counterpart(self, layer):
         quant_layers = tuple(self._quant_layers_map.values())
...
@@ -70,10 +70,13 @@ def save_dygraph(state_dict, model_path):
     suffix = ".pdparams"
     assert len(state_dict) > 0, "state_dict is empty, no need to save"

+    param_num = 0
     for k, v in state_dict.items():
-        if not isinstance(v, ParamBase):
-            suffix = ".pdopt"
-            break
+        if isinstance(v, ParamBase):
+            param_num += 1
+
+    if param_num == 0:
+        suffix = ".pdopt"

     model_dict = {}
     name_table = {}
...
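
The checkpoint.py change above keeps save_dygraph's behavior of picking the file suffix from the contents of the state dict, but now checks whether any entry is a ParamBase instead of whether any entry is not. A minimal usage sketch of that behavior, assuming the fluid dygraph API of this era (the Embedding layer and the "./emb" path are illustrative):

import paddle.fluid as fluid

with fluid.dygraph.guard():
    emb = fluid.dygraph.Embedding([10, 10])

    # Layer state_dict holds ParamBase entries -> saved as ./emb.pdparams
    fluid.dygraph.save_dygraph(emb.state_dict(), "./emb")

    adam = fluid.optimizer.Adam(
        learning_rate=0.01, parameter_list=emb.parameters())
    # Optimizer state_dict holds no ParamBase -> saved as ./emb.pdopt
    fluid.dygraph.save_dygraph(adam.state_dict(), "./emb")
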
@@ -650,99 +650,6 @@ class ProgramTranslator(object):
         source_code = ast_to_source_code(root_wrapper.node)
         return source_code

-    @deprecated(since='2.0', instead="paddle.imperative.jit.save")
-    @switch_to_static_graph
-    def save_inference_model(self, dirname, feed=None, fetch=None):
-        """
-        Saves current model as the inference model. It will prune the main_program
-        to build a new program especially for inference, and then save it and all
-        related parameters to given `dirname` . The saved inference model can be
-        loaded by `:ref:`api_fluid_io_load_inference_model` or `C++ inference APIs.
-
-        Args:
-            dirname (str): the directory to save the inference model.
-            feed (list[int], optional): the indices of the input variables of the
-                dygraph functions which will be saved as input variables in
-                inference model. If None, all input variables of the dygraph function
-                would be the inputs of the saved inference model. Default None.
-            fetch (list[int], optional): the indices of the returned variable of the
-                dygraph functions which will be saved as output variables in
-                inference model. If None, all output variables of the dygraph function
-                would be the outputs of the saved inference model. Default None.
-
-        Returns:
-            None
-
-        Examples:
-            .. code-block:: python
-
-                import numpy as np
-                import paddle.fluid as fluid
-                from paddle.fluid.dygraph import Linear
-                from paddle.fluid.dygraph import declarative
-                from paddle.fluid.dygraph import ProgramTranslator
-
-                class SimpleNet(fluid.dygraph.Layer):
-                    def __init__(self, in_size, out_size):
-                        super(SimpleNet, self).__init__()
-                        self._linear = Linear(in_size, out_size)
-
-                    @declarative
-                    def forward(self, x):
-                        y = self._linear(x)
-                        z = self._linear(y)
-                        loss = fluid.layers.mean(z)
-                        return z, loss
-
-                with fluid.dygraph.guard(fluid.CPUPlace()):
-                    net = SimpleNet(8, 8)
-                    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
-                    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
-                    for i in range(10):
-                        loss, out = net(x)
-                        loss.backward()
-                        adam.minimize(loss)
-                        net.clear_gradients()
-                # Save inference model.
-                # Note that fetch=[0] means we set 'z' as the inference output.
-                prog_trans = ProgramTranslator()
-                prog_trans.save_inference_model("./dy2stat_infer_model", fetch=[0])
-                # In this example, the inference model will be pruned based on output (z).
-                # The pruned inference program is going to be saved in the folder
-                # "./dy2stat_infer_model" and parameters are going to be saved in separate
-                # files in the folder.
-        """
-        def get_feed_fetch(var_list, partial_vars, return_name=False):
-            vars = [
-                var for var in var_list if isinstance(var, framework.Variable)
-            ]
-            if partial_vars:
-                vars = [vars[idx] for idx in partial_vars]
-            if return_name:
-                vars = [var.name for var in vars]
-            return vars
-
-        func_spec, (concrete_program,
-                    partial_layer) = self._program_cache.last()
-        # share paramBase data with parameter
-        scope = core.Scope()
-        for param_base in concrete_program.parameters:
-            param_tensor = scope.var(param_base.name).get_tensor()
-            src_tensor = param_base.value().get_tensor()
-            param_tensor._share_data_with(src_tensor)
-
-        feed_var_names = get_feed_fetch(concrete_program.inputs, feed, True)
-        fetch_vars = get_feed_fetch(concrete_program.outputs, fetch)
-
-        from paddle.fluid.io import save_inference_model
-        with scope_guard(scope):
-            save_inference_model(
-                dirname=dirname,
-                feeded_var_names=feed_var_names,
-                target_vars=fetch_vars,
-                executor=executor.Executor(framework._current_expected_place()),
-                main_program=concrete_program.main_program.clone())
-
     def get_program_cache(self):
         """
         Returns the ProgramCache instance. This method is used by PaddlePaddle
...
@@ -672,7 +672,7 @@ def save(layer, model_path, input_spec=None, configs=None):
     else:
         result_list = valid_vars

     if return_name:
-        result_list = [var.name for var in target_vars]
+        result_list = [var.name for var in result_list]

     return result_list
@@ -707,18 +707,27 @@ def save(layer, model_path, input_spec=None, configs=None):
     layer_func = FunctionSpec(type(layer).forward, [layer], {})
     concrete_program, _ = prog_cache.get_program(layer_func)

+    # NOTE: we maintain the mapping of variable name to
+    # structured name, the buffer variable (non-persistable)
+    # saved to inference program may not need by dygraph Layer,
+    # we only record the state_dict variable's structured name
+    state_names_dict = dict()
+    for structured_name, var in layer.state_dict().items():
+        state_names_dict[var.name] = structured_name
+
     # 3. share parameters from Layer to scope & record var info
     scope = core.Scope()
-    state_dict = layer.state_dict()
     extra_var_info = dict()
-    for structured_name, param_or_buffer in state_dict.items():
+    for param_or_buffer in concrete_program.parameters:
         # share to scope
         param_or_buffer_tensor = scope.var(param_or_buffer.name).get_tensor()
         src_tensor = param_or_buffer.value().get_tensor()
         param_or_buffer_tensor._share_data_with(src_tensor)
         # record var info
         extra_info_dict = dict()
-        extra_info_dict['structured_name'] = structured_name
+        if param_or_buffer.name in state_names_dict:
+            extra_info_dict['structured_name'] = state_names_dict[
+                param_or_buffer.name]
         extra_info_dict['stop_gradient'] = param_or_buffer.stop_gradient
         if isinstance(param_or_buffer, ParamBase):
             extra_info_dict['trainable'] = param_or_buffer.trainable
...
@@ -20,6 +20,7 @@ import paddle.fluid as fluid
 from paddle.fluid import ParamAttr
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.dygraph import declarative, ProgramTranslator
+from paddle.fluid.dygraph.io import VARIABLE_FILENAME

 SEED = 2020
 DATATYPE = 'float32'
@@ -616,7 +617,7 @@ def train_bmn(args, place, to_static):
             if batch_id == args.train_batch_num:
                 if to_static:
-                    program_translator.save_inference_model(args.infer_dir)
+                    fluid.dygraph.jit.save(bmn, args.infer_dir)
                 else:
                     fluid.dygraph.save_dygraph(bmn.state_dict(),
                                                args.dy_param_path)
@@ -721,7 +722,9 @@ class TestTrain(unittest.TestCase):
         # load inference model
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(
-             self.args.infer_dir, executor=exe)
+             self.args.infer_dir,
+             executor=exe,
+             params_filename=VARIABLE_FILENAME)
         pred_res = exe.run(inference_program,
                            feed={feed_target_names[0]: data},
                            fetch_list=fetch_targets)
...
@@ -25,6 +25,7 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
 from paddle.fluid.dygraph import declarative, ProgramTranslator
+from paddle.fluid.dygraph.io import VARIABLE_FILENAME

 SEED = 2020
@@ -494,8 +495,13 @@ def do_train(args, to_static):
             step += 1
     # save inference model
     if to_static:
-        program_translator.save_inference_model(
-            dirname=args.model_save_dir, feed=[0, 2], fetch=[1])
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        configs.output_spec = [crf_decode]
+        fluid.dygraph.jit.save(
+            layer=model,
+            model_path=args.model_save_dir,
+            input_spec=[words, length],
+            configs=configs)
     else:
         fluid.dygraph.save_dygraph(model.state_dict(), args.dy_param_path)
@@ -558,7 +564,9 @@ class TestLACModel(unittest.TestCase):
         # load inference model
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(
-             self.args.model_save_dir, executor=exe)
+             self.args.model_save_dir,
+             executor=exe,
+             params_filename=VARIABLE_FILENAME)
         words, targets, length = batch
         pred_res = exe.run(
...
@@ -196,35 +196,12 @@ class TestMNISTWithDeclarative(TestMNIST):
                         mnist.eval()
                         prediction, acc, avg_loss = mnist(img, label)
                         loss_data.append(avg_loss.numpy()[0])
-                        self.check_save_inference_model([dy_x_data, y_data],
-                                                        prog_trans, to_static,
-                                                        prediction)
                         # new save load check
                         self.check_jit_save_load(mnist, [dy_x_data], [img],
                                                  to_static, prediction)
                         break
         return loss_data

-    def check_save_inference_model(self, inputs, prog_trans, to_static, gt_out):
-        if to_static:
-            infer_model_path = "./test_mnist_inference_model"
-            prog_trans.save_inference_model(infer_model_path)
-            infer_out = self.load_and_run_inference(infer_model_path, inputs)
-            self.assertTrue(np.allclose(gt_out.numpy(), infer_out))
-
-    @switch_to_static_graph
-    def load_and_run_inference(self, model_path, inputs):
-        exe = fluid.Executor(self.place)
-        [inference_program, feed_target_names,
-         fetch_targets] = fluid.io.load_inference_model(
-             dirname=model_path, executor=exe)
-        assert len(inputs) == len(feed_target_names)
-        results = exe.run(inference_program,
-                          feed=dict(zip(feed_target_names, inputs)),
-                          fetch_list=fetch_targets)
-        return np.array(results[0])
-
     def check_jit_save_load(self, model, inputs, input_spec, to_static, gt_out):
         if to_static:
             infer_model_path = "./test_mnist_inference_model_by_jit_save"
...
@@ -23,6 +23,7 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
 from paddle.fluid.dygraph.jit import declarative
 from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from
+from paddle.fluid.dygraph.io import EXTRA_VAR_INFO_FILENAME

 SEED = 2020
@@ -60,21 +61,26 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
                 parameter_list=layer.parameters())
             for i in range(5):
-                loss, _ = layer(x)
+                loss, pred = layer(x)
                 loss.backward()
                 adam.minimize(loss)
                 layer.clear_gradients()
             # test for saving model in dygraph.guard
-            infer_model_dir = "./test_dy2stat_save_inference_model"
-            program_translator.save_inference_model(
-                infer_model_dir, feed=[0], fetch=[1])
+            infer_model_dir = "./test_dy2stat_save_inference_model_in_guard"
+            configs = fluid.dygraph.jit.SaveLoadConfig()
+            configs.output_spec = [pred]
+            fluid.dygraph.jit.save(
+                layer=layer,
+                model_path=infer_model_dir,
+                input_spec=[x],
+                configs=configs)
             # Check the correctness of the inference
             dygraph_out, _ = layer(x)
         self.check_save_inference_model(layer, [x_data], dygraph_out.numpy())
         self.check_save_inference_model(
-            layer, [x_data], dygraph_out.numpy(), fetch=[0])
+            layer, [x_data], dygraph_out.numpy(), fetch=[loss])
         self.check_save_inference_model(
-            layer, [x_data], dygraph_out.numpy(), feed=[0])
+            layer, [x_data], dygraph_out.numpy(), feed=[x])

     def check_save_inference_model(self,
                                    model,
@@ -86,11 +92,18 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
         expected_persistable_vars = set([p.name for p in model.parameters()])

         infer_model_dir = "./test_dy2stat_save_inference_model"
-        program_translator.save_inference_model(
-            infer_model_dir, feed=feed, fetch=fetch)
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        if fetch is not None:
+            configs.output_spec = fetch
+        configs.separate_params = True
+        fluid.dygraph.jit.save(
+            layer=model,
+            model_path=infer_model_dir,
+            input_spec=feed if feed else None,
+            configs=configs)
         saved_var_names = set([
             filename for filename in os.listdir(infer_model_dir)
-            if filename != '__model__'
+            if filename != '__model__' and filename != EXTRA_VAR_INFO_FILENAME
         ])
         self.assertEqual(saved_var_names, expected_persistable_vars)

         # Check the correctness of the inference
...
@@ -71,9 +71,9 @@ def monkey_patch_layer():
             key_name = key if use_structured_name else param.name
             try:
                 match_res = _check_match(key_name, param)
+                matched_param_state.append(match_res)
             except ValueError as err:
                 warnings.warn(("Skip loading for {}. ".format(key) + str(err)))
-            matched_param_state.append(match_res)

         if in_dygraph_mode():
             for param, state in matched_param_state:
...