diff --git a/paddle/fluid/operators/save_combine_op.h b/paddle/fluid/operators/save_combine_op.h
index 9ddb751f40a4fda76e029c5f6ccb5fd63c96062a..0246c42d433255ebb35f259b78cab1cce2118475 100644
--- a/paddle/fluid/operators/save_combine_op.h
+++ b/paddle/fluid/operators/save_combine_op.h
@@ -74,8 +74,12 @@ class SaveCombineOpKernel : public framework::OpKernel<T> {
                             inp_var_names[i]));
 
       auto &tensor = inp_vars[i]->Get<framework::LoDTensor>();
+      PADDLE_ENFORCE_EQ(
+          tensor.IsInitialized(), true,
+          platform::errors::InvalidArgument(
+              "The Tensor of Variable(%s) to be saved is not initialized.",
+              inp_var_names[i]));
       // Serialize tensors one by one
-      // Check types to see if a fp16 transformation is required
       auto in_dtype = tensor.type();
       auto out_dtype =
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
index c77648ac7b56e2c1a2f7bae6311fe7e5c2eceaa4..cece2ba4a3d788ab2df4c0a6a847c9597d36047a 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
@@ -191,9 +191,6 @@ class ImperativeQuantAware(object):
         assert len(input_dtype) == len(
             feed), "The length of input_shape should be equal to feed's."
 
-        def _convert(model, *args):
-            return model(*args)
-
         prog_trans = dygraph.ProgramTranslator()
         with dygraph.guard():
             model.eval()
@@ -204,8 +201,18 @@ class ImperativeQuantAware(object):
                     dtype) if append_batch_size else raw_data.astype(dtype)
                 input_var = dygraph.to_variable(input_data)
                 input_vars.append(input_var)
-            prog_trans.get_output(_convert, model, *input_vars)
-            prog_trans.save_inference_model(dirname, feed, fetch)
+            outputs = prog_trans.get_output(model.forward, model, *input_vars)
+            input_spec = [input_vars[i] for i in feed]
+            configs = dygraph.jit.SaveLoadConfig()
+            configs.separate_params = True
+            if not isinstance(outputs, (tuple, list)):
+                outputs = [outputs]
+            configs.output_spec = [outputs[i] for i in fetch]
+            dygraph.jit.save(
+                layer=model,
+                model_path=dirname,
+                input_spec=input_spec,
+                configs=configs)
 
     def _get_quantized_counterpart(self, layer):
         quant_layers = tuple(self._quant_layers_map.values())
diff --git a/python/paddle/fluid/dygraph/checkpoint.py b/python/paddle/fluid/dygraph/checkpoint.py
index d359910167d6361076429c19b35ff12814ca648e..de4330cf51669ebbbfb1ca7e9edcc0c82b1d0e72 100644
--- a/python/paddle/fluid/dygraph/checkpoint.py
+++ b/python/paddle/fluid/dygraph/checkpoint.py
@@ -70,10 +70,13 @@ def save_dygraph(state_dict, model_path):
     suffix = ".pdparams"
     assert len(state_dict) > 0, "state_dict is empty, no need to save"
 
+    param_num = 0
     for k, v in state_dict.items():
-        if not isinstance(v, ParamBase):
-            suffix = ".pdopt"
-            break
+        if isinstance(v, ParamBase):
+            param_num += 1
+
+    if param_num == 0:
+        suffix = ".pdopt"
 
     model_dict = {}
     name_table = {}
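The checkpoint.py change above replaces an early-exit scan (any non-ParamBase value forced the ".pdopt" suffix) with a count of ParamBase entries, so a state_dict that mixes parameters with other tensors still saves as ".pdparams". A minimal standalone sketch of the new rule, with a hypothetical `FakeParam` standing in for `ParamBase`:

```python
# Standalone sketch of the new suffix rule; `FakeParam` is an
# illustrative stand-in for ParamBase, not a Paddle class.
class FakeParam(object):
    pass

def pick_suffix(state_dict):
    # Only a state_dict with no parameters at all (e.g. a pure
    # optimizer state_dict) is saved with the ".pdopt" suffix now.
    param_num = sum(1 for v in state_dict.values()
                    if isinstance(v, FakeParam))
    return ".pdparams" if param_num > 0 else ".pdopt"

assert pick_suffix({"w": FakeParam()}) == ".pdparams"
# A mixed dict no longer flips to ".pdopt" on the first non-parameter:
assert pick_suffix({"w": FakeParam(), "step": 3}) == ".pdparams"
assert pick_suffix({"step": 3}) == ".pdopt"
```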
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py
index 9701ebd7b4fccf21afa3af161a99b63fbe8f847b..64fbb51f9a5f7a2937b5f7791cf0a004517bceab 100644
--- a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py
+++ b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py
@@ -650,99 +650,6 @@ class ProgramTranslator(object):
         source_code = ast_to_source_code(root_wrapper.node)
         return source_code
 
-    @deprecated(since='2.0', instead="paddle.imperative.jit.save")
-    @switch_to_static_graph
-    def save_inference_model(self, dirname, feed=None, fetch=None):
-        """
-        Saves current model as the inference model. It will prune the main_program
-        to build a new program especially for inference, and then save it and all
-        related parameters to given `dirname` . The saved inference model can be
-        loaded by `:ref:`api_fluid_io_load_inference_model` or `C++ inference APIs.
-
-        Args:
-            dirname (str): the directory to save the inference model.
-            feed (list[int], optional): the indices of the input variables of the
-                dygraph functions which will be saved as input variables in
-                inference model. If None, all input variables of the dygraph function
-                would be the inputs of the saved inference model. Default None.
-            fetch (list[int], optional): the indices of the returned variable of the
-                dygraph functions which will be saved as output variables in
-                inference model. If None, all output variables of the dygraph function
-                would be the outputs of the saved inference model. Default None.
-        Returns:
-            None
-        Examples:
-            .. code-block:: python
-                import numpy as np
-                import paddle.fluid as fluid
-                from paddle.fluid.dygraph import Linear
-                from paddle.fluid.dygraph import declarative
-                from paddle.fluid.dygraph import ProgramTranslator
-
-                class SimpleNet(fluid.dygraph.Layer):
-                    def __init__(self, in_size, out_size):
-                        super(SimpleNet, self).__init__()
-                        self._linear = Linear(in_size, out_size)
-
-                    @declarative
-                    def forward(self, x):
-                        y = self._linear(x)
-                        z = self._linear(y)
-                        loss = fluid.layers.mean(z)
-                        return z, loss
-
-                with fluid.dygraph.guard(fluid.CPUPlace()):
-                    net = SimpleNet(8, 8)
-                    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.1, parameter_list=net.parameters())
-                    x = fluid.dygraph.to_variable(np.random.random((4, 8)).astype('float32'))
-                    for i in range(10):
-                        loss, out = net(x)
-                        loss.backward()
-                        adam.minimize(loss)
-                        net.clear_gradients()
-                    # Save inference model.
-                    # Note that fetch=[0] means we set 'z' as the inference output.
-                    prog_trans = ProgramTranslator()
-                    prog_trans.save_inference_model("./dy2stat_infer_model", fetch=[0])
-
-                    # In this example, the inference model will be pruned based on output (z).
-                    # The pruned inference program is going to be saved in the folder
-                    # "./dy2stat_infer_model" and parameters are going to be saved in separate
-                    # files in the folder.
-        """
-
-        def get_feed_fetch(var_list, partial_vars, return_name=False):
-            vars = [
-                var for var in var_list if isinstance(var, framework.Variable)
-            ]
-            if partial_vars:
-                vars = [vars[idx] for idx in partial_vars]
-            if return_name:
-                vars = [var.name for var in vars]
-
-            return vars
-
-        func_spec, (concrete_program,
-                    partial_layer) = self._program_cache.last()
-        # share paramBase data with parameter
-        scope = core.Scope()
-        for param_base in concrete_program.parameters:
-            param_tensor = scope.var(param_base.name).get_tensor()
-            src_tensor = param_base.value().get_tensor()
-            param_tensor._share_data_with(src_tensor)
-
-        feed_var_names = get_feed_fetch(concrete_program.inputs, feed, True)
-        fetch_vars = get_feed_fetch(concrete_program.outputs, fetch)
-
-        from paddle.fluid.io import save_inference_model
-        with scope_guard(scope):
-            save_inference_model(
-                dirname=dirname,
-                feeded_var_names=feed_var_names,
-                target_vars=fetch_vars,
-                executor=executor.Executor(framework._current_expected_place()),
-                main_program=concrete_program.main_program.clone())
-
     def get_program_cache(self):
         """
         Returns the ProgramCache instance. This method is used by PaddlePaddle
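The `ProgramTranslator.save_inference_model` API removed above is superseded by `fluid.dygraph.jit.save`, which this patch adopts throughout. A hedged sketch of the equivalent call, condensed from the removed docstring example, pruning the saved program with `output_spec` variables instead of integer `fetch` indices:

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear, declarative

class SimpleNet(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(SimpleNet, self).__init__()
        self._linear = Linear(in_size, out_size)

    @declarative
    def forward(self, x):
        return self._linear(x)

with fluid.dygraph.guard(fluid.CPUPlace()):
    net = SimpleNet(8, 8)
    x = fluid.dygraph.to_variable(
        np.random.random((4, 8)).astype('float32'))
    z = net(x)

    # Keep only `z` as the inference output, like the old fetch=[0].
    configs = fluid.dygraph.jit.SaveLoadConfig()
    configs.output_spec = [z]
    fluid.dygraph.jit.save(
        layer=net,
        model_path="./dy2stat_infer_model",
        input_spec=[x],
        configs=configs)
```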
diff --git a/python/paddle/fluid/dygraph/jit.py b/python/paddle/fluid/dygraph/jit.py
index 531aa37cb1dee840c6194932504648a3ba84f7dc..64faae247fbf80637a45429eaa1d5833df122a1a 100644
--- a/python/paddle/fluid/dygraph/jit.py
+++ b/python/paddle/fluid/dygraph/jit.py
@@ -672,7 +672,7 @@ def save(layer, model_path, input_spec=None, configs=None):
         else:
             result_list = valid_vars
         if return_name:
-            result_list = [var.name for var in target_vars]
+            result_list = [var.name for var in result_list]
 
     return result_list
 
@@ -707,18 +707,27 @@ def save(layer, model_path, input_spec=None, configs=None):
         layer_func = FunctionSpec(type(layer).forward, [layer], {})
 
     concrete_program, _ = prog_cache.get_program(layer_func)
 
+    # NOTE: we maintain a mapping from variable name to structured name;
+    # buffer variables (non-persistable) saved into the inference program
+    # may not be needed by the dygraph Layer, so we only record structured
+    # names for the variables in the state_dict.
+    state_names_dict = dict()
+    for structured_name, var in layer.state_dict().items():
+        state_names_dict[var.name] = structured_name
+
     # 3. share parameters from Layer to scope & record var info
     scope = core.Scope()
-    state_dict = layer.state_dict()
     extra_var_info = dict()
-    for structured_name, param_or_buffer in state_dict.items():
+    for param_or_buffer in concrete_program.parameters:
         # share to scope
         param_or_buffer_tensor = scope.var(param_or_buffer.name).get_tensor()
         src_tensor = param_or_buffer.value().get_tensor()
         param_or_buffer_tensor._share_data_with(src_tensor)
         # record var info
         extra_info_dict = dict()
-        extra_info_dict['structured_name'] = structured_name
+        if param_or_buffer.name in state_names_dict:
+            extra_info_dict['structured_name'] = state_names_dict[
+                param_or_buffer.name]
         extra_info_dict['stop_gradient'] = param_or_buffer.stop_gradient
         if isinstance(param_or_buffer, ParamBase):
             extra_info_dict['trainable'] = param_or_buffer.trainable
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
index 5896d3a29294861bde07a025678a9d78bebf5a6b..72c283c3b956d7655f28f983fd554cb20b732764 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
@@ -20,6 +20,7 @@ import paddle.fluid as fluid
 from paddle.fluid import ParamAttr
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.dygraph import declarative, ProgramTranslator
+from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 
 SEED = 2020
 DATATYPE = 'float32'
@@ -616,7 +617,7 @@ def train_bmn(args, place, to_static):
 
             if batch_id == args.train_batch_num:
                 if to_static:
-                    program_translator.save_inference_model(args.infer_dir)
+                    fluid.dygraph.jit.save(bmn, args.infer_dir)
                 else:
                     fluid.dygraph.save_dygraph(bmn.state_dict(),
                                                args.dy_param_path)
@@ -721,7 +722,9 @@ class TestTrain(unittest.TestCase):
         # load inference model
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(
-             self.args.infer_dir, executor=exe)
+             self.args.infer_dir,
+             executor=exe,
+             params_filename=VARIABLE_FILENAME)
         pred_res = exe.run(inference_program,
                            feed={feed_target_names[0]: data},
                            fetch_list=fetch_targets)
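Because `fluid.dygraph.jit.save` combines all parameters into a single file named by `VARIABLE_FILENAME` (unless `separate_params` is set), the `load_inference_model` calls in these tests now pass `params_filename` explicitly. A minimal load-side sketch under that assumption; the model directory here is hypothetical:

```python
import paddle.fluid as fluid
from paddle.fluid.dygraph.io import VARIABLE_FILENAME

exe = fluid.Executor(fluid.CPUPlace())
# "./dy2stat_infer_model" is a hypothetical directory written by jit.save;
# params_filename tells the loader the name of the combined parameter file.
[inference_program, feed_target_names, fetch_targets] = (
    fluid.io.load_inference_model(
        "./dy2stat_infer_model",
        executor=exe,
        params_filename=VARIABLE_FILENAME))
```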
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
index 8141f9f462c1682188189ef3cfcef37f576f504c..305e1a2f58a677650ed76ac6e19ea7707eca2a52 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_lac.py
@@ -25,6 +25,7 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph import to_variable
 from paddle.fluid.dygraph import Embedding, Linear, GRUUnit
 from paddle.fluid.dygraph import declarative, ProgramTranslator
+from paddle.fluid.dygraph.io import VARIABLE_FILENAME
 
 SEED = 2020
 
@@ -494,8 +495,13 @@ def do_train(args, to_static):
             step += 1
     # save inference model
     if to_static:
-        program_translator.save_inference_model(
-            dirname=args.model_save_dir, feed=[0, 2], fetch=[1])
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        configs.output_spec = [crf_decode]
+        fluid.dygraph.jit.save(
+            layer=model,
+            model_path=args.model_save_dir,
+            input_spec=[words, length],
+            configs=configs)
     else:
         fluid.dygraph.save_dygraph(model.state_dict(), args.dy_param_path)
 
@@ -558,7 +564,9 @@ class TestLACModel(unittest.TestCase):
         # load inference model
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(
-             self.args.model_save_dir, executor=exe)
+             self.args.model_save_dir,
+             executor=exe,
+             params_filename=VARIABLE_FILENAME)
 
         words, targets, length = batch
         pred_res = exe.run(
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
index 09be10e6c8a7e9b676e434b410f702c3fe7bdb91..b8aa0379638fadd19b4956a56c1a3e4811558535 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py
@@ -196,35 +196,12 @@ class TestMNISTWithDeclarative(TestMNIST):
                     mnist.eval()
                     prediction, acc, avg_loss = mnist(img, label)
                     loss_data.append(avg_loss.numpy()[0])
-                    self.check_save_inference_model([dy_x_data, y_data],
-                                                    prog_trans, to_static,
-                                                    prediction)
                     # new save load check
                     self.check_jit_save_load(mnist, [dy_x_data], [img],
                                              to_static, prediction)
                     break
         return loss_data
 
-    def check_save_inference_model(self, inputs, prog_trans, to_static, gt_out):
-        if to_static:
-            infer_model_path = "./test_mnist_inference_model"
-            prog_trans.save_inference_model(infer_model_path)
-            infer_out = self.load_and_run_inference(infer_model_path, inputs)
-            self.assertTrue(np.allclose(gt_out.numpy(), infer_out))
-
-    @switch_to_static_graph
-    def load_and_run_inference(self, model_path, inputs):
-        exe = fluid.Executor(self.place)
-        [inference_program, feed_target_names,
-         fetch_targets] = fluid.io.load_inference_model(
-             dirname=model_path, executor=exe)
-        assert len(inputs) == len(feed_target_names)
-        results = exe.run(inference_program,
-                          feed=dict(zip(feed_target_names, inputs)),
-                          fetch_list=fetch_targets)
-
-        return np.array(results[0])
-
     def check_jit_save_load(self, model, inputs, input_spec, to_static, gt_out):
         if to_static:
             infer_model_path = "./test_mnist_inference_model_by_jit_save"
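The updated tests lean on two `SaveLoadConfig` knobs: `output_spec` prunes the inference program to concrete output Variables (replacing the old integer `fetch` list), and `separate_params` writes one file per parameter, matching the on-disk layout the removed `save_inference_model` produced. A hypothetical helper collecting that pattern, not part of this patch:

```python
import paddle.fluid as fluid

def export_for_inference(layer, model_path, inputs, outputs=None):
    # Hypothetical helper mirroring the jit.save pattern used by the
    # updated tests; `inputs` are example input Variables, `outputs`
    # the forward-output Variables to keep in the saved program.
    configs = fluid.dygraph.jit.SaveLoadConfig()
    # One file per parameter, as the old save_inference_model produced.
    configs.separate_params = True
    if outputs is not None:
        # Prune the program to these outputs instead of fetch indices.
        configs.output_spec = outputs
    fluid.dygraph.jit.save(
        layer=layer,
        model_path=model_path,
        input_spec=inputs,
        configs=configs)
```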
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py
index 180ada7b9a769731e82db239dc696e23c13feed5..0386b7c7a17a0f93040fa18d688347f30f27850d 100644
--- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py
+++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py
@@ -23,6 +23,7 @@ import paddle.fluid as fluid
 from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
 from paddle.fluid.dygraph.jit import declarative
 from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from
+from paddle.fluid.dygraph.io import EXTRA_VAR_INFO_FILENAME
 
 SEED = 2020
 
@@ -60,21 +61,26 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
             parameter_list=layer.parameters())
         for i in range(5):
-            loss, _ = layer(x)
+            loss, pred = layer(x)
             loss.backward()
             adam.minimize(loss)
             layer.clear_gradients()
         # test for saving model in dygraph.guard
-        infer_model_dir = "./test_dy2stat_save_inference_model"
-        program_translator.save_inference_model(
-            infer_model_dir, feed=[0], fetch=[1])
+        infer_model_dir = "./test_dy2stat_save_inference_model_in_guard"
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        configs.output_spec = [pred]
+        fluid.dygraph.jit.save(
+            layer=layer,
+            model_path=infer_model_dir,
+            input_spec=[x],
+            configs=configs)
         # Check the correctness of the inference
         dygraph_out, _ = layer(x)
         self.check_save_inference_model(layer, [x_data], dygraph_out.numpy())
         self.check_save_inference_model(
-            layer, [x_data], dygraph_out.numpy(), fetch=[0])
+            layer, [x_data], dygraph_out.numpy(), fetch=[loss])
         self.check_save_inference_model(
-            layer, [x_data], dygraph_out.numpy(), feed=[0])
+            layer, [x_data], dygraph_out.numpy(), feed=[x])
 
     def check_save_inference_model(self,
                                    model,
@@ -86,11 +92,18 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
         expected_persistable_vars = set([p.name for p in model.parameters()])
 
         infer_model_dir = "./test_dy2stat_save_inference_model"
-        program_translator.save_inference_model(
-            infer_model_dir, feed=feed, fetch=fetch)
+        configs = fluid.dygraph.jit.SaveLoadConfig()
+        if fetch is not None:
+            configs.output_spec = fetch
+        configs.separate_params = True
+        fluid.dygraph.jit.save(
+            layer=model,
+            model_path=infer_model_dir,
+            input_spec=feed if feed else None,
+            configs=configs)
         saved_var_names = set([
             filename for filename in os.listdir(infer_model_dir)
-            if filename != '__model__'
+            if filename != '__model__' and filename != EXTRA_VAR_INFO_FILENAME
         ])
         self.assertEqual(saved_var_names, expected_persistable_vars)
         # Check the correctness of the inference
diff --git a/python/paddle/incubate/hapi/dygraph_layer_patch.py b/python/paddle/incubate/hapi/dygraph_layer_patch.py
index 80a3d82fc87cf1eea42b871ec80b37cb177caec9..cb3cc10a84dd9347bf4b781031bedb5836dfbd4c 100644
--- a/python/paddle/incubate/hapi/dygraph_layer_patch.py
+++ b/python/paddle/incubate/hapi/dygraph_layer_patch.py
@@ -71,9 +71,9 @@ def monkey_patch_layer():
             key_name = key if use_structured_name else param.name
             try:
                 match_res = _check_match(key_name, param)
+                matched_param_state.append(match_res)
             except ValueError as err:
                 warnings.warn(("Skip loading for {}. ".format(key) + str(err)))
-            matched_param_state.append(match_res)
 
         if in_dygraph_mode():
             for param, state in matched_param_state:
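The dygraph_layer_patch.py fix moves `matched_param_state.append(match_res)` inside the `try`: previously, when `_check_match` raised, the loop appended the stale `match_res` from the prior iteration (or hit a `NameError` on the first one), so a "skipped" parameter was not actually skipped. An illustrative sketch of the fixed control flow, with hypothetical names rather than Paddle's:

```python
import warnings

def collect(items, check):
    matched = []
    for it in items:
        try:
            res = check(it)
            matched.append(res)  # fixed: append only when check() succeeded
        except ValueError as err:
            warnings.warn("Skip loading for {}. {}".format(it, err))
    return matched

def check(x):
    # Hypothetical stand-in for _check_match: reject negative inputs.
    if x < 0:
        raise ValueError("shape mismatch")
    return x * 2

# -1 is genuinely skipped instead of duplicating the previous result:
assert collect([1, -1, 2], check) == [2, 4]
```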