From 08838f3909bbc178f2d47625af3c93e68213e363 Mon Sep 17 00:00:00 2001
From: flame
Date: Wed, 20 Mar 2019 22:33:52 +0800
Subject: [PATCH] Fix save inference model bug (#16242)

* save infer model bug fix, return target vars' name list
---
 paddle/fluid/API.spec     |  2 +-
 python/paddle/fluid/io.py | 11 +++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index c76489d183..11ccef30f6 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -56,7 +56,7 @@ paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_pr
 paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '0a5308f496632ab1ec3ba1f1377e6f95'))
 paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
-paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '582d87b8df75a5a639a107db8ff86f9c'))
+paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '70f4f53f13572436ac72d1c8b5efeb9d'))
 paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb'))
 paddle.fluid.initializer.ConstantInitializer.__init__ (ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.initializer.UniformInitializer.__init__ (ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 1775159798..326a84d82b 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -896,7 +896,7 @@ def save_inference_model(dirname,
                                      True is supported.
 
     Returns:
-        None
+        target_var_name_list(list): The fetch variables' name list
 
     Raises:
         ValueError: If `feed_var_names` is not a list of basestring.
@@ -949,11 +949,13 @@ def save_inference_model(dirname,
     # TODO(Superjomn) add an IR pass to remove 1-scale op.
     with program_guard(main_program):
         uniq_target_vars = []
-        for var in target_vars:
+        for i, var in enumerate(target_vars):
             if isinstance(var, Variable):
-                var1 = layers.scale(var, 1.)
-                uniq_target_vars.append(var1)
+                var = layers.scale(
+                    var, 1., name="save_infer_model/scale_{}".format(i))
+                uniq_target_vars.append(var)
         target_vars = uniq_target_vars
+        target_var_name_list = [var.name for var in target_vars]
 
     # when a pserver and a trainer running on the same machine, mkdir may conflict
     try:
@@ -1010,6 +1012,7 @@ def save_inference_model(dirname,
         params_filename = os.path.basename(params_filename)
 
     save_persistables(executor, dirname, main_program, params_filename)
+    return target_var_name_list
 
 
 def load_inference_model(dirname,
-- 
GitLab
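
Usage sketch (not part of the patch): after this change, save_inference_model returns the fetch variables' name list instead of None, and each fetch target is the output of a scale op named "save_infer_model/scale_{i}". A minimal example against the Fluid 1.x API of this era follows; the one-layer network, variable names, and output directory are illustrative assumptions, not taken from the patch:

    import paddle.fluid as fluid

    # Hypothetical one-layer network: y = fc(x).
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.fc(input=x, size=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # With this patch the call returns the names of the internally
    # rescaled fetch targets (e.g. ['save_infer_model/scale_0']),
    # which can later be passed to executor.run(fetch_list=...).
    fetch_var_names = fluid.io.save_inference_model(
        dirname="./infer_model",
        feeded_var_names=['x'],
        target_vars=[y],
        executor=exe)
    print(fetch_var_names)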