From 2c4462711fd35d3c254852bf04f12b10c5650b22 Mon Sep 17 00:00:00 2001
From: Huihuang Zheng
Date: Wed, 8 May 2019 23:23:45 +0800
Subject: [PATCH] Fix API example code of save_inference_model (#17274)

* Fix API example code of save_inference_model

test=develop

* Add "import" in example of save_inference_model

* Fix typo "exsample" -> "example"

test=develop
---
 paddle/fluid/API.spec     |  4 ++--
 python/paddle/fluid/io.py | 31 +++++++++++++++++++++++++------
 2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 8a1607954c..9052675a9d 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -52,8 +52,8 @@ paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_pr
 paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '0a5308f496632ab1ec3ba1f1377e6f95'))
 paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
-paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '70f4f53f13572436ac72d1c8b5efeb9d'))
-paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb'))
+paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', 'af82e1b5fe5764029905a191b987f63d'))
+paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '648f64d8fd81572eef34a69e533459ef'))
 paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '4a072de39998ee4e0de33fcec11325a6'))
 paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '3db4b24d33fe4f711e303f9673dc5c6a'))
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 16524d385f..e15497b633 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -913,13 +913,32 @@ def save_inference_model(dirname,
     Examples:
         .. code-block:: python
 
-            exe = fluid.Executor(fluid.CPUPlace())
+            import paddle.fluid as fluid
+
             path = "./infer_model"
-            fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
-                         target_vars=[predict_var], executor=exe)
-
-            # In this exsample, the function will prune the default main program
-            # to make it suitable for infering the `predict_var`. The pruned
+
+            # User-defined network, here a softmax regression example
+            image = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
+            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
+            predict = fluid.layers.fc(input=image, size=10, act='softmax')
+
+            loss = fluid.layers.cross_entropy(input=predict, label=label)
+            avg_loss = fluid.layers.mean(loss)
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            exe.run(fluid.default_startup_program())
+
+            # Feed data and run the training process here
+
+            # Save the inference model. Note that we don't save label and loss in this example
+            fluid.io.save_inference_model(dirname=path,
+                                          feeded_var_names=['img'],
+                                          target_vars=[predict],
+                                          executor=exe)
+
+            # In this example, the function will prune the default main program
+            # to make it suitable for inferring the `predict` var. The pruned
             # inference program is going to be saved in the "./infer_model/__model__"
             # and parameters are going to be saved in separate files under folder
             # "./infer_model".
@@ -1075,7 +1094,7 @@ def load_inference_model(dirname,
             # if we need lookup table, we will use:
             fluid.io.load_inference_model(dirname=path, executor=exe, pserver_endpoints=endpoints)
 
-            # In this exsample, the inference program was saved in the
+            # In this example, the inference program was saved in the
             # "./infer_model/__model__" and parameters were saved in
             # separate files in ""./infer_model".
             # After getting inference program, feed target names and
--
GitLab
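
Editor's note: the patch demonstrates the save side only. Below is a minimal sketch of the matching load side, not part of the patch itself; it assumes the model was saved under "./infer_model" by the example above, and the numpy input and batch size of 1 are illustrative.

    .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        path = "./infer_model"
        exe = fluid.Executor(fluid.CPUPlace())

        # load_inference_model returns the pruned inference program plus the
        # names of the feed variables and the fetch targets saved above
        [inference_program, feed_target_names, fetch_targets] = \
            fluid.io.load_inference_model(dirname=path, executor=exe)

        # Run one inference step on a random batch of one 28x28 image;
        # 'img' is the name given in feeded_var_names when saving
        img = np.random.random(size=(1, 1, 28, 28)).astype('float32')
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: img},
                          fetch_list=fetch_targets)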