Unverified commit 8bd651b7, authored by Zhen Wang, committed by GitHub

Fix the bug in the AnalysisPredictor and add more directions about io APIs. (#17639)

* fix the bug that sub_scope_ may be null in AnalysisPredictor::Run.

* add more directions about io APIs' docs.

* update the API.spec. test=develop test=document_preview
Parent 34301732
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -48,13 +48,13 @@ paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.
 paddle.fluid.BuildStrategy.ReduceStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.ReduceStrategy, arg0: int) -> None
 paddle.fluid.BuildStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy) -> None
 paddle.fluid.io.save_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '869104f47e6fd21d897c3fcc426aa942'))
-paddle.fluid.io.save_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '3a7a99abac3e1bf898871fe609354218'))
+paddle.fluid.io.save_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '07ffd5351b30cf47172ccfd61bd0de6f'))
 paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '9141bb5f32caf7975eb3fd88c8a1b2da'))
 paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '1bb9454cf09d71f190bb51550c5a3ac9'))
-paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'))
+paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '944291120d37bdb037a689d2c86d0a6e'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
-paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', 'af82e1b5fe5764029905a191b987f63d'))
-paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'aaf3392332f2e5ef9d9177985be2c04a'))
+paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '89539e459eb959145f15c9c3e38fa97c'))
+paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '2f54d7c206b62f8c10f4f9d78c731cfd'))
 paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '4a072de39998ee4e0de33fcec11325a6'))
 paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '3db4b24d33fe4f711e303f9673dc5c6a'))
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -202,6 +202,7 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
   timer.tic();
   // set feed variable
   framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
+  PADDLE_ENFORCE_NOT_NULL(scope, "The scope should not be nullptr.");
   if (!SetFeed(inputs, scope)) {
     LOG(ERROR) << "fail to set feed";
     return false;
@@ -229,7 +230,9 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
   // Here is a bugfix: collect all the container variables and reset them to a
   // bool; the next time, the operator will call MutableData and construct a new
   // container again, so that the container will be empty for each batch.
-  tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
+  if (sub_scope_) {
+    tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
+  }
   tensor_array_batch_cleaner_.ResetNoTensorVars();
   return true;
 }
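For context, AnalysisPredictor::Run is the code path behind the inference predictor's Python run() binding, so a null sub_scope_ could previously surface there. A minimal sketch of driving it from Python (the ./infer_model path, shape, and data are illustrative assumptions, not part of this commit):

    import numpy as np
    from paddle.fluid.core import AnalysisConfig, PaddleTensor, create_paddle_predictor

    # Assumes a model exported by fluid.io.save_inference_model into ./infer_model.
    config = AnalysisConfig('./infer_model')
    config.disable_gpu()
    predictor = create_paddle_predictor(config)

    # run() dispatches to AnalysisPredictor::Run, the function patched above.
    img = PaddleTensor(np.random.random((1, 784)).astype('float32'))
    outputs = predictor.run([img])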
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -242,7 +242,9 @@ def save_params(executor, dirname, main_program=None, filename=None):
     NOTICE: Some variables are not Parameter while they are necessary for
     training. So you can NOT save and continue your training just by
     `save_params()` and `load_params()`. Please use `save_persistables()`
-    and `load_persistables()` instead.
+    and `load_persistables()` instead. If you want to save your model
+    for inference, please use the `save_inference_model` API. You can
+    refer to :ref:`api_guide_model_save_reader_en` for more details.
 
     Args:
         executor(Executor): The executor to run for saving parameters.
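To illustrate the distinction the notice draws: save_params() stores only Parameter variables, while a resumable checkpoint also needs optimizer state. A minimal sketch of the recommended call (the toy network, Adam optimizer, and ./checkpoint path are illustrative):

    import paddle.fluid as fluid

    # A toy network plus an optimizer, so that both Parameters and
    # optimizer state (e.g. Adam moments) exist in the program.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    loss = fluid.layers.reduce_mean(fluid.layers.fc(input=x, size=1))
    fluid.optimizer.Adam(learning_rate=0.01).minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    # Saves every persistable variable, parameters and optimizer state
    # alike, so training can be resumed later via load_persistables().
    fluid.io.save_persistables(executor=exe, dirname='./checkpoint')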
@@ -658,6 +660,9 @@ def load_params(executor, dirname, main_program=None, filename=None):
     training. So you can NOT save and continue your training just by
     `save_params()` and `load_params()`. Please use `save_persistables()`
     and `load_persistables()` instead.
+    If you want to load the pre-trained model structure and parameters
+    for inference, please use the `load_inference_model` API. You can
+    refer to :ref:`api_guide_model_save_reader_en` for more details.
 
     Args:
         executor(Executor): The executor to run for loading parameters.
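The matching restore call, which the notice recommends over load_params() when continuing training (a sketch under the same assumptions as above; the program that produced ./checkpoint must be rebuilt first):

    import paddle.fluid as fluid

    # Rebuild the identical network and optimizer first, so that
    # default_main_program() declares the variables to be restored.
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    fluid.io.load_persistables(executor=exe, dirname='./checkpoint')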
@@ -906,6 +911,10 @@ def save_inference_model(dirname,
     """
     Prune the given `main_program` to build a new program especially for inference,
     and then save it and all related parameters to the given `dirname` by the `executor`.
+    If you just want to save parameters of your trained model, please use the
+    `save_params` API. You can refer to :ref:`api_guide_model_save_reader_en` for
+    more details.
+
     Args:
         dirname(str): The directory path to save the inference model.
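A quick illustration of the pruning behavior described above (the layer names and ./infer_model path are made up for the example):

    import paddle.fluid as fluid

    img = fluid.layers.data(name='img', shape=[784], dtype='float32')
    pred = fluid.layers.fc(input=img, size=10, act='softmax')
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    # Prunes default_main_program() down to the ops needed to compute
    # `pred` from `img`, then saves that program and its parameters.
    fluid.io.save_inference_model(dirname='./infer_model',
                                  feeded_var_names=['img'],
                                  target_vars=[pred],
                                  executor=exe)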
@@ -1077,7 +1086,10 @@ def load_inference_model(dirname,
                          params_filename=None,
                          pserver_endpoints=None):
     """
-    Load an inference model from a directory.
+    Load an inference model from a directory. With this API, you can get the model
+    structure (inference program) and model parameters. If you just want to load
+    parameters of the pre-trained model, please use the `load_params` API.
+    You can refer to :ref:`api_guide_model_save_reader_en` for more details.
 
     Args:
         dirname(str): The directory path
@@ -1128,8 +1140,8 @@ def load_inference_model(dirname,
             fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
                          target_vars=[hidden_b], executor=exe, main_program=main_prog)
             tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
-            [inference_program, feed_target_names, fetch_targets] = \
-                fluid.io.load_inference_model(dirname=path, executor=exe)
+            [inference_program, feed_target_names, fetch_targets] = (
+                fluid.io.load_inference_model(dirname=path, executor=exe))
             results = exe.run(inference_program,
                               feed={feed_target_names[0]: tensor_img},
                               fetch_list=fetch_targets)
@@ -1137,10 +1149,10 @@ def load_inference_model(dirname,
             # endpoints is your pserver endpoints list; the above is just an example
             endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
             # if we need a lookup table, we will use:
-            [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = \
+            [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
                 fluid.io.load_inference_model(dirname=path,
                                               executor=exe,
-                                              pserver_endpoints=endpoints)
+                                              pserver_endpoints=endpoints))
 
             # In this example, the inference program was saved in the
             # "./infer_model/__model__" and parameters were saved in