From f0806bdaf2c34d44c5d4fd504d4bccc021a5443a Mon Sep 17 00:00:00 2001
From: Shibo Tao <62922815+T8T9@users.noreply.github.com>
Date: Thu, 19 Nov 2020 15:38:46 +0800
Subject: [PATCH] fix save_inference_model and load_inference_model alias.
 test=develop (#28736)

---
 python/paddle/static/__init__.py | 4 ++--
 python/paddle/static/io.py       | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py
index bca045852f..7c9c034e8f 100644
--- a/python/paddle/static/__init__.py
+++ b/python/paddle/static/__init__.py
@@ -23,8 +23,8 @@ __all__ = [
 ]
 
 from . import nn
-from .io import save_inference_model
-from .io import load_inference_model
+from .io import save_inference_model #DEFINE_ALIAS
+from .io import load_inference_model #DEFINE_ALIAS
 from ..fluid import Scope #DEFINE_ALIAS
 from .input import data #DEFINE_ALIAS
 from .input import InputSpec #DEFINE_ALIAS
diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py
index 14536b880f..a25a8fb191 100644
--- a/python/paddle/static/io.py
+++ b/python/paddle/static/io.py
@@ -97,7 +97,7 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor):
             # Feed data and train process
 
             # Save inference model. Note we don't save label and loss in this example
-            paddle.static.io.save_inference_model(path_prefix, [image], [predict], exe)
+            paddle.static.save_inference_model(path_prefix, [image], [predict], exe)
 
             # In this example, save_inference_model will prune the default
             # main program according to the network's input node (img) and output node (predict).
@@ -239,10 +239,10 @@ def load_inference_model(path_prefix, executor, **configs):
 
             # Save the inference model
             path_prefix = "./infer_model"
-            paddle.static.io.save_inference_model(path_prefix, [image], [hidden_b], exe)
+            paddle.static.save_inference_model(path_prefix, [image], [hidden_b], exe)
 
             [inference_program, feed_target_names, fetch_targets] = (
-                paddle.static.io.load_inference_model(path_prefix, exe))
+                paddle.static.load_inference_model(path_prefix, exe))
 
             tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
             results = exe.run(inference_program,
                               feed={feed_target_names[0]: tensor_img},
--
GitLab
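For reference, below is a minimal end-to-end sketch of the corrected public aliases in use, mirroring the docstring examples this patch touches. The tiny feed-forward network, the CPUPlace executor, and the "./infer_model" path prefix are illustrative assumptions, not part of the change:

    # A hedged sketch of the paddle.static save/load aliases fixed by this patch.
    # The network below is an assumption for demonstration; only the
    # save_inference_model / load_inference_model calls come from the patch.
    import numpy as np
    import paddle

    paddle.enable_static()

    # Build a toy network in the default main program (illustrative only).
    image = paddle.static.data(name="img", shape=[None, 784], dtype="float32")
    predict = paddle.static.nn.fc(x=image, size=10, activation="softmax")

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())

    # Save via the public alias (previously documented as
    # paddle.static.io.save_inference_model).
    path_prefix = "./infer_model"
    paddle.static.save_inference_model(path_prefix, [image], [predict], exe)

    # Load it back through the matching public alias.
    [inference_program, feed_target_names, fetch_targets] = (
        paddle.static.load_inference_model(path_prefix, exe))

    # Run inference on random input to confirm the round trip works.
    tensor_img = np.array(np.random.random((1, 784)), dtype=np.float32)
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)

The point of the patch is that users call these functions as paddle.static.save_inference_model and paddle.static.load_inference_model; the docstrings previously showed the internal paddle.static.io path, which the #DEFINE_ALIAS exports are meant to hide.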