diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py
index bca045852fd06e65cd03841b27d257c5864f5dca..7c9c034e8f9740a403d11c64c60577ae08ace618 100644
--- a/python/paddle/static/__init__.py
+++ b/python/paddle/static/__init__.py
@@ -23,8 +23,8 @@ __all__ = [
 ]
 
 from . import nn
-from .io import save_inference_model
-from .io import load_inference_model
+from .io import save_inference_model #DEFINE_ALIAS
+from .io import load_inference_model #DEFINE_ALIAS
 from ..fluid import Scope #DEFINE_ALIAS
 from .input import data #DEFINE_ALIAS
 from .input import InputSpec #DEFINE_ALIAS
diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py
index 14536b880f585a4e302d18dee35148a9c0037437..a25a8fb191bb2db45746ef3c027fc2993fe78ef4 100644
--- a/python/paddle/static/io.py
+++ b/python/paddle/static/io.py
@@ -97,7 +97,7 @@ def save_inference_model(path_prefix, feed_vars, fetch_vars, executor):
             # Feed data and train process
 
             # Save inference model. Note we don't save label and loss in this example
-            paddle.static.io.save_inference_model(path_prefix, [image], [predict], exe)
+            paddle.static.save_inference_model(path_prefix, [image], [predict], exe)
 
             # In this example, the save_inference_mode inference will prune the default
             # main program according to the network's input node (img) and output node(predict).
@@ -239,10 +239,10 @@ def load_inference_model(path_prefix, executor, **configs):
 
             # Save the inference model
             path_prefix = "./infer_model"
-            paddle.static.io.save_inference_model(path_prefix, [image], [hidden_b], exe)
+            paddle.static.save_inference_model(path_prefix, [image], [hidden_b], exe)
 
             [inference_program, feed_target_names, fetch_targets] = (
-                paddle.static.io.load_inference_model(path_prefix, exe))
+                paddle.static.load_inference_model(path_prefix, exe))
             tensor_img = np.array(np.random.random((64, 784)), dtype=np.float32)
             results = exe.run(inference_program,
                               feed={feed_target_names[0]: tensor_img},
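
For reference, the call pattern the updated docstrings point to looks like the following. This is a minimal sketch assuming a Paddle 2.x static-graph setup; the one-layer network and the "./infer_model" path are illustrative and not part of the diff. It saves an inference program through the public paddle.static aliases (rather than the internal paddle.static.io paths the old docstrings used) and reloads it for an exe.run call.

    # Hypothetical usage sketch for the public aliases promoted by this diff:
    # paddle.static.save_inference_model / paddle.static.load_inference_model.
    import numpy as np
    import paddle

    paddle.enable_static()

    # Toy network: one fully connected layer over a 784-dim input.
    image = paddle.static.data(name="img", shape=[None, 784], dtype="float32")
    predict = paddle.static.nn.fc(image, 10, activation="softmax")

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(paddle.static.default_startup_program())

    # Save via the public alias, not paddle.static.io.
    path_prefix = "./infer_model"
    paddle.static.save_inference_model(path_prefix, [image], [predict], exe)

    # Reload the pruned inference program and run it on random data.
    [inference_program, feed_target_names, fetch_targets] = (
        paddle.static.load_inference_model(path_prefix, exe))
    tensor_img = np.random.random((4, 784)).astype("float32")
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)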