diff --git a/doc/SAVE.md b/doc/SAVE.md index c1e6b19a45c75a64207802984f52c734d44f8fc8..3f7f97e12e1e309ff0933e150ea7bcd23298b60e 100644 --- a/doc/SAVE.md +++ b/doc/SAVE.md @@ -1,8 +1,9 @@ -## How to save a servable model of Paddle Serving? +# How to save a servable model of Paddle Serving? ([简体中文](./SAVE_CN.md)|English) -- Currently, paddle serving provides a save_model interface for users to access, the interface is similar with `save_inference_model` of Paddle. +## Save from training or prediction script +Currently, paddle serving provides a save_model interface for users to access, the interface is similar with `save_inference_model` of Paddle. ``` python import paddle_serving_client.io as serving_io serving_io.save_model("imdb_model", "imdb_client_conf", @@ -29,3 +30,15 @@ for line in sys.stdin: fetch_map = client.predict(feed=feed, fetch=fetch) print("{} {}".format(fetch_map["prediction"][1], label[0])) ``` + +## Export from saved model files +If you have saved model files using Paddle's `save_inference_model` API, you can use Paddle Serving's `inference_model_to_serving` API to convert it into a model file that can be used for Paddle Serving. +``` +import paddle_serving_client.io as serving_io +serving_io.inference_model_to_serving(dirname, model_filename=None, params_filename=None, serving_server="serving_server", serving_client="serving_client") +``` +dirname (str) - Path of saved model files. Program file and parameter files are saved in this directory. +model_filename (str, optional) - The name of file to load the inference program. If it is None, the default filename __model__ will be used. Default: None. +params_filename (str, optional) - The name of file to load all parameters. It is only used for the case that all parameters were saved in a single binary file. If parameters were saved in separate files, set it as None. Default: None. +serving_server (str, optional) - The path of model files and configuration files for server. Default: "serving_server". 
+serving_client (str, optional) - The path of configuration files for client. Default: "serving_client". diff --git a/doc/SAVE_CN.md b/doc/SAVE_CN.md index 43b62c2ac623b386505356194ac136ea305fe683..fc75cd8d015a6d6f42a08f29e4035db20f450d91 100644 --- a/doc/SAVE_CN.md +++ b/doc/SAVE_CN.md @@ -1,8 +1,9 @@ -## 怎样保存用于Paddle Serving的模型? +# 怎样保存用于Paddle Serving的模型? (简体中文|[English](./SAVE.md)) -- 目前,Paddle Serving提供了一个save_model接口供用户访问,该接口与Paddle的`save_inference_model`类似。 +## 从训练或预测脚本中保存 +目前,Paddle Serving提供了一个save_model接口供用户访问,该接口与Paddle的`save_inference_model`类似。 ``` python import paddle_serving_client.io as serving_io @@ -29,3 +30,15 @@ for line in sys.stdin: fetch_map = client.predict(feed=feed, fetch=fetch) print("{} {}".format(fetch_map["prediction"][1], label[0])) ``` + +## 从已保存的模型文件中导出 +如果已使用Paddle 的`save_inference_model`接口保存出预测要使用的模型,则可以通过Paddle Serving的`inference_model_to_serving`接口转换成可用于Paddle Serving的模型文件。 +``` +import paddle_serving_client.io as serving_io +serving_io.inference_model_to_serving(dirname, model_filename=None, params_filename=None, serving_server="serving_server", serving_client="serving_client") +``` +dirname (str) – 需要转换的模型文件存储路径,Program结构文件和参数文件均保存在此目录。 +model_filename (str,可选) – 存储需要转换的模型Inference Program结构的文件名称。如果设置为None,则使用 __model__ 作为默认的文件名。默认值为None。 +params_filename (str,可选) – 存储需要转换的模型所有参数的文件名称。当且仅当所有模型参数被保存在一个单独的二进制文件中,它才需要被指定。如果模型参数是存储在各自分离的文件中,设置它的值为None。默认值为None。 +serving_server (str, 可选) - 转换后的模型文件和配置文件的存储路径。默认值为"serving_server"。 +serving_client (str, 可选) - 转换后的客户端配置文件存储路径。默认值为"serving_client"。 diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py index 74a6ca871b5c1e32b3c1ecbc6656c95d7c78a399..4f174866e5521577ba35f39216f7dd0793879a6c 100644 --- a/python/paddle_serving_client/io/__init__.py +++ b/python/paddle_serving_client/io/__init__.py @@ -103,17 +103,21 @@ def save_model(server_model_folder, fout.write(config.SerializeToString()) -def inference_model_to_serving(infer_model, 
serving_client, serving_server): +def inference_model_to_serving(dirname, + model_filename=None, + params_filename=None, + serving_server="serving_server", + serving_client="serving_client"): place = fluid.CPUPlace() exe = fluid.Executor(place) inference_program, feed_target_names, fetch_targets = \ - fluid.io.load_inference_model(dirname=infer_model, executor=exe) + fluid.io.load_inference_model(dirname=dirname, executor=exe, model_filename=model_filename, params_filename=params_filename) feed_dict = { x: inference_program.global_block().var(x) for x in feed_target_names } fetch_dict = {x.name: x for x in fetch_targets} - save_model(serving_client, serving_server, feed_dict, fetch_dict, + save_model(serving_server, serving_client, feed_dict, fetch_dict, inference_program) feed_names = feed_dict.keys() fetch_names = fetch_dict.keys()