From 5d5e77d2824348ffa79c1622f0139df80bf61521 Mon Sep 17 00:00:00 2001
From: WangXi
Date: Tue, 1 Sep 2020 20:10:14 +0800
Subject: [PATCH] Add paddle_serving_client.io.convert module

---
 doc/SAVE.md                                |  5 ++
 doc/SAVE_CN.md                             |  6 ++
 python/paddle_serving_client/io/convert.py | 66 ++++++++++++++++++++++
 3 files changed, 77 insertions(+)
 create mode 100644 python/paddle_serving_client/io/convert.py

diff --git a/doc/SAVE.md b/doc/SAVE.md
index 54800fa0..35a70eb6 100644
--- a/doc/SAVE.md
+++ b/doc/SAVE.md
@@ -47,3 +47,8 @@ serving_client (str, optional) - The path of configuration files for client. Def
 model_filename (str, optional) - The name of file to load the inference program. If it is None, the default filename `__model__` will be used. Default: None.
 params_filename (str, optional) - The name of file to load all parameters. It is only used for the case that all parameters were saved in a single binary file. If parameters were saved in separate files, set it as None. Default: None.
 
+Or you can use the built-in Python module `paddle_serving_client.io.convert` to do the conversion:
+```bash
+python -m paddle_serving_client.io.convert --dirname ./your_inference_model_dir
+```
+The arguments are the same as those of the `inference_model_to_serving` API.
diff --git a/doc/SAVE_CN.md b/doc/SAVE_CN.md
index aaf0647f..0b10c32f 100644
--- a/doc/SAVE_CN.md
+++ b/doc/SAVE_CN.md
@@ -48,3 +48,9 @@ serving_client (str, optional) - The path where the converted configuration files for the client are stored.
 
 model_filename (str, optional) - The name of the file that stores the Inference Program structure of the model to be converted. If set to None, `__model__` is used as the default filename. Default: None.
 params_filename (str, optional) - The name of the file that stores all parameters of the model to be converted. It needs to be specified if and only if all model parameters are saved in a single binary file. If the model parameters are stored in separate files, set it to None. Default: None.
+
+Alternatively, you can use the built-in `paddle_serving_client.io.convert` module provided by Paddle Serving to do the conversion:
+```bash
+python -m paddle_serving_client.io.convert --dirname ./your_inference_model_dir
+```
+The module arguments are the same as those of the `inference_model_to_serving` API.
diff --git a/python/paddle_serving_client/io/convert.py b/python/paddle_serving_client/io/convert.py
new file mode 100644
index 00000000..f33691ff
--- /dev/null
+++ b/python/paddle_serving_client/io/convert.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+    Convert a Paddle inference model into a model file that can be used for Paddle Serving.
+    Example:
+        python -m paddle_serving_client.io.convert --dirname ./inference_model
+"""
+import argparse
+from . import inference_model_to_serving
+
+
+def parse_args():  # pylint: disable=doc-string-missing
+    parser = argparse.ArgumentParser("convert")
+    parser.add_argument(
+        "--dirname",
+        type=str,
+        required=True,
+        help='Path of saved model files. Program file and parameter files are saved in this directory.'
+    )
+    parser.add_argument(
+        "--serving_server",
+        type=str,
+        default="serving_server",
+        help='The path of model files and configuration files for the server. Default: "serving_server".'
+    )
+    parser.add_argument(
+        "--serving_client",
+        type=str,
+        default="serving_client",
+        help='The path of configuration files for the client. Default: "serving_client".'
+    )
+    parser.add_argument(
+        "--model_filename",
+        type=str,
+        default=None,
+        help='The name of the file to load the inference program from. If it is None, the default filename `__model__` will be used. Default: None.'
+    )
+    parser.add_argument(
+        "--params_filename",
+        type=str,
+        default=None,
+        help='The name of the file to load all parameters from. It is only used when all parameters were saved in a single binary file. If parameters were saved in separate files, set it to None. Default: None.'
+    )
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    inference_model_to_serving(
+        args.dirname,
+        serving_server=args.serving_server,
+        serving_client=args.serving_client,
+        model_filename=args.model_filename,
+        params_filename=args.params_filename)
--
GitLab
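
For context, the new `convert` module is a thin command-line wrapper around the `inference_model_to_serving` API documented above: each CLI flag maps one-to-one onto the API's arguments. Below is a minimal sketch of the equivalent direct call, assuming a Paddle inference model saved under a hypothetical `./your_inference_model_dir`:

```python
# Sketch of calling the underlying API directly instead of the CLI wrapper.
# The directory names below are hypothetical placeholders.
from paddle_serving_client.io import inference_model_to_serving

inference_model_to_serving(
    "./your_inference_model_dir",     # dirname: the saved inference model
    serving_server="serving_server",  # output dir: server-side model and config
    serving_client="serving_client",  # output dir: client-side config
    model_filename=None,              # None -> default program file `__model__`
    params_filename=None)             # None -> parameters stored in separate files
```

This mirrors what `convert.py` does after `parse_args()`, so the CLI and the direct API call remain interchangeable.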