From 098b5d4292d9c68594055b37630db678eca42b80 Mon Sep 17 00:00:00 2001
From: sandyhouse
Date: Fri, 28 Feb 2020 19:02:13 +0800
Subject: [PATCH] add serving doc

---
 docs/source/md/serving.md    |  2 +-
 docs/source/md/serving_en.md | 62 ++++++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+), 1 deletion(-)
 create mode 100644 docs/source/md/serving_en.md

diff --git a/docs/source/md/serving.md b/docs/source/md/serving.md
index 20b8eb4..f5878da 100644
--- a/docs/source/md/serving.md
+++ b/docs/source/md/serving.md
@@ -68,5 +68,5 @@ fc.connect('127.0.0.1:8010')
 #调用server端预测，输入为样本列表list类型，返回值为样本对应的embedding结果，list类型，shape为 batch size * embedding size
 result = fc.encode([image])
 print(result[0])
-bc.close()
+fc.close()
 ```
diff --git a/docs/source/md/serving_en.md b/docs/source/md/serving_en.md
new file mode 100644
index 0000000..e53f7b2
--- /dev/null
+++ b/docs/source/md/serving_en.md
@@ -0,0 +1,62 @@
+# Serving Deployment
+
+## How to export an inference model
+
+Generally, a model saved by PLSC includes only the parameters, not the network structure used for prediction. To do prediction, you first have to convert the saved model into an inference model that includes both the network structure and the parameters. The following example code shows how to do the conversion.
+```python
+# export_for_inference.py
+
+from plsc import Entry
+
+if __name__ == "__main__":
+    ins = Entry()
+    ins.set_checkpoint_dir('./pretrain_model')
+    ins.set_model_save_dir('./inference_model')
+
+    ins.convert_for_prediction()
+```
+
+Use the following command to run the conversion.
+```shell
+python export_for_inference.py
+```
+
+## How to use the inference library
+
+The inference library requires Python 3.
+
+### How to use the server
+
+Install the server with the following command.
+```shell
+pip3 install plsc-serving
+```
+
+Currently, the server can only be used with GPU devices, and it requires CUDA 9.0 or later.
+
+Start the server with the following code:
+```python
+from plsc_serving.run import PLSCServer
+
+fs = PLSCServer()
+# Set the absolute path of the inference model
+fs.with_model(model_path='/XXX/XXX')
+# Select the GPU to run on and the port to listen on
+fs.run(gpu_index=0, port=8010)
+```
+
+### How to use the client
+
+First, install ujson:
+```shell
+pip3 install ujson
+```
+
+Then, copy the [client script](../../../serving/client/face_service/face_service.py) to your local directory.
+
+Start the client with the following code:
+```python
+from face_service import FaceService
+
+with open('./data/00000000.jpg', 'rb') as f:
+    image = f.read()
+
+fc = FaceService()
+# Connect to the server
+fc.connect('127.0.0.1:8010')
+# The input is a list of samples; the return value contains the corresponding embeddings, a list of shape batch_size * embedding_size
+result = fc.encode([image])
+print(result[0])
+fc.close()
+```
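+
+Each element of `result` is the embedding vector of the corresponding input image. As a minimal sketch of how the embeddings might be used, the following script compares two face images by the cosine similarity of their embeddings. It assumes `fc.encode()` returns one fixed-length vector of floats per image; the second image path, the `cosine_similarity` helper, and the 0.5 threshold are illustrative only and are not part of `plsc-serving`.
+```python
+# compare_embeddings.py
+
+import numpy as np
+
+from face_service import FaceService
+
+
+def cosine_similarity(a, b):
+    """Return the cosine similarity between two embedding vectors."""
+    a = np.asarray(a, dtype=np.float32)
+    b = np.asarray(b, dtype=np.float32)
+    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
+
+
+if __name__ == "__main__":
+    with open('./data/00000000.jpg', 'rb') as f:
+        image_a = f.read()
+    # A second, hypothetical image used only for this example
+    with open('./data/00000001.jpg', 'rb') as f:
+        image_b = f.read()
+
+    fc = FaceService()
+    fc.connect('127.0.0.1:8010')
+    # Encode both images in a single batch request
+    embeddings = fc.encode([image_a, image_b])
+    fc.close()
+
+    score = cosine_similarity(embeddings[0], embeddings[1])
+    # 0.5 is a placeholder threshold, not a value recommended by PLSC;
+    # tune it on your own validation data
+    print('same person' if score > 0.5 else 'different person', score)
+```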