diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake index 0e202d3b06537646e489510c781cf125e87e3e07..82d35932a0240a3bd230c0c2d5072899ed9fa230 100644 --- a/cmake/paddlepaddle.cmake +++ b/cmake/paddlepaddle.cmake @@ -136,8 +136,8 @@ if (WITH_TRT) endif() if (WITH_LITE) - ADD_LIBRARY(paddle_api_full_bundled STATIC IMPORTED GLOBAL) - SET_PROPERTY(TARGET paddle_api_full_bundled PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/lite/cxx/lib/libpaddle_api_full_bundled.a) + ADD_LIBRARY(paddle_full_api_shared STATIC IMPORTED GLOBAL) + SET_PROPERTY(TARGET paddle_full_api_shared PROPERTY IMPORTED_LOCATION ${PADDLE_INSTALL_DIR}/third_party/install/lite/cxx/lib/libpaddle_full_api_shared.so) if (WITH_XPU) ADD_LIBRARY(xpuapi SHARED IMPORTED GLOBAL) @@ -157,7 +157,7 @@ LIST(APPEND paddle_depend_libs xxhash) if(WITH_LITE) - LIST(APPEND paddle_depend_libs paddle_api_full_bundled) + LIST(APPEND paddle_depend_libs paddle_full_api_shared) if(WITH_XPU) LIST(APPEND paddle_depend_libs xpuapi xpurt) endif() diff --git a/doc/FAQ.md b/doc/FAQ.md index 0dc4ed35a55e5904adbd1b924441aa21bc5436ab..233ee8381d5d2e6a0ea2b1a3084e310de84a272f 100644 --- a/doc/FAQ.md +++ b/doc/FAQ.md @@ -34,6 +34,42 @@ **A:** http rpc +## 安装问题 + +#### Q: pip install安装whl包过程,报错信息如下: +``` +Collecting opencv-python + Using cached opencv-python-4.3.0.38.tar.gz (88.0 MB) + Installing build dependencies ... done + Getting requirements to build wheel ... error + ERROR: Command errored out with exit status 1: + command: /home/work/Python-2.7.17/build/bin/python /home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py get_requires_for_build_wheel /tmp/tmpLiweA9 + cwd: /tmp/pip-install-_w6AUI/opencv-python + Complete output (22 lines): + Traceback (most recent call last): + File "/home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 280, in + main() + File "/home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 263, in main + json_out['return_val'] = hook(**hook_input['kwargs']) + File "/home/work/Python-2.7.17/build/lib/python2.7/site-packages/pip/_vendor/pep517/_in_process.py", line 114, in get_requires_for_build_wheel + return hook(config_settings) + File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 146, in get_requires_for_build_wheel + return self._get_build_requires(config_settings, requirements=['wheel']) + File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 127, in _get_build_requires + self.run_setup() + File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 243, in run_setup + self).run_setup(setup_script=setup_script) + File "/tmp/pip-build-env-AUCbP4/overlay/lib/python2.7/site-packages/setuptools/build_meta.py", line 142, in run_setup + exec(compile(code, __file__, 'exec'), locals()) + File "setup.py", line 448, in + main() + File "setup.py", line 99, in main + % {"ext": re.escape(sysconfig.get_config_var("EXT_SUFFIX"))} + File "/home/work/Python-2.7.17/build/lib/python2.7/re.py", line 210, in escape + s = list(pattern) + TypeError: 'NoneType' object is not iterable +``` +**A:** 指定opencv-python版本安装,pip install opencv-python==4.2.0.32,再安装whl包 ## 编译问题 diff --git a/doc/INFERENCE_TO_SERVING.md b/doc/INFERENCE_TO_SERVING.md index 719aa63c0a9b408d6bff628e7be4f35dfb49c5c8..15647dbbf8d461bcda237b0ef09b6818ee92fb00 100644 --- a/doc/INFERENCE_TO_SERVING.md 
+++ b/doc/INFERENCE_TO_SERVING.md
@@ -2,35 +2,15 @@
 ([简体中文](./INFERENCE_TO_SERVING_CN.md)|English)
-We should know something before converting to serving model
-
-**inference_model_dir**:the directory of Paddle inference model
-
-**serving_client_dir**: the directory of server side configuration
-
-**serving_client_dir**: the directory of client side configuration
-
-**model_filename**: this is model description file whose default value is `__model__`, if it's not default name, set `model_filename` explicitly
-
-**params_filename**: during `save_inference_model` every Variable will be save as a single file. If we have the inference model whose params are compressed into one file, please set `params_filename` explicitly
-
-
-
-## Example
-
-``` python
-from paddle_serving_client.io import inference_model_to_serving
-inference_model_dir = "your_inference_model"
-serving_client_dir = "serving_client_dir"
-serving_server_dir = "serving_server_dir"
-feed_var_names, fetch_var_names = inference_model_to_serving(
-    inference_model_dir, serving_server_dir, serving_client_dir)
-```
-
-if your model file and params file are both standalone, please use the following api.
-
-```
-feed_var_names, fetch_var_names = inference_model_to_serving(
-    inference_model_dir, serving_server_dir, serving_client_dir,
-    model_filename="model", params_filename="params")
+You can use the built-in Python module `paddle_serving_client.convert` to convert it.
+```shell
+python -m paddle_serving_client.convert --dirname ./your_inference_model_dir
 ```
+The arguments are the same as those of the `inference_model_to_serving` API.
+| Argument | Type | Default | Description |
+|--------------|------|-----------|--------------------------------|
+| `dirname` | str | - | Path of saved model files. Program file and parameter files are saved in this directory. |
+| `serving_server` | str | `"serving_server"` | The path of model files and configuration files for the server. |
+| `serving_client` | str | `"serving_client"` | The path of configuration files for the client. |
+| `model_filename` | str | None | The name of the file to load the inference program from. If it is None, the default filename `__model__` will be used. |
+| `params_filename` | str | None | The name of the file to load all parameters from. It is only used when all parameters were saved in a single binary file. If parameters were saved in separate files, set it to None.
| diff --git a/doc/INFERENCE_TO_SERVING_CN.md b/doc/INFERENCE_TO_SERVING_CN.md index 5d783f25a3f367baa94d471e50f227d9e6f733d1..64caf659f66f000cfab8a43abc66e3c3cd2e377c 100644 --- a/doc/INFERENCE_TO_SERVING_CN.md +++ b/doc/INFERENCE_TO_SERVING_CN.md @@ -2,32 +2,15 @@ ([English](./INFERENCE_TO_SERVING.md)|简体中文) -## 示例 - -在下列代码中,我们需要知道以下信息。 - -**模型文件夹**:这个文件夹就是Paddle的inference_model所在的文件夹 - -**serving_client_dir**: 这个文件夹是inference_model转换成Serving模型后,服务端配置的保存路径 - -**serving_client_dir**: 这个文件夹是inference_model转换成Serving模型后,客户端配置的保存路径 - -**模型描述文件**: 模型描述文件也就是`model_filename`默认值为`__model__`,是一个pb2文本文件,如果是别的文件名需要显式指定 - -**模型参数文件**: 在`save_inference_model`阶段,默认方式是每一个Variable保存一个二进制文件,如果是这种情况就不需要做指定。如果所有参数用压缩成一个文件的形式保存,则需要显式指定`params_filename` - - -``` python -from paddle_serving_client.io import inference_model_to_serving -inference_model_dir = "your_inference_model" -serving_client_dir = "serving_client_dir" -serving_server_dir = "serving_server_dir" -feed_var_names, fetch_var_names = inference_model_to_serving( - inference_model_dir, serving_server_dir, serving_client_dir) -``` -如果模型中有模型描述文件`model_filename` 和 模型参数文件`params_filename`,那么请用 -``` -feed_var_names, fetch_var_names = inference_model_to_serving( - inference_model_dir, serving_server_dir, serving_client_dir, - model_filename="model", params_filename="params") +你可以使用Paddle Serving提供的名为`paddle_serving_client.convert`的内置模块进行转换。 +```python +python -m paddle_serving_client.convert --dirname ./your_inference_model_dir ``` +模块参数与`inference_model_to_serving`接口参数相同。 +| 参数 | 类型 | 默认值 | 描述 | +|--------------|------|-----------|--------------------------------| +| `dirname` | str | - | 需要转换的模型文件存储路径,Program结构文件和参数文件均保存在此目录。| +| `serving_server` | str | `"serving_server"` | 转换后的模型文件和配置文件的存储路径。默认值为serving_server | +| `serving_client` | str | `"serving_client"` | 转换后的客户端配置文件存储路径。默认值为serving_client | +| `model_filename` | str | None | 存储需要转换的模型Inference Program结构的文件名称。如果设置为None,则使用 `__model__` 作为默认的文件名 | +| `params_filename` | str | None | 存储需要转换的模型所有参数的文件名称。当且仅当所有模型参数被保存在一个单独的>二进制文件中,它才需要被指定。如果模型参数是存储在各自分离的文件中,设置它的值为None | diff --git a/doc/LATEST_PACKAGES.md b/doc/LATEST_PACKAGES.md index 1c15371fda01e0f1aee00312a2f7bc9628b741af..d4071541663350c44b4743fb8bdfd228fe00b794 100644 --- a/doc/LATEST_PACKAGES.md +++ b/doc/LATEST_PACKAGES.md @@ -29,7 +29,7 @@ https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post102 https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py2-none-any.whl #cuda 10.0 https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py2-none-any.whl -##cuda10.1 with TensorRT 6 +#cuda10.1 with TensorRT 6 https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post101-py2-none-any.whl #cuda10.2 with TensorRT 7 https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post102-py2-none-any.whl diff --git a/java/README.md b/java/README.md index 2346d13e20b4f81c454bd4bf731fe406015ab26f..a4934eb3a2aff8ddfe7c27e88bea97367f7fc138 100644 --- a/java/README.md +++ b/java/README.md @@ -110,10 +110,10 @@ The first is that GPU Serving and Java Client are in the same image. After start The second is to deploy GPU Serving and Java Client separately. If they are on the same host, you can learn the IP address of the corresponding container through ifconfig, and then when you connect to client.connect in `examples/src/main/java/PaddleServingClientExample.java` Make changes to the endpoint, and then compile it again. 
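Relating back to the converter documentation updated above (both INFERENCE_TO_SERVING.md and INFERENCE_TO_SERVING_CN.md), here is a minimal sketch of driving the same conversion from Python through `inference_model_to_serving`; the model directory is a placeholder, and the keyword names simply mirror the argument table, so verify them against your installed version.

```python
# Minimal conversion sketch; "./your_inference_model_dir" is a placeholder path.
from paddle_serving_client.io import inference_model_to_serving

feed_names, fetch_names = inference_model_to_serving(
    dirname="./your_inference_model_dir",
    serving_server="serving_server",   # output dir for server-side model and config
    serving_client="serving_client",   # output dir for client-side config
    model_filename=None,               # None -> default __model__ program file
    params_filename=None)              # None -> parameters stored as separate files
print("feed vars:", feed_names)
print("fetch vars:", fetch_names)
```

Running `python -m paddle_serving_client.convert --dirname ./your_inference_model_dir` produces the same two output directories from the command line.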
Or select `--net=host` to bind the network device of docker and host when docker starts, so that it can run directly without customizing java code. -**It should be noted that in the example, all models need to use `--use_multilang` to start GRPC multi-programming language support, and the port number is 9393. If you need another port, you need to modify it in the java file** +**It should be noted that in the example, all models(not pipeline) need to use `--use_multilang` to start GRPC multi-programming language support, and the port number is 9393. If you need another port, you need to modify it in the java file** -**Currently Serving has launched the Pipeline mode (see [Pipeline Serving](../doc/PIPELINE_SERVING.md) for details). Pipeline Serving Client for Java is released, the next version multi-thread java client example will be released** +**Currently Serving has launched the Pipeline mode (see [Pipeline Serving](../doc/PIPELINE_SERVING.md) for details). Pipeline Serving Client for Java is released.** -**It should be noted that in the example, Java Pipeline Client code is in path /Java/Examples and /Java/src/main, and the Pipeline server code is in path /python/examples/pipeline/** +**It should be noted that in the example, Java Pipeline Client code is in path /Java/Examples and /Java/src/main, and the Pipeline server code is in path /python/examples/pipeline/ The Client IP and Port(which is configured in java/examples/src/main/java/PipelineClientExample.java) should be corresponding to the Pipeline Server IP and Port(which is configured in config.yaml) ** diff --git a/java/README_CN.md b/java/README_CN.md index 4c1df65fbeb78340187c9e603ff185751ebecf56..8fb6edd1af562bde12044f36ec2d9e71558c5a6b 100644 --- a/java/README_CN.md +++ b/java/README_CN.md @@ -111,11 +111,9 @@ java -cp paddle-serving-sdk-java-examples-0.0.1-jar-with-dependencies.jar Pipeli 第二种是GPU Serving和Java Client分开部署,如果在同一台宿主机,可以通过ifconfig了解对应容器的IP地址,然后在`examples/src/main/java/PaddleServingClientExample.java`当中对client.connect时的endpoint做修改,然后再编译一次。 或者在docker启动时选择 `--net=host`来绑定docker和宿主机的网络设备,这样不需要定制java代码可以直接运行。 -**需要注意的是,在示例中,所有模型都需要使用`--use_multilang`来启动GRPC多编程语言支持,以及端口号都是9393,如果需要别的端口,需要在java文件里修改** +**需要注意的是,在示例中,所有非Pipeline模型都需要使用`--use_multilang`来启动GRPC多编程语言支持,以及端口号都是9393,如果需要别的端口,需要在java文件里修改** -**目前Serving已推出Pipeline模式(详见[Pipeline Serving](../doc/PIPELINE_SERVING_CN.md)),面向Java的Pipeline Serving Client已发布,下个更新会发布Java版本的多线程用例敬请期待。** +**目前Serving已推出Pipeline模式(详见[Pipeline Serving](../doc/PIPELINE_SERVING_CN.md)),面向Java的Pipeline Serving Client已发布。** -**需要注意的是,Java Pipeline Client相关示例在/Java/Examples和/Java/src/main中,对应的Pipeline server在/python/examples/pipeline/中** - - -**目前Serving已推出Pipeline模式(详见[Pipeline Serving](../doc/PIPELINE_SERVING_CN.md)),下个版本(0.4.1)面向Java的Pipeline Serving Client将会发布,敬请期待。** +**需要注意的是,Java Pipeline Client相关示例在/Java/Examples和/Java/src/main中,对应的Pipeline server在/python/examples/pipeline/中 +注意java/examples/src/main/java/PipelineClientExample.java中的ip和port,需要与/python/examples/pipeline/中对应Pipeline server的config.yaml文件中配置的ip和port相对应。** diff --git a/java/examples/src/main/java/PipelineClientExample.java b/java/examples/src/main/java/PipelineClientExample.java index 1f459d82a99ad707c5803ab00d662eeceea56219..d279e00e30de5aeec37e14dfedc1aeb998222825 100644 --- a/java/examples/src/main/java/PipelineClientExample.java +++ b/java/examples/src/main/java/PipelineClientExample.java @@ -32,7 +32,7 @@ public class PipelineClientExample { System.out.println(fetch); if (StaticPipelineClient.succ != true) { - 
if(!StaticPipelineClient.initClient("172.17.0.2","18070")){ + if(!StaticPipelineClient.initClient("127.0.0.1","18070")){ System.out.println("connect failed."); return false; } @@ -57,12 +57,12 @@ public class PipelineClientExample { List fetch = Arrays.asList("prediction"); System.out.println(fetch); if (StaticPipelineClient.succ != true) { - if(!StaticPipelineClient.initClient("172.17.0.2","18070")){ + if(!StaticPipelineClient.initClient("127.0.0.1","18070")){ System.out.println("connect failed."); return false; } } - PipelineFuture future = StaticPipelineClient.client.asyn_pr::qedict(feed_data, fetch,false,0); + PipelineFuture future = StaticPipelineClient.client.asyn_predict(feed_data, fetch,false,0); HashMap result = future.get(); if (result == null) { return false; @@ -86,7 +86,7 @@ public class PipelineClientExample { }}; List fetch = Arrays.asList("prediction"); if (StaticPipelineClient.succ != true) { - if(!StaticPipelineClient.initClient("172.17.0.2","9998")){ + if(!StaticPipelineClient.initClient("127.0.0.1","9998")){ System.out.println("connect failed."); return false; } diff --git a/java/examples/src/main/java/StaticPipelineClient.java b/java/examples/src/main/java/StaticPipelineClient.java index 7399b05969c712602bc097d36ec5db2380c89328..6a54ce2e5cc5e302c5debe07d119b21c0873f7a6 100644 --- a/java/examples/src/main/java/StaticPipelineClient.java +++ b/java/examples/src/main/java/StaticPipelineClient.java @@ -37,7 +37,7 @@ public class StaticPipelineClient { System.out.println("already connect."); return true; } - succ = clieint.connect(target); + succ = client.connect(target); if (succ != true) { System.out.println("connect failed."); return false; diff --git a/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h b/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h index 92408cdacc581f7f9323840b87518df8ab8136ed..b3db6e1ad03d1822155918f9eb8714b6285972d1 100644 --- a/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h +++ b/paddle_inference/inferencer-fluid-arm/include/fluid_arm_engine.h @@ -128,20 +128,22 @@ class FluidArmAnalysisCore : public FluidFamilyCore { config.DisableGpu(); config.SetCpuMathLibraryNumThreads(1); - if (params.enable_memory_optimization()) { - config.EnableMemoryOptim(); + if (params.use_lite()) { + config.EnableLiteEngine(PrecisionType::kFloat32, true); } - if (params.enable_memory_optimization()) { - config.EnableMemoryOptim(); + if (params.use_xpu()) { + config.EnableXpu(2 * 1024 * 1024); } - if (params.use_lite()) { - config.EnableLiteEngine(PrecisionType::kFloat32, true); + if (params.enable_memory_optimization()) { + config.EnableMemoryOptim(); } - if (params.use_xpu()) { - config.EnableXpu(100); + if (params.enable_ir_optimization()) { + config.SwitchIrOptim(true); + } else { + config.SwitchIrOptim(false); } config.SwitchSpecifyInputNames(true); @@ -173,6 +175,14 @@ class FluidArmAnalysisDirCore : public FluidFamilyCore { config.SwitchSpecifyInputNames(true); config.SetCpuMathLibraryNumThreads(1); + if (params.use_lite()) { + config.EnableLiteEngine(PrecisionType::kFloat32, true); + } + + if (params.use_xpu()) { + config.EnableXpu(2 * 1024 * 1024); + } + if (params.enable_memory_optimization()) { config.EnableMemoryOptim(); } @@ -183,14 +193,6 @@ class FluidArmAnalysisDirCore : public FluidFamilyCore { config.SwitchIrOptim(false); } - if (params.use_lite()) { - config.EnableLiteEngine(PrecisionType::kFloat32, true); - } - - if (params.use_xpu()) { - config.EnableXpu(100); - } - AutoLock 
lock(GlobalPaddleCreateMutex::instance()); _core = CreatePredictor(config); if (NULL == _core.get()) { diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 2f3865d67d22403c38d9db21fbfb39e98de2659f..d17844991ea342e142476acececb14ac2e6ae106 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -99,15 +99,27 @@ if (SERVER) DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES}) add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp) elseif(WITH_LITE) - add_custom_command( - OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp - COMMAND cp -r - ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/ - COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py - "server_gpu" arm - COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel - DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES}) - add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp) + if(WITH_XPU) + add_custom_command( + OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp + COMMAND cp -r + ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/ + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py + "server_gpu" arm-xpu + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel + DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES}) + add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp) + else() + add_custom_command( + OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp + COMMAND cp -r + ${CMAKE_CURRENT_SOURCE_DIR}/paddle_serving_server_gpu/ ${PADDLE_SERVING_BINARY_DIR}/python/ + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} gen_version.py + "server_gpu" arm + COMMAND env ${py_env} ${PYTHON_EXECUTABLE} setup.py bdist_wheel + DEPENDS ${SERVING_SERVER_CORE} server_config_py_proto ${PY_FILES}) + add_custom_target(paddle_python ALL DEPENDS ${PADDLE_SERVING_BINARY_DIR}/.timestamp) + endif() else() add_custom_command( OUTPUT ${PADDLE_SERVING_BINARY_DIR}/.timestamp diff --git a/python/examples/bert/README.md b/python/examples/bert/README.md index 4cfa5590ffb4501c78e9e6ff886f5f82c94dd2db..a8fa35ddaec86ea2f05b025a3bde4b999d57f1dc 100644 --- a/python/examples/bert/README.md +++ b/python/examples/bert/README.md @@ -3,9 +3,10 @@ ([简体中文](./README_CN.md)|English) In the example, a BERT model is used for semantic understanding prediction, and the text is represented as a vector, which can be used for further analysis and prediction. +If your python version is 3.X, replace the 'pip' field in the following command with 'pip3',replace 'python' with 'python3'. ### Getting Model - +method 1: This example use model [BERT Chinese Model](https://www.paddlepaddle.org.cn/hubdetail?name=bert_chinese_L-12_H-768_A-12&en_category=SemanticModel) from [Paddlehub](https://github.com/PaddlePaddle/PaddleHub). Install paddlehub first @@ -22,11 +23,13 @@ the 128 in the command above means max_seq_len in BERT model, which is the lengt the config file and model file for server side are saved in the folder bert_seq128_model. the config file generated for client side is saved in the folder bert_seq128_client. +method 2: You can also download the above model from BOS(max_seq_len=128). 
After decompression, the config file and model file for server side are stored in the bert_chinese_L-12_H-768_A-12_model folder, and the config file generated for client side is stored in the bert_chinese_L-12_H-768_A-12_client folder: ```shell wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SemanticModel/bert_chinese_L-12_H-768_A-12.tar.gz tar -xzf bert_chinese_L-12_H-768_A-12.tar.gz ``` +if your model is bert_chinese_L-12_H-768_A-12_model, replace the 'bert_seq128_model' field in the following command with 'bert_chinese_L-12_H-768_A-12_model',replace 'bert_seq128_client' with 'bert_chinese_L-12_H-768_A-12_client'. ### Getting Dict and Sample Dataset @@ -36,11 +39,11 @@ sh get_data.sh this script will download Chinese Dictionary File vocab.txt and Chinese Sample Data data-c.txt ### RPC Inference Service -Run +start cpu inference service,Run ``` python -m paddle_serving_server.serve --model bert_seq128_model/ --port 9292 #cpu inference service ``` -Or +Or,start gpu inference service,Run ``` python -m paddle_serving_server_gpu.serve --model bert_seq128_model/ --port 9292 --gpu_ids 0 #launch gpu inference service at GPU 0 ``` @@ -59,12 +62,18 @@ head data-c.txt | python bert_client.py --model bert_seq128_client/serving_clien the client reads data from data-c.txt and send prediction request, the prediction is given by word vector. (Due to massive data in the word vector, we do not print it). ### HTTP Inference Service +start cpu HTTP inference service,Run +``` + python bert_web_service.py bert_seq128_model/ 9292 #launch gpu inference service +``` + +Or,start gpu HTTP inference service,Run ``` export CUDA_VISIBLE_DEVICES=0,1 ``` set environmental variable to specify which gpus are used, the command above means gpu 0 and gpu 1 is used. ``` - python bert_web_service.py bert_seq128_model/ 9292 #launch gpu inference service + python bert_web_service_gpu.py bert_seq128_model/ 9292 #launch gpu inference service ``` ### HTTP Inference diff --git a/python/examples/bert/README_CN.md b/python/examples/bert/README_CN.md index 93ec8f2adbd9ae31489011900472a0077cb33783..e06e17c8f345b65884feabee08d40e5f345fa322 100644 --- a/python/examples/bert/README_CN.md +++ b/python/examples/bert/README_CN.md @@ -4,8 +4,9 @@ 示例中采用BERT模型进行语义理解预测,将文本表示为向量的形式,可以用来做进一步的分析和预测。 +若使用python的版本为3.X, 将以下命令中的pip 替换为pip3, python替换为python3. ### 获取模型 - +方法1: 示例中采用[Paddlehub](https://github.com/PaddlePaddle/PaddleHub)中的[BERT中文模型](https://www.paddlepaddle.org.cn/hubdetail?name=bert_chinese_L-12_H-768_A-12&en_category=SemanticModel)。 请先安装paddlehub ``` @@ -19,11 +20,15 @@ python prepare_model.py 128 生成server端配置文件与模型文件,存放在bert_seq128_model文件夹。 生成client端配置文件,存放在bert_seq128_client文件夹。 +方法2: 您也可以从bos上直接下载上述模型(max_seq_len=128),解压后server端配置文件与模型文件存放在bert_chinese_L-12_H-768_A-12_model文件夹,client端配置文件存放在bert_chinese_L-12_H-768_A-12_client文件夹: ```shell wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SemanticModel/bert_chinese_L-12_H-768_A-12.tar.gz tar -xzf bert_chinese_L-12_H-768_A-12.tar.gz ``` +若使用bert_chinese_L-12_H-768_A-12_model模型,将下面命令中的bert_seq128_model字段替换为bert_chinese_L-12_H-768_A-12_model,bert_seq128_client字段替换为bert_chinese_L-12_H-768_A-12_client. 
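As a quick smoke test of the BERT HTTP service started above (either the CPU or the GPU variant on port 9292), a request sketch in Python: the `/bert/prediction` endpoint follows the `name="bert"` WebService convention, and the fetch name `pooled_output` is an assumption that should be checked against `bert_seq128_client/serving_client_conf.prototxt`.

```python
# Hypothetical smoke test for the BERT web service listening on port 9292.
import json
import requests

url = "http://127.0.0.1:9292/bert/prediction"     # name="bert" -> /bert/prediction
data = {
    "feed": [{"words": "hello paddle serving"}],  # "words" matches the reader input key
    "fetch": ["pooled_output"],                   # assumed fetch variable name
}
resp = requests.post(url, data=json.dumps(data),
                     headers={"Content-Type": "application/json"})
print(resp.json())
```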
+ + ### 获取词典和样例数据 @@ -33,13 +38,15 @@ sh get_data.sh 脚本将下载中文词典vocab.txt和中文样例数据data-c.txt ### 启动RPC预测服务 -执行 +启动cpu预测服务,执行 ``` python -m paddle_serving_server.serve --model bert_seq128_model/ --port 9292 #启动cpu预测服务 + ``` -或者 +或者,启动gpu预测服务,执行 ``` python -m paddle_serving_server_gpu.serve --model bert_seq128_model/ --port 9292 --gpu_ids 0 #在gpu 0上启动gpu预测服务 + ``` ### 执行预测 @@ -51,17 +58,28 @@ pip install paddle_serving_app 执行 ``` head data-c.txt | python bert_client.py --model bert_seq128_client/serving_client_conf.prototxt + ``` 启动client读取data-c.txt中的数据进行预测,预测结果为文本的向量表示(由于数据较多,脚本中没有将输出进行打印),server端的地址在脚本中修改。 + + ### 启动HTTP预测服务 +启动cpu HTTP预测服务,执行 +``` +python bert_web_service.py bert_seq128_model/ 9292 #启动gpu预测服务 + +``` + +或者,启动gpu HTTP预测服务,执行 ``` export CUDA_VISIBLE_DEVICES=0,1 ``` 通过环境变量指定gpu预测服务使用的gpu,示例中指定索引为0和1的两块gpu ``` - python bert_web_service.py bert_seq128_model/ 9292 #启动gpu预测服务 +python bert_web_service_gpu.py bert_seq128_model/ 9292 #启动gpu预测服务 ``` + ### 执行预测 ``` diff --git a/python/examples/bert/bert_web_service_gpu.py b/python/examples/bert/bert_web_service_gpu.py new file mode 100644 index 0000000000000000000000000000000000000000..cbdd321c0932bf68c1e37f02f0c08e08a6c0e43e --- /dev/null +++ b/python/examples/bert/bert_web_service_gpu.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# pylint: disable=doc-string-missing +from paddle_serving_server_gpu.web_service import WebService +from paddle_serving_app.reader import ChineseBertReader +import sys +import os +import numpy as np + + +class BertService(WebService): + def load(self): + self.reader = ChineseBertReader({ + "vocab_file": "vocab.txt", + "max_seq_len": 128 + }) + + def preprocess(self, feed=[], fetch=[]): + feed_res = [] + is_batch = False + for ins in feed: + feed_dict = self.reader.process(ins["words"].encode("utf-8")) + for key in feed_dict.keys(): + feed_dict[key] = np.array(feed_dict[key]).reshape( + (len(feed_dict[key]), 1)) + feed_res.append(feed_dict) + return feed_res, fetch, is_batch + + +bert_service = BertService(name="bert") +bert_service.load() +bert_service.load_model_config(sys.argv[1]) +bert_service.prepare_server( + workdir="workdir", port=int(sys.argv[2]), device="gpu") +bert_service.run_rpc_service() +bert_service.run_web_service() diff --git a/python/examples/encryption/get_data.sh b/python/examples/encryption/get_data.sh index d1e97727fe5602552e48fbd7899128a274186948..c3cd5c236f5643d53c3a30bf0ffd367853ffaf13 100644 --- a/python/examples/encryption/get_data.sh +++ b/python/examples/encryption/get_data.sh @@ -1,4 +1,4 @@ wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing_example/encrypt.tar.gz tar -xzf encrypt.tar.gz -cp -rvf ../fit_a_line/uci_housing_model . -cp -rvf ../fit_a_line/uci_housing_client . 
+wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz +tar -xzf uci_housing.tar.gz diff --git a/python/examples/fit_a_line/README.md b/python/examples/fit_a_line/README.md index 480457f8ce856cf22a89ff29260be7d1a9f0ccf8..af45b2a854381cb5c5739e9c89518d2e80753f1b 100644 --- a/python/examples/fit_a_line/README.md +++ b/python/examples/fit_a_line/README.md @@ -34,7 +34,7 @@ python test_client.py uci_housing_client/serving_client_conf.prototxt Start a web service with default web service hosting modules: ``` shell -python test_server.py +python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393 --name uci ``` ### Client prediction diff --git a/python/examples/fit_a_line/README_CN.md b/python/examples/fit_a_line/README_CN.md index 451f273322afe6c5ae76c4cd3ef102b01b8856e6..b18b7204ef2c678ac2811c2bc78df611e0dc538b 100644 --- a/python/examples/fit_a_line/README_CN.md +++ b/python/examples/fit_a_line/README_CN.md @@ -41,7 +41,7 @@ python test_client.py uci_housing_client/serving_client_conf.prototxt 通过下面的一行代码开启默认web服务: ``` shell -python test_server.py +python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393 --name uci ``` ### 客户端预测 diff --git a/python/paddle_serving_app/local_predict.py b/python/paddle_serving_app/local_predict.py index 5a641fe6358a62b67c435e9881d481c2c5616b1f..1c49f01f22cbc23cfecb70fb36d3a72ff0991e5f 100644 --- a/python/paddle_serving_app/local_predict.py +++ b/python/paddle_serving_app/local_predict.py @@ -132,6 +132,7 @@ class LocalPredictor(object): ops_filter=[]) if use_xpu: + # 2MB l3 cache config.enable_xpu(8 * 1024 * 1024) self.predictor = create_paddle_predictor(config) diff --git a/python/paddle_serving_client/io/__init__.py b/python/paddle_serving_client/io/__init__.py index e6aa9947ca3326d8ff8e2bce012c37bffdb69b8d..48e0c8f2535db90e741eec4f8326a0b02b04486b 100644 --- a/python/paddle_serving_client/io/__init__.py +++ b/python/paddle_serving_client/io/__init__.py @@ -202,6 +202,7 @@ def inference_model_to_serving(dirname, serving_client="serving_client", model_filename=None, params_filename=None): + paddle.enable_static() place = fluid.CPUPlace() exe = fluid.Executor(place) inference_program, feed_target_names, fetch_targets = \ diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py index 1f035db9262ffbd8e031c9b0018877eb2ba6fad2..fbe48180867faf9f2baba71fc3c5c8cf6ab771e2 100644 --- a/python/paddle_serving_server/web_service.py +++ b/python/paddle_serving_server/web_service.py @@ -20,7 +20,7 @@ from paddle_serving_server import OpMaker, OpSeqMaker, Server from paddle_serving_client import Client from contextlib import closing import socket - +import numpy as np from paddle_serving_server import pipeline from paddle_serving_server.pipeline import Op @@ -64,8 +64,8 @@ class WebService(object): f = open(client_config, 'r') model_conf = google.protobuf.text_format.Merge( str(f.read()), model_conf) - self.feed_names = [var.alias_name for var in model_conf.feed_var] - self.fetch_names = [var.alias_name for var in model_conf.fetch_var] + self.feed_vars = {var.name: var for var in model_conf.feed_var} + self.fetch_vars = {var.name: var for var in model_conf.fetch_var} def _launch_rpc_service(self): op_maker = OpMaker() @@ -201,6 +201,15 @@ class WebService(object): def preprocess(self, feed=[], fetch=[]): print("This API will be deprecated later. 
Please do not use it") is_batch = True + feed_dict = {} + for var_name in self.feed_vars.keys(): + feed_dict[var_name] = [] + for feed_ins in feed: + for key in feed_ins: + feed_dict[key].append(np.array(feed_ins[key]).reshape(list(self.feed_vars[key].shape))[np.newaxis,:]) + feed = {} + for key in feed_dict: + feed[key] = np.concatenate(feed_dict[key], axis=0) return feed, fetch, is_batch def postprocess(self, feed=[], fetch=[], fetch_map=None): diff --git a/python/paddle_serving_server_gpu/__init__.py b/python/paddle_serving_server_gpu/__init__.py index b8fe91bb594b1f91141658afcb876f2291d4d35e..44402e734f3b9dd22db4ae674cf85e5cff614f8f 100644 --- a/python/paddle_serving_server_gpu/__init__.py +++ b/python/paddle_serving_server_gpu/__init__.py @@ -212,6 +212,7 @@ class Server(object): self.module_path = os.path.dirname(paddle_serving_server.__file__) self.cur_path = os.getcwd() self.use_local_bin = False + self.device = "cpu" self.gpuid = 0 self.use_trt = False self.use_lite = False @@ -279,6 +280,9 @@ class Server(object): "GPU not found, please check your environment or use cpu version by \"pip install paddle_serving_server\"" ) + def set_device(self, device="cpu"): + self.device = device + def set_gpuid(self, gpuid=0): self.gpuid = gpuid @@ -311,18 +315,19 @@ class Server(object): engine.static_optimization = False engine.force_update_static_cache = False engine.use_trt = self.use_trt - engine.use_lite = self.use_lite - engine.use_xpu = self.use_xpu - - - + if os.path.exists('{}/__params__'.format(model_config_path)): + suffix = "" + else: + suffix = "_DIR" + if device == "arm": + engine.use_lite = self.use_lite + engine.use_xpu = self.use_xpu if device == "cpu": - engine.type = "FLUID_CPU_ANALYSIS_DIR" + engine.type = "FLUID_CPU_ANALYSIS" + suffix elif device == "gpu": - engine.type = "FLUID_GPU_ANALYSIS_DIR" + engine.type = "FLUID_GPU_ANALYSIS" + suffix elif device == "arm": - engine.type = "FLUID_ARM_ANALYSIS_DIR" - + engine.type = "FLUID_ARM_ANALYSIS" + suffix self.model_toolkit_conf.engines.extend([engine]) def _prepare_infer_service(self, port): @@ -425,7 +430,7 @@ class Server(object): cuda_version = line.split("\"")[1] if cuda_version == "101" or cuda_version == "102" or cuda_version == "110": device_version = "serving-gpu-" + cuda_version + "-" - elif cuda_version == "arm": + elif cuda_version == "arm" or cuda_version == "arm-xpu": device_version = "serving-" + cuda_version + "-" else: device_version = "serving-gpu-cuda" + cuda_version + "-" @@ -528,7 +533,8 @@ class Server(object): else: print("Use local bin : {}".format(self.bin_path)) #self.check_cuda() - if self.use_lite: + # Todo: merge CPU and GPU code, remove device to model_toolkit + if self.device == "cpu" or self.device == "arm": command = "{} " \ "-enable_model_toolkit " \ "-inferservice_path {} " \ diff --git a/python/paddle_serving_server_gpu/serve.py b/python/paddle_serving_server_gpu/serve.py index ffa4c2336fd4307f67fd2f3578a1aa3102850ce9..057a25e483cd7c160bc7bbef8b9378f9bf08f32c 100644 --- a/python/paddle_serving_server_gpu/serve.py +++ b/python/paddle_serving_server_gpu/serve.py @@ -73,6 +73,7 @@ def start_gpu_card_model(index, gpuid, args): # pylint: disable=doc-string-miss server.set_lite() device = "arm" + server.set_device(device) if args.use_xpu: server.set_xpu() diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py index 4b89d90ee6893c3fafd596dc8f6c5cabc3a248bf..e2c24f4068da1a6ccccaa789186cab4e2a8fa6d9 100644 --- 
a/python/paddle_serving_server_gpu/web_service.py +++ b/python/paddle_serving_server_gpu/web_service.py @@ -70,8 +70,8 @@ class WebService(object): f = open(client_config, 'r') model_conf = google.protobuf.text_format.Merge( str(f.read()), model_conf) - self.feed_names = [var.alias_name for var in model_conf.feed_var] - self.fetch_names = [var.alias_name for var in model_conf.fetch_var] + self.feed_vars = {var.name: var for var in model_conf.feed_var} + self.fetch_vars = {var.name: var for var in model_conf.fetch_var} def set_gpus(self, gpus): print("This API will be deprecated later. Please do not use it") @@ -107,6 +107,7 @@ class WebService(object): server.set_num_threads(thread_num) server.set_memory_optimize(mem_optim) server.set_ir_optimize(ir_optim) + server.set_device(device) if use_lite: server.set_lite() @@ -278,6 +279,15 @@ class WebService(object): def preprocess(self, feed=[], fetch=[]): print("This API will be deprecated later. Please do not use it") is_batch = True + feed_dict = {} + for var_name in self.feed_vars.keys(): + feed_dict[var_name] = [] + for feed_ins in feed: + for key in feed_ins: + feed_dict[key].append(np.array(feed_ins[key]).reshape(list(self.feed_vars[key].shape))[np.newaxis,:]) + feed = {} + for key in feed_dict: + feed[key] = np.concatenate(feed_dict[key], axis=0) return feed, fetch, is_batch def postprocess(self, feed=[], fetch=[], fetch_map=None): diff --git a/python/pipeline/local_service_handler.py b/python/pipeline/local_service_handler.py index eaa04ee01411260f82992d4327c9d8ac033b91f0..65261dfa38f20a2174dc90fea70b5296187f0044 100644 --- a/python/pipeline/local_service_handler.py +++ b/python/pipeline/local_service_handler.py @@ -249,6 +249,8 @@ class LocalServiceHandler(object): server = Server() if gpuid >= 0: server.set_gpuid(gpuid) + # TODO: support arm or arm + xpu later + server.set_device(self._device_name) server.set_op_sequence(op_seq_maker.get_op_sequence()) server.set_num_threads(thread_num)
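To make the new default `preprocess` collation added to both `web_service.py` files easier to follow, here is a standalone sketch of the same reshape-and-concatenate logic; the feed variable name `x` and shape `[13]` are purely illustrative.

```python
import numpy as np

# Hypothetical client-side feed: two instances of a variable "x" with shape [13].
feed = [{"x": np.random.rand(13)}, {"x": np.random.rand(13)}]
feed_shapes = {"x": [13]}  # stands in for self.feed_vars[key].shape

batched = {}
for key, shape in feed_shapes.items():
    # Reshape each instance, add a leading batch axis, then stack along it.
    rows = [np.array(ins[key]).reshape(shape)[np.newaxis, :] for ins in feed]
    batched[key] = np.concatenate(rows, axis=0)

print(batched["x"].shape)  # (2, 13)
```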