diff --git a/README.md b/README.md
index 736db2632e30ff12361ce78de594772384da75f3..ab6b1c0148315f2d19838b67a84cc732f175c944 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@
     Build Status
-    Release
+    Release
     Issues
     License
     Slack
@@ -86,15 +86,15 @@ We **highly recommend** you to **run Paddle Serving in Docker**, please visit [R
 ```
 # Run CPU Docker
-docker pull registry.baidubce.com/paddlepaddle/serving:0.6.0-devel
-docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.0-devel bash
+docker pull registry.baidubce.com/paddlepaddle/serving:0.6.2-devel
+docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.2-devel bash
 docker exec -it test bash
 git clone https://github.com/PaddlePaddle/Serving
 ```
 
 ```
 # Run GPU Docker
-nvidia-docker pull registry.baidubce.com/paddlepaddle/serving:0.6.0-cuda10.2-cudnn8-devel
-nvidia-docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.0-cuda10.2-cudnn8-devel bash
+nvidia-docker pull registry.baidubce.com/paddlepaddle/serving:0.6.2-cuda10.2-cudnn8-devel
+nvidia-docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.2-cuda10.2-cudnn8-devel bash
 nvidia-docker exec -it test bash
 git clone https://github.com/PaddlePaddle/Serving
 ```
@@ -105,13 +105,13 @@
 pip3 install -r python/requirements.txt
 ```
 
 ```shell
-pip3 install paddle-serving-client==0.6.0
-pip3 install paddle-serving-server==0.6.0 # CPU
-pip3 install paddle-serving-app==0.6.0
-pip3 install paddle-serving-server-gpu==0.6.0.post102 #GPU with CUDA10.2 + TensorRT7
+pip3 install paddle-serving-client==0.6.2
+pip3 install paddle-serving-server==0.6.2 # CPU
+pip3 install paddle-serving-app==0.6.2
+pip3 install paddle-serving-server-gpu==0.6.2.post102 # GPU with CUDA10.2 + TensorRT7
 # DO NOT RUN ALL COMMANDS! check your GPU env and select the right one
-pip3 install paddle-serving-server-gpu==0.6.0.post101 # GPU with CUDA10.1 + TensorRT6
-pip3 install paddle-serving-server-gpu==0.6.0.post11 # GPU with CUDA10.1 + TensorRT7
+pip3 install paddle-serving-server-gpu==0.6.2.post101 # GPU with CUDA10.1 + TensorRT6
+pip3 install paddle-serving-server-gpu==0.6.2.post11 # GPU with CUDA11.0 + TensorRT7
 ```
 You may need to use a domestic mirror source (in China, you can use the Tsinghua mirror source, add `-i https://pypi.tuna.tsinghua.edu.cn/simple` to pip command) to speed up the download.
diff --git a/README_CN.md b/README_CN.md
index b5d3acaa6dd3cf102935fe6b9485e43f72c969d7..d728071dbd80ae2400a6e95b5ccb06010fd7ef06 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -87,15 +87,15 @@ Paddle Serving开发者为您提供了简单易用的[AIStudio教程-Paddle Serv
 ```
 # 启动 CPU Docker
-docker pull registry.baidubce.com/paddlepaddle/serving:0.6.0-devel
-docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.0-devel bash
+docker pull registry.baidubce.com/paddlepaddle/serving:0.6.2-devel
+docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.2-devel bash
 docker exec -it test bash
 git clone https://github.com/PaddlePaddle/Serving
 ```
 
 ```
 # 启动 GPU Docker
-nvidia-docker pull registry.baidubce.com/paddlepaddle/serving:0.6.0-cuda10.2-cudnn8-devel
-nvidia-docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.0-cuda10.2-cudnn8-devel bash
+nvidia-docker pull registry.baidubce.com/paddlepaddle/serving:0.6.2-cuda10.2-cudnn8-devel
+nvidia-docker run -p 9292:9292 --name test -dit registry.baidubce.com/paddlepaddle/serving:0.6.2-cuda10.2-cudnn8-devel bash
 nvidia-docker exec -it test bash
 git clone https://github.com/PaddlePaddle/Serving
 ```
@@ -107,13 +107,13 @@
 pip3 install -r python/requirements.txt
 ```
 
 ```shell
-pip3 install paddle-serving-client==0.6.0
-pip3 install paddle-serving-server==0.6.0 # CPU
-pip3 install paddle-serving-app==0.6.0
-pip3 install paddle-serving-server-gpu==0.6.0.post102 #GPU with CUDA10.2 + TensorRT7
+pip3 install paddle-serving-client==0.6.2
+pip3 install paddle-serving-server==0.6.2 # CPU
+pip3 install paddle-serving-app==0.6.2
+pip3 install paddle-serving-server-gpu==0.6.2.post102 # GPU with CUDA10.2 + TensorRT7
 # 其他GPU环境需要确认环境再选择执行哪一条
-pip3 install paddle-serving-server-gpu==0.6.0.post101 # GPU with CUDA10.1 + TensorRT6
-pip3 install paddle-serving-server-gpu==0.6.0.post11 # GPU with CUDA10.1 + TensorRT7
+pip3 install paddle-serving-server-gpu==0.6.2.post101 # GPU with CUDA10.1 + TensorRT6
+pip3 install paddle-serving-server-gpu==0.6.2.post11 # GPU with CUDA11.0 + TensorRT7
 ```
 您可能需要使用国内镜像源(例如清华源, 在pip命令中添加`-i https://pypi.tuna.tsinghua.edu.cn/simple`)来加速下载。
@@ -124,7 +124,7 @@ paddle-serving-server和paddle-serving-server-gpu安装包支持Centos 6/7, Ubun
 
 paddle-serving-client和paddle-serving-app安装包支持Linux和Windows,其中paddle-serving-client仅支持python3.6/3.7/3.8。
 
-**最新的0.6.0的版本,已经不支持Cuda 9.0和Cuda 10.0,Python已不支持2.7和3.5。**
+**最新的0.6.2版本,已经不支持Cuda 9.0和Cuda 10.0,Python已不支持2.7和3.5。**
 
 推荐安装2.1.0及以上版本的paddle
 
diff --git a/doc/DOCKER_IMAGES.md b/doc/DOCKER_IMAGES.md
index 65209f344d13cfedd73bdff74fabd1605a3df102..637a8593cb74c37d348625fe1e153516cf91a2a7 100644
--- a/doc/DOCKER_IMAGES.md
+++ b/doc/DOCKER_IMAGES.md
@@ -64,18 +64,33 @@ Develop Images:
 
 | Env | Version | Docker images tag | OS | Gcc Version |
 |----------|---------|------------------------------|-----------|-------------|
-| CPU | >=0.5.0 | 0.6.0-devel | Ubuntu 16 | 8.2.0 |
+| CPU | >=0.5.0 | 0.6.2-devel | Ubuntu 16 | 8.2.0 |
 | | <=0.4.0 | 0.4.0-devel | CentOS 7 | 4.8.5 |
-| Cuda10.1 | >=0.5.0 | 0.6.0-cuda10.1-cudnn7-devel | Ubuntu 16 | 8.2.0 |
-| | 0.6.0 | 0.6.0-cuda10.1-cudnn7-gcc54-devel(not ready) | Ubuntu 16 | 5.4.0 |
-| | <=0.4.0 | 0.6.0-cuda10.1-cudnn7-devel | CentOS 7 | 4.8.5 |
-| Cuda10.2 | >=0.5.0 | 0.6.0-cuda10.2-cudnn8-devel | Ubuntu 16 | 8.2.0 |
+| Cuda10.1 | >=0.5.0 | 0.6.2-cuda10.1-cudnn7-devel | Ubuntu 16 | 8.2.0 |
+| | 0.6.2 | 0.6.2-cuda10.1-cudnn7-gcc54-devel(not ready) | Ubuntu 16 | 5.4.0 |
+| | <=0.4.0 | 0.4.0-cuda10.1-cudnn7-devel | CentOS 7 | 4.8.5 |
+| Cuda10.2 | >=0.5.0 | 0.6.2-cuda10.2-cudnn8-devel | Ubuntu 16 | 8.2.0 |
 | | <=0.4.0 | Nan | Nan | Nan |
-| Cuda11.0 | >=0.5.0 | 0.6.0-cuda11.0-cudnn8-devel | Ubuntu 18 | 8.2.0 |
+| Cuda11.0 | >=0.5.0 | 0.6.2-cuda11.0-cudnn8-devel | Ubuntu 18 | 8.2.0 |
 | | <=0.4.0 | Nan | Nan | Nan |
 
 Running Images:
 
-Running Images is lighter than Develop Images, and Running Images are too many due to multiple combinations of python, device environment. If you want to know about it, plese check the document [Paddle Serving on Kubernetes.](PADDLE_SERVING_ON_KUBERNETES.md).
+Running Images are lighter than Develop Images: they contain the Serving whl packages and binaries, but leave out development tools such as cmake to keep the image size small. If you want to know more, please check the document [Paddle Serving on Kubernetes](PADDLE_SERVING_ON_KUBERNETES.md).
+
+| ENV | Python Version | Tag |
+|------------------------------------------|----------------|-----------------------------|
+| cpu | 3.6 | 0.6.2-py36-runtime |
+| cpu | 3.7 | 0.6.2-py37-runtime |
+| cpu | 3.8 | 0.6.2-py38-runtime |
+| cuda-10.1 + cudnn-7.6.5 + tensorrt-6.0.1 | 3.6 | 0.6.2-cuda10.1-py36-runtime |
+| cuda-10.1 + cudnn-7.6.5 + tensorrt-6.0.1 | 3.7 | 0.6.2-cuda10.1-py37-runtime |
+| cuda-10.1 + cudnn-7.6.5 + tensorrt-6.0.1 | 3.8 | 0.6.2-cuda10.1-py38-runtime |
+| cuda-10.2 + cudnn-8.2.0 + tensorrt-7.1.3 | 3.6 | 0.6.2-cuda10.2-py36-runtime |
+| cuda-10.2 + cudnn-8.2.0 + tensorrt-7.1.3 | 3.7 | 0.6.2-cuda10.2-py37-runtime |
+| cuda-10.2 + cudnn-8.2.0 + tensorrt-7.1.3 | 3.8 | 0.6.2-cuda10.2-py38-runtime |
+| cuda-11 + cudnn-8.0.5 + tensorrt-7.1.3 | 3.6 | 0.6.2-cuda11-py36-runtime |
+| cuda-11 + cudnn-8.0.5 + tensorrt-7.1.3 | 3.7 | 0.6.2-cuda11-py37-runtime |
+| cuda-11 + cudnn-8.0.5 + tensorrt-7.1.3 | 3.8 | 0.6.2-cuda11-py38-runtime |
 
 **Tips:** If you want to use CPU server and GPU server (version>=0.5.0) at the same time, you should check the gcc version, only Cuda10.1/10.2/11 can run with CPU server owing to the same gcc version(8.2).
diff --git a/doc/DOCKER_IMAGES_CN.md b/doc/DOCKER_IMAGES_CN.md
index cc93ecba807ae389cd9c43bea491f1d7b91fa9f1..9446bbf5272679c00d05b102d1927e1030321b9c 100644
--- a/doc/DOCKER_IMAGES_CN.md
+++ b/doc/DOCKER_IMAGES_CN.md
@@ -69,18 +69,32 @@ registry.baidubce.com/paddlepaddle/serving:xpu-x86 # for x86 xpu user
 
 | Env | Version | Docker images tag | OS | Gcc Version |
 |----------|---------|------------------------------|-----------|-------------|
-| CPU | >=0.5.0 | 0.6.0-devel | Ubuntu 16 | 8.2.0 |
+| CPU | >=0.5.0 | 0.6.2-devel | Ubuntu 16 | 8.2.0 |
 | | <=0.4.0 | 0.4.0-devel | CentOS 7 | 4.8.5 |
-| Cuda10.1 | >=0.5.0 | 0.6.0-cuda10.1-cudnn7-devel | Ubuntu 16 | 8.2.0 |
-| | 0.6.0 | 0.6.0-cuda10.1-cudnn7-gcc54-devel (not ready) | Ubuntu 16 | 5.4.0 |
-| | <=0.4.0 | 0.6.0-cuda10.1-cudnn7-devel | CentOS 7 | 4.8.5 |
-| Cuda10.2 | >=0.5.0 | 0.6.0-cuda10.2-cudnn8-devel | Ubuntu 16 | 8.2.0 |
+| Cuda10.1 | >=0.5.0 | 0.6.2-cuda10.1-cudnn7-devel | Ubuntu 16 | 8.2.0 |
+| | <=0.4.0 | 0.4.0-cuda10.1-cudnn7-devel | CentOS 7 | 4.8.5 |
+| Cuda10.2 | >=0.5.0 | 0.6.2-cuda10.2-cudnn8-devel | Ubuntu 16 | 8.2.0 |
 | | <=0.4.0 | Nan | Nan | Nan |
-| Cuda11.0 | >=0.5.0 | 0.6.0-cuda11.0-cudnn8-devel | Ubuntu 18 | 8.2.0 |
+| Cuda11.0 | >=0.5.0 | 0.6.2-cuda11.0-cudnn8-devel | Ubuntu 18 | 8.2.0 |
 | | <=0.4.0 | Nan | Nan | Nan |
 
 运行镜像:
 
-运行镜像比开发镜像更加轻量化, 且由于python,运行环境的多种组合,进而导致运行镜像种类过多。 如果您想了解有关信息,请检查文档[在Kubernetes上使用Paddle Serving](PADDLE_SERVING_ON_KUBERNETES.md)。
+运行镜像比开发镜像更加轻量化,运行镜像提供了serving的whl和bin,但为了减小运行期镜像体积,没有提供诸如cmake这样的开发工具。如果您想了解有关信息,请查阅文档[在Kubernetes上使用Paddle Serving](PADDLE_SERVING_ON_KUBERNETES.md)。
+
+| ENV | Python Version | Tag |
+|------------------------------------------|----------------|-----------------------------|
+| cpu | 3.6 | 0.6.2-py36-runtime |
+| cpu | 3.7 | 0.6.2-py37-runtime |
+| cpu | 3.8 | 0.6.2-py38-runtime |
+| cuda-10.1 + cudnn-7.6.5 + tensorrt-6.0.1 | 3.6 | 0.6.2-cuda10.1-py36-runtime |
+| cuda-10.1 + cudnn-7.6.5 + tensorrt-6.0.1 | 3.7 | 0.6.2-cuda10.1-py37-runtime |
+| cuda-10.1 + cudnn-7.6.5 + tensorrt-6.0.1 | 3.8 | 0.6.2-cuda10.1-py38-runtime |
+| cuda-10.2 + cudnn-8.2.0 + tensorrt-7.1.3 | 3.6 | 0.6.2-cuda10.2-py36-runtime |
+| cuda-10.2 + cudnn-8.2.0 + tensorrt-7.1.3 | 3.7 | 0.6.2-cuda10.2-py37-runtime |
+| cuda-10.2 + cudnn-8.2.0 + tensorrt-7.1.3 | 3.8 | 0.6.2-cuda10.2-py38-runtime |
+| cuda-11 + cudnn-8.0.5 + tensorrt-7.1.3 | 3.6 | 0.6.2-cuda11-py36-runtime |
+| cuda-11 + cudnn-8.0.5 + tensorrt-7.1.3 | 3.7 | 0.6.2-cuda11-py37-runtime |
+| cuda-11 + cudnn-8.0.5 + tensorrt-7.1.3 | 3.8 | 0.6.2-cuda11-py38-runtime |
 
 **注意事项:** 如果您在0.5.0及以上版本需要在一个容器当中同时运行CPU server和GPU server,需要选择Cuda10.1/10.2/11的镜像,因为他们和CPU环境有着相同版本的gcc。
diff --git a/python/examples/pipeline/ocr/web_service.py b/python/examples/pipeline/ocr/web_service.py
index e91c813c455174224ff014c7cb4c288f16924fca..6724415886497e43595672b840f6ed9c7362f2ee 100644
--- a/python/examples/pipeline/ocr/web_service.py
+++ b/python/examples/pipeline/ocr/web_service.py
@@ -175,6 +175,6 @@ class OcrService(WebService):
         return rec_op
 
 
-uci_service = OcrService(name="ocr")
-uci_service.prepare_pipeline_config("config.yml")
-uci_service.run_service()
+ocr_service = OcrService(name="ocr")
+ocr_service.prepare_pipeline_config("config.yml")
+ocr_service.run_service()
diff --git a/python/paddle_serving_server/server.py b/python/paddle_serving_server/server.py
index 82a203d8acddc7bc276f3537bfb0ee8f85bd66f5..6d8077ad3a3a10c943201f9a945a2ef92b370df0 100755
--- a/python/paddle_serving_server/server.py
+++ b/python/paddle_serving_server/server.py
@@ -675,7 +675,7 @@ class MultiLangServer(object):
                        use_encryption_model=False,
                        cube_conf=None):
         if not self._port_is_available(port):
-            raise SystemExit("Prot {} is already used".format(port))
+            raise SystemExit("Port {} is already used".format(port))
         default_port = 12000
         self.port_list_ = []
         for i in range(1000):
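
As a quick sanity check of the version bump in this patch, the commands below show how the new 0.6.2 artifacts referenced above would be pulled and installed. This is a minimal sketch: the devel image tag and pip package versions come straight from the updated README, while the runtime-image location is an assumption that the tags listed in doc/DOCKER_IMAGES.md are published under the same `registry.baidubce.com/paddlepaddle/serving` repository as the devel images; adjust the tag to your CUDA/Python combination.

```shell
# Pull the 0.6.2 development image (tag taken from the updated README)
docker pull registry.baidubce.com/paddlepaddle/serving:0.6.2-devel

# Assumed location of a runtime image from the new table in doc/DOCKER_IMAGES.md
# (CPU, Python 3.8); on a GPU host swap the tag, e.g. 0.6.2-cuda10.2-py38-runtime
docker pull registry.baidubce.com/paddlepaddle/serving:0.6.2-py38-runtime

# Matching 0.6.2 pip packages for a CPU environment
pip3 install paddle-serving-client==0.6.2 paddle-serving-app==0.6.2
pip3 install paddle-serving-server==0.6.2
```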