diff --git a/deploy/paddle2onnx/readme.md b/deploy/paddle2onnx/readme.md
index 02c069b7ac69110b63c53cd88e6ab5a223174972..782cffce9e5611b38da5bc002670a8282c08cc19 100644
--- a/deploy/paddle2onnx/readme.md
+++ b/deploy/paddle2onnx/readme.md
@@ -7,6 +7,7 @@
 You need to prepare a Paddle2ONNX model conversion environment and an ONNX model inference environment.
 
 ### Paddle2ONNX
+
 Paddle2ONNX converts models from the PaddlePaddle format to the ONNX format. Operator export is currently stable for ONNX Opset 9~11, and some Paddle operators can be converted to lower ONNX Opsets.
 For more details, see [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX/blob/develop/README_zh.md)
 
@@ -51,6 +52,9 @@ paddle2onnx --model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
 
 After execution, the ONNX model is saved under `./inference/det_mobile_onnx/`.
 
+* Note: the following models cannot currently be converted to ONNX:
+NRTR, SAR, RARE, SRN
+
 ## 3. ONNX Inference
 
 Taking the detection model as an example, run the following command for ONNX inference:
 
@@ -69,4 +73,4 @@ The predict time of ../../doc/imgs/1.jpg: 0.06162881851196289
 The visualized image saved in ./inference_results/det_res_1.jpg
 ```
 
-* Note: ONNX does not yet support variable-length inference, because the input needs to be resized to a fixed shape, so the results may differ slightly from inference with Paddle directly.
+* Note: ONNX does not yet support variable-length inference; the input needs to be resized to a fixed shape, so the results may differ slightly from inference with Paddle directly.
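As a quick sanity check that the exported model loads and runs, something like the following can be used. This is a minimal sketch, assuming `onnxruntime` is installed and the exported file is `./inference/det_mobile_onnx/model.onnx`; the 640x640 input shape is only an illustrative guess for a DB detection model:

```python
# Minimal sanity check of the exported ONNX detection model (sketch, not repo code).
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("./inference/det_mobile_onnx/model.onnx",
                            providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
# Detection models take an NCHW float32 tensor; the spatial size here is assumed.
dummy = np.random.rand(1, 3, 640, 640).astype("float32")
outputs = sess.run(None, {input_name: dummy})
print([o.shape for o in outputs])  # non-empty output shapes => the model runs
```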
diff --git a/deploy/pdserving/ocr_cpp_client.py b/deploy/pdserving/ocr_cpp_client.py
new file mode 100755
index 0000000000000000000000000000000000000000..2baa7565ac78b9551c788c7b36457bce38828eb5
--- /dev/null
+++ b/deploy/pdserving/ocr_cpp_client.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# pylint: disable=doc-string-missing
+
+from paddle_serving_client import Client
+import sys
+import numpy as np
+import base64
+import os
+import cv2
+from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
+from paddle_serving_app.reader import Div, Normalize, Transpose
+from ocr_reader import OCRReader
+
+client = Client()
+# TODO: load_client_config needs to load more than one client model;
+# some details still need to be figured out.
+# The detection and recognition client config dirs are passed on the command line.
+client.load_client_config(sys.argv[1:])
+client.connect(["127.0.0.1:9293"])
+
+test_img_dir = "test_img/"
+
+ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")
+
+
+def cv2_to_base64(image):
+    return base64.b64encode(image).decode('utf8')
+
+
+for img_file in os.listdir(test_img_dir):
+    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
+        image_data = file.read()
+    image = cv2_to_base64(image_data)
+    res_list = []
+    # Send the base64-encoded image to the serving endpoint and decode the texts.
+    fetch_map = client.predict(
+        feed={"x": image}, fetch=["save_infer_model/scale_0.tmp_1"], batch=True)
+    print("fetch map:", fetch_map)
+    one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
+    for res in one_batch_res:
+        res_list.append(res[0])
+    res = {"res": str(res_list)}
+    print(res)
diff --git a/deploy/pdserving/pipeline_http_client.py b/deploy/pdserving/pipeline_http_client.py
index 0befe2f6144d18e24fb3f72ed1d919fd8cd7d5a4..61d13178220118eaf53c51723a9ef65201373ffb 100644
--- a/deploy/pdserving/pipeline_http_client.py
+++ b/deploy/pdserving/pipeline_http_client.py
@@ -18,13 +18,19 @@ import json
 import base64
 import os
 
+import argparse
+parser = argparse.ArgumentParser(description="args for paddleserving")
+parser.add_argument("--image_dir", type=str, default="../../doc/imgs/")
+args = parser.parse_args()
+
 def cv2_to_base64(image):
     return base64.b64encode(image).decode('utf8')
 
 url = "http://127.0.0.1:9998/ocr/prediction"
-test_img_dir = "../../doc/imgs/"
+test_img_dir = args.image_dir
+
 for idx, img_file in enumerate(os.listdir(test_img_dir)):
     with open(os.path.join(test_img_dir, img_file), 'rb') as file:
         image_data1 = file.read()
@@ -36,5 +42,4 @@ for idx, img_file in enumerate(os.listdir(test_img_dir)):
     r = requests.post(url=url, data=json.dumps(data))
     print(r.json())
 
-test_img_dir = "../../doc/imgs/"
 print("==> total number of test imgs: ", len(os.listdir(test_img_dir)))
diff --git a/deploy/pdserving/pipeline_rpc_client.py b/deploy/pdserving/pipeline_rpc_client.py
index 79f898faf37f946cdbf4a87d4d62c8b1f9d5c93b..4dcb1ad5f533729e344809e99951b59fb2908537 100644
--- a/deploy/pdserving/pipeline_rpc_client.py
+++ b/deploy/pdserving/pipeline_rpc_client.py
@@ -30,7 +30,12 @@ def cv2_to_base64(image):
     return base64.b64encode(image).decode('utf8')
 
 
-test_img_dir = "imgs/"
+import argparse
+parser = argparse.ArgumentParser(description="args for paddleserving")
+parser.add_argument("--image_dir", type=str, default="../../doc/imgs/")
+args = parser.parse_args()
+test_img_dir = args.image_dir
+
 for img_file in os.listdir(test_img_dir):
     with open(os.path.join(test_img_dir, img_file), 'rb') as file:
         image_data = file.read()
diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md
index 166e15fb03b604b63f47b95304ac06c1f4ae9dd2..8db64664f6ff560450a5ee99d708313c931989fc 100644
--- a/doc/doc_ch/detection.md
+++ b/doc/doc_ch/detection.md
@@ -98,7 +98,7 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/
     -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 
 # Multi-node multi-GPU training: set the node IP addresses with --ips and the GPU IDs with --gpus
-python3 -m paddle.distributed.launch --ips="10.21.226.181,10.21.226.133" --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \
+python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \
     -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 ```
diff --git a/doc/doc_en/detection_en.md b/doc/doc_en/detection_en.md
index 68c5691cda9f78c7f805c0f0ecdf82f00534de72..948733e16cebea2ce819367a863948434ece5ae5 100644
--- a/doc/doc_en/detection_en.md
+++ b/doc/doc_en/detection_en.md
@@ -101,7 +101,7 @@ python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs
 
 # multi-node, multi-GPU training
 # Set the IPs of your nodes used by the '--ips' parameter. Set the GPU ID used by the '--gpus' parameter.
-python3 -m paddle.distributed.launch --ips="10.21.226.181,10.21.226.133" --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \
+python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \
     -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 ```
 **Note:** For multi-node multi-GPU training, you need to replace the `ips` value in the preceding command with the addresses of your machines, and the machines must be able to ping each other. The command for viewing the IP address of a machine is `ifconfig`.
diff --git a/requirements.txt b/requirements.txt
index 6758a59bad20f6ffa271766fc4d0df5ebf4c7a4b..0c87c5c95069a2699f5a3a50320c883c6118ffe7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 shapely
-scikit-image==0.18.3
+scikit-image
 imgaug==0.4.0
 pyclipper
 lmdb
diff --git a/test_tipc/configs/jeston_ppocr_det_mobile_params.txt b/test_tipc/configs/jeston_ppocr_det_mobile_params.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7e2bba2462bc4517f13da35a6a3bb527275bb85f
--- /dev/null
+++ b/test_tipc/configs/jeston_ppocr_det_mobile_params.txt
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:ocr_det
+python:python
+gpu_list:null
+Global.use_gpu:null
+Global.auto_cast:null
+Global.epoch_num:null
+Global.save_model_dir:null
+Train.loader.batch_size_per_card:null
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:null
+null:null
+##
+trainer:null
+norm_train:null
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.pretrained_model:null
+norm_export:null
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+inference_dir:null
+train_model:./inference/ch_ppocr_mobile_v2.0_det_infer
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--enable_mkldnn:False
+--cpu_threads:1|6
+--rec_batch_num:1
+--use_tensorrt:False|True
+--precision:fp16|fp32
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+null:null
+--benchmark:True
+null:null
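For reference, each line in these TIPC config files is a `key:value` pair, and a `|` in the value separates alternatives that the test scripts iterate over (e.g. `--precision:fp16|fp32` runs both precisions). A rough Python illustration of that convention follows; the actual parsing is done by `func_parser_key`/`func_parser_value` in `test_tipc/common_func.sh`:

```python
# Rough illustration of the TIPC "key:value" config convention; not the shell parser itself.
def parse_line(line: str):
    key, _, value = line.partition(":")          # split on the first colon only
    options = value.split("|") if value else []  # "|" separates alternative values
    return key, options

print(parse_line("--precision:fp16|fp32"))
# ('--precision', ['fp16', 'fp32'])
print(parse_line("--image_dir:../../doc/imgs"))
# ('--image_dir', ['../../doc/imgs'])
```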
diff --git a/test_tipc/configs/mac_ppocr_det_mobile_params.txt b/test_tipc/configs/mac_ppocr_det_mobile_params.txt
index b0415c9a1f79837866812d1e545ad8fd09fb681d..0200e2954948e5aeb719aa43e8a88d70c2af506d 100644
--- a/test_tipc/configs/mac_ppocr_det_mobile_params.txt
+++ b/test_tipc/configs/mac_ppocr_det_mobile_params.txt
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir:../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
diff --git a/test_tipc/configs/ppocr_det_mobile_params.txt b/test_tipc/configs/ppocr_det_mobile_params.txt
index d7e9cf95c2e9b4b2e18265e5f8b4a65cd6bdf518..0ccf77d1f91bde4e0ef191d11debb5818dcd49e0 100644
--- a/test_tipc/configs/ppocr_det_mobile_params.txt
+++ b/test_tipc/configs/ppocr_det_mobile_params.txt
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_rpc_client.py|pipeline_http_client.py
+--image_dir:../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
diff --git a/test_tipc/configs/ppocr_det_server_params.txt b/test_tipc/configs/ppocr_det_server_params.txt
index bba4ef44f769ed16671ead55a0eba6ee986aaaaa..f688fffac8824b0608ea6b6cec0683c70feb659e 100644
--- a/test_tipc/configs/ppocr_det_server_params.txt
+++ b/test_tipc/configs/ppocr_det_server_params.txt
@@ -80,4 +80,5 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir:../../doc/imgs
diff --git a/test_tipc/configs/ppocr_rec_mobile_params.txt b/test_tipc/configs/ppocr_rec_mobile_params.txt
index f3f3a54e14e042693d28559e487852a079f77bdd..3177d19cf6cf7759e13e5597492f3bd7fcea78ff 100644
--- a/test_tipc/configs/ppocr_rec_mobile_params.txt
+++ b/test_tipc/configs/ppocr_rec_mobile_params.txt
@@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir:../../doc/imgs_words_en
diff --git a/test_tipc/configs/ppocr_rec_server_params.txt b/test_tipc/configs/ppocr_rec_server_params.txt
index 77961e8e651e0d770dae64860cc129aa2d50dcf2..3bc1dcce2c7103f2180c19551e8f5379e5524476 100644
--- a/test_tipc/configs/ppocr_rec_server_params.txt
+++ b/test_tipc/configs/ppocr_rec_server_params.txt
@@ -80,4 +80,5 @@ op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
 op.rec.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs_words_en
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir:../../doc/imgs_words_en
diff --git a/test_tipc/configs/win_ppocr_det_mobile_params.txt b/test_tipc/configs/win_ppocr_det_mobile_params.txt
index 5a532ceb307fe87174dc6b46fbde236405f59ff5..0f4faee4b32925b4d0780ece6838c176238c7000 100644
--- a/test_tipc/configs/win_ppocr_det_mobile_params.txt
+++ b/test_tipc/configs/win_ppocr_det_mobile_params.txt
@@ -80,7 +80,8 @@ op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
-pipline:pipeline_http_client.py --image_dir=../../doc/imgs
+pipline:pipeline_http_client.py|pipeline_rpc_client.py
+--image_dir:../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
diff --git a/test_tipc/docs/jeston_test_train_inference_python.md b/test_tipc/docs/jeston_test_train_inference_python.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c0524df21df84fefdbbfd8e691766c9c542dff5
--- /dev/null
+++ b/test_tipc/docs/jeston_test_train_inference_python.md
@@ -0,0 +1,118 @@
+# Basic Training and Inference Functional Testing on Jetson
+
+The main program for basic training and inference functional testing on Jetson is `test_train_inference_python.sh`. Since the Jetson CPU is weak, only the GPU and TensorRT inference parts of TIPC need to be tested on Jetson.
+
+## 1. Summary of Test Conclusions
+
+- Inference: depending on whether quantization is used during training, the trained models can be divided into `normal models` and `quantized models`. The inference features supported by these two model types are summarized as follows:
+
+| Model type | Device | Batch size | TensorRT | MKL-DNN | CPU multi-threading |
+| ---- | ---- | ---- | :----: | :----: | :----: |
+| Normal model | GPU | 1/6 | fp32/fp16 | - | - |
+| Quantized model | GPU | 1/6 | int8 | - | - |
+
+
+## 2. Test Process
+
+Only a Python environment is needed for environment preparation. To install PaddlePaddle and the other dependencies, follow the steps below.
+
+### 2.1 Install Dependencies
+- Install PaddlePaddle >= 2.0
+- Install PaddleOCR dependencies
+  ```
+  pip install -r ../requirements.txt
+  ```
+- Install autolog (a tool for standardized log output)
+  ```
+  git clone https://github.com/LDOUBLEV/AutoLog
+  cd AutoLog
+  pip install -r requirements.txt
+  python setup.py bdist_wheel
+  pip install ./dist/auto_log-1.0.0-py3-none-any.whl
+  cd ../
+  ```
+- Install PaddleSlim (optional)
+  ```
+  # PaddleSlim is required to test quantization, pruning, and similar features
+  pip install paddleslim
+  ```
+
+
+### 2.2 Functional Testing
+
+First run `prepare.sh` to prepare the data and models, then run `test_train_inference_python.sh` for testing. Log files named like `python_infer_*.log` are generated under the `test_tipc/output` directory.
+
+`test_train_inference_python.sh` supports five [run modes](./test_train_inference_python.md); on Jetson, only the inference mode needs to be tested:
+
+- Mode 3: whole_infer. No training; runs inference on the full dataset, covering open-source model evaluation and dynamic-to-static export, and checking the inference model's prediction time and accuracy.
+
+```shell
+bash test_tipc/prepare.sh ./test_tipc/configs/jeston_ppocr_det_mobile_params.txt 'whole_infer'
+# Usage 1:
+bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/jeston_ppocr_det_mobile_params.txt 'whole_infer'
+# Usage 2: specify the GPU card for inference; the third argument is the GPU card ID
+bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/jeston_ppocr_det_mobile_params.txt 'whole_infer' '1'
+```
+
+After running the commands, the run logs are automatically saved under the `test_tipc/output` folder. For example, in `lite_train_lite_infer` mode the training + inference chain is run, so the `test_tipc/output` folder contains files such as:
+```
+test_tipc/output/
+|- results_python.log    # log of the status of each executed command
+|- python_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log  # inference log on GPU with TensorRT enabled, batch_size=1
+......
+```
+
+`results_python.log` records the run status of every command. If a command succeeds, output like the following is recorded:
+```
+Run successfully with command - python tools/infer/predict_det.py --use_gpu=True --use_tensorrt=False --precision=fp32 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True > ./test_tipc/output/python_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log 2>&1 !
+Run successfully with command - python tools/infer/predict_det.py --use_gpu=True --use_tensorrt=True --precision=fp32 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True > ./test_tipc/output/python_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log 2>&1 !
+Run successfully with command - python tools/infer/predict_det.py --use_gpu=True --use_tensorrt=True --precision=fp16 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True > ./test_tipc/output/python_infer_gpu_usetrt_True_precision_fp16_batchsize_1.log 2>&1 !
+```
+If a command fails, output like the following is recorded:
+```
+Run failed with command - python tools/infer/predict_det.py --use_gpu=True --use_tensorrt=False --precision=fp32 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True > ./test_tipc/output/python_infer_gpu_usetrt_False_precision_fp32_batchsize_1.log 2>&1 !
+Run failed with command - python tools/infer/predict_det.py --use_gpu=True --use_tensorrt=True --precision=fp32 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True > ./test_tipc/output/python_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log 2>&1 !
+Run failed with command - python tools/infer/predict_det.py --use_gpu=True --use_tensorrt=True --precision=fp16 --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ --rec_batch_num=1 --image_dir=./inference/ch_det_data_50/all-sum-510/ --benchmark=True > ./test_tipc/output/python_infer_gpu_usetrt_True_precision_fp16_batchsize_1.log 2>&1 !
+```
+It is therefore easy to tell from the contents of `results_python.log` which command failed.
+
+### 2.3 Accuracy Testing
+
+The `compare_results.py` script checks whether the model predictions match the expected results. The main steps are:
+- extract the predicted coordinates from the logs;
+- load the previously saved reference coordinates from the local file;
+- compare the two results against the accuracy expectation; an error is raised when the deviation exceeds the configured tolerances (a minimal sketch of this check is shown at the end of this section).
+
+#### Usage
+Run:
+```shell
+python test_tipc/compare_results.py --gt_file=./test_tipc/results/python_*.txt --log_file=./test_tipc/output/python_*.log --atol=1e-3 --rtol=1e-3
+```
+
+Parameters:
+- gt_file: path to the previously saved reference results; paths ending in *.txt are supported and files in *.txt format are indexed automatically. By default the files are saved under the test_tipc/results/ folder.
+- log_file: path to the inference logs saved by running test_tipc/test_train_inference_python.sh in infer mode; the logs contain prediction results such as text boxes, recognized text, and categories. Paths in the python_infer_*.log format are also supported.
+- atol: absolute tolerance
+- rtol: relative tolerance
+
+#### Results
+
+On success, the output looks like:
+```
+Assert allclose passed! The results of python_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log and ./test_tipc/results/python_ppocr_det_mobile_results_fp32.txt are consistent!
+```
+
+When the results are inconsistent, the output looks like:
+```
+......
+Traceback (most recent call last):
+  File "test_tipc/compare_results.py", line 140, in
+    format(filename, gt_filename))
+ValueError: The results of python_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log and the results of ./test_tipc/results/python_ppocr_det_mobile_results_fp32.txt are inconsistent!
+```
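+
+For intuition, the core of this comparison is a `numpy.allclose` check over the parsed coordinates. The following is a minimal sketch, under the assumption that the predicted and reference boxes have already been parsed into numeric arrays; `compare_results.py` remains the authoritative implementation:
+
+```python
+# Minimal sketch of the tolerance check; not the actual compare_results.py code.
+import numpy as np
+
+def check_consistent(pred, gt, atol=1e-3, rtol=1e-3):
+    pred, gt = np.asarray(pred, dtype=float), np.asarray(gt, dtype=float)
+    if not np.allclose(pred, gt, atol=atol, rtol=rtol):
+        raise ValueError("The results are inconsistent!")
+    print("Assert allclose passed!")
+
+check_consistent([[10.0, 20.0]], [[10.0005, 20.0002]])  # passes within tolerance
+```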
+
+## 3. More Tutorials
+
+This document covers functional testing only. For more complete tutorials on training and inference, please refer to:
+[Model training](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/training.md)
+[Inference with the Python inference engine](https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_ch/inference.md)
diff --git a/test_tipc/test_lite.sh b/test_tipc/test_lite.sh
index 832003ba302fe86995e20029cdb019e72d9ce162..1fd9d3c7186207922c436e7981622c707a56596f 100644
--- a/test_tipc/test_lite.sh
+++ b/test_tipc/test_lite.sh
@@ -3,7 +3,7 @@ source ./common_func.sh
 export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
 
 FILENAME=$1
-dataline=$(awk 'NR==101, NR==110{print}' $FILENAME)
+dataline=$(awk 'NR==102, NR==111{print}' $FILENAME)
 echo $dataline
 # parser params
 IFS=$'\n'
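These `awk 'NR==a, NR==b'` calls slice a fixed line range out of the config file, which is why adding the extra `--image_dir` line to the serving configs shifts every range by one. A rough Python equivalent, shown only to illustrate the shift (a hypothetical helper, not part of the repo):

```python
# Hypothetical helper mirroring: dataline=$(awk 'NR==start, NR==end{print}' FILENAME)
def config_slice(path, start, end):
    with open(path) as f:
        return [line.rstrip("\n") for i, line in enumerate(f, 1) if start <= i <= end]

# After the serving configs gained one line, the slice moves from 67..83 to 67..84:
# lines = config_slice("test_tipc/configs/ppocr_det_mobile_params.txt", 67, 84)
```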
diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh
index be7b594c3848c423937c59336ce3bf686f8f228d..9b1e90ed6116f32e232657e30277a747a70904c7 100644
--- a/test_tipc/test_serving.sh
+++ b/test_tipc/test_serving.sh
@@ -2,7 +2,7 @@ source test_tipc/common_func.sh
 
 FILENAME=$1
-dataline=$(awk 'NR==67, NR==83{print}' $FILENAME)
+dataline=$(awk 'NR==67, NR==84{print}' $FILENAME)
 
 # parser params
 IFS=$'\n'
@@ -35,6 +35,8 @@ web_use_trt_list=$(func_parser_value "${lines[14]}")
 web_precision_key=$(func_parser_key "${lines[15]}")
 web_precision_list=$(func_parser_value "${lines[15]}")
 pipeline_py=$(func_parser_value "${lines[16]}")
+image_dir_key=$(func_parser_key "${lines[17]}")
+image_dir_value=$(func_parser_value "${lines[17]}")
 
 LOG_PATH="../../test_tipc/output"
 mkdir -p ./test_tipc/output
@@ -51,67 +53,98 @@ function func_serving(){
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
     set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
+    set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
     trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
     eval $trans_model_cmd
     cd ${serving_dir_value}
     echo $PWD
     unset https_proxy
     unset http_proxy
-    for use_gpu in ${web_use_gpu_list[*]}; do
-        echo ${ues_gpu}
-        if [ ${use_gpu} = "null" ]; then
-            for use_mkldnn in ${web_use_mkldnn_list[*]}; do
-                if [ ${use_mkldnn} = "False" ]; then
-                    continue
-                fi
-                for threads in ${web_cpu_threads_list[*]}; do
-                    _save_log_path="${LOG_PATH}/server_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
-                    set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
-                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
-                    eval $web_service_cmd
-                    sleep 2s
-                    pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1 "
-                    eval $pipeline_cmd
-                    last_status=${PIPESTATUS[0]}
-                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}"
-                    PID=$!
-                    kill $PID
-                    sleep 2s
-                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
-                done
-            done
-        elif [ ${use_gpu} = "0" ]; then
-            for use_trt in ${web_use_trt_list[*]}; do
-                for precision in ${web_precision_list[*]}; do
-                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
-                        continue
-                    fi
-                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
-                        continue
-                    fi
-                    if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
-                        continue
-                    fi
-                    _save_log_path="${LOG_PATH}/server_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
-                    set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
-                    set_precision=$(func_set_params "${web_precision_key}" "${precision}")
-                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
-                    eval $web_service_cmd
-                    sleep 2s
-                    pipeline_cmd="${python} ${pipeline_py} > ${_save_log_path} 2>&1"
-                    eval $pipeline_cmd
-                    last_status=${PIPESTATUS[0]}
-                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}"
-                    PID=$!
-                    kill $PID
-                    sleep 2s
-                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
-                done
-            done
-        else
-            echo "Does not support hardware other than CPU and GPU Currently!"
+    # choose between the C++ serving client and the python pipeline clients
+    for mode in ${python[*]}; do
+        if [ ${mode} = "cpp" ]; then
+            for use_gpu in ${web_use_gpu_list[*]}; do
+                if [ ${use_gpu} = "null" ]; then
+                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
+                    eval $web_service_cpp_cmd
+                    sleep 2s
+                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
+                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/ > ${_save_log_path} 2>&1"
+                    eval $pipeline_cmd
+                    last_status=${PIPESTATUS[0]}
+                    status_check $last_status "${pipeline_cmd}" "${status_log}"
+                    sleep 2s
+                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                else
+                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
+                    eval $web_service_cpp_cmd
+                    sleep 2s
+                    _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
+                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/ > ${_save_log_path} 2>&1"
+                    eval $pipeline_cmd
+                    last_status=${PIPESTATUS[0]}
+                    status_check $last_status "${pipeline_cmd}" "${status_log}"
+                    sleep 2s
+                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                fi
+            done
+        else
+            # python serving
+            for use_gpu in ${web_use_gpu_list[*]}; do
+                echo ${use_gpu}
+                if [ ${use_gpu} = "null" ]; then
+                    for use_mkldnn in ${web_use_mkldnn_list[*]}; do
+                        if [ ${use_mkldnn} = "False" ]; then
+                            continue
+                        fi
+                        for threads in ${web_cpu_threads_list[*]}; do
+                            set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
+                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
+                            eval $web_service_cmd
+                            sleep 2s
+                            for pipeline in ${pipeline_py[*]}; do
+                                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
+                                pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1 "
+                                eval $pipeline_cmd
+                                last_status=${PIPESTATUS[0]}
+                                eval "cat ${_save_log_path}"
+                                status_check $last_status "${pipeline_cmd}" "${status_log}"
+                                sleep 2s
+                            done
+                            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                        done
+                    done
+                elif [ ${use_gpu} = "0" ]; then
+                    for use_trt in ${web_use_trt_list[*]}; do
+                        for precision in ${web_precision_list[*]}; do
+                            if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                                continue
+                            fi
+                            if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                                continue
+                            fi
+                            if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [[ ${_flag_quant} = "True" ]]; then
+                                continue
+                            fi
+                            set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
+                            set_precision=$(func_set_params "${web_precision_key}" "${precision}")
+                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} &"
+                            eval $web_service_cmd
+                            sleep 2s
+                            for pipeline in ${pipeline_py[*]}; do
+                                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
+                                pipeline_cmd="${python} ${pipeline} ${set_image_dir} > ${_save_log_path} 2>&1"
+                                eval $pipeline_cmd
+                                last_status=${PIPESTATUS[0]}
+                                eval "cat ${_save_log_path}"
+                                status_check $last_status "${pipeline_cmd}" "${status_log}"
+                                sleep 2s
+                            done
+                            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                        done
+                    done
+                else
+                    echo "Does not support hardware other than CPU and GPU Currently!"
+                fi
+            done
+        fi
+    done
}
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index d032bdeac72070e2a243460e7d368d0d2adc3f5b..c62b6274f8dcbc84d08900c5d228d78fd3c0de1a 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -90,7 +90,7 @@ infer_value1=$(func_parser_value "${lines[50]}")
 
 # parser klquant_infer
 if [ ${MODE} = "klquant_whole_infer" ]; then
-    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+    dataline=$(awk 'NR==85, NR==101{print}' $FILENAME)
     lines=(${dataline})
     # parser inference model
     infer_model_dir_list=$(func_parser_value "${lines[1]}")
diff --git a/tools/infer/predict_e2e.py b/tools/infer/predict_e2e.py
index 5029d6059346a00062418d8d1b6cb029b0110643..08b87f36b0670e98e54c3f38c9328fe9462a6d0f 100755
--- a/tools/infer/predict_e2e.py
+++ b/tools/infer/predict_e2e.py
@@ -38,6 +38,7 @@ class TextE2E(object):
     def __init__(self, args):
         self.args = args
         self.e2e_algorithm = args.e2e_algorithm
+        self.use_onnx = args.use_onnx
         pre_process_list = [{
             'E2EResizeForTest': {}
         }, {
@@ -106,21 +107,31 @@ class TextE2E(object):
 
         img = img.copy()
         starttime = time.time()
-        self.input_tensor.copy_from_cpu(img)
-        self.predictor.run()
-        outputs = []
-        for output_tensor in self.output_tensors:
-            output = output_tensor.copy_to_cpu()
-            outputs.append(output)
-
-        preds = {}
-        if self.e2e_algorithm == 'PGNet':
+        if self.use_onnx:
+            input_dict = {}
+            input_dict[self.input_tensor.name] = img
+            outputs = self.predictor.run(self.output_tensors, input_dict)
+            preds = {}
             preds['f_border'] = outputs[0]
             preds['f_char'] = outputs[1]
             preds['f_direction'] = outputs[2]
             preds['f_score'] = outputs[3]
         else:
-            raise NotImplementedError
+            self.input_tensor.copy_from_cpu(img)
+            self.predictor.run()
+            outputs = []
+            for output_tensor in self.output_tensors:
+                output = output_tensor.copy_to_cpu()
+                outputs.append(output)
+
+            preds = {}
+            if self.e2e_algorithm == 'PGNet':
+                preds['f_border'] = outputs[0]
+                preds['f_char'] = outputs[1]
+                preds['f_direction'] = outputs[2]
+                preds['f_score'] = outputs[3]
+            else:
+                raise NotImplementedError
         post_result = self.postprocess_op(preds, shape_list)
         points, strs = post_result['points'], post_result['texts']
         dt_boxes = self.filter_tag_det_res_only_clip(points, ori_im.shape)
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index a3cac647982b77f5ee54d8681b07e677987d9ccb..58170e393cdc9d8441408a89c84aa6f88d683db3 100755
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -179,8 +179,8 @@ def create_predictor(args, mode, logger):
     if args.use_gpu:
         gpu_id = get_infer_gpuid()
         if gpu_id is None:
-            raise ValueError(
-                "Not found GPU in current device. Please check your device or set args.use_gpu as False"
+            logger.warning(
+                "GPU is not found in current device by nvidia-smi. Please check your device or ignore it if you run on Jetson."
             )
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
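The utility.py change above downgrades a hard failure to a warning because GPU detection via `nvidia-smi` fails on Jetson even though a usable GPU is present. A minimal standalone sketch of that pattern (hypothetical helper, not the repo's actual `get_infer_gpuid`):

```python
# Hypothetical illustration of the "warn instead of raise" pattern:
# nvidia-smi may be missing (e.g. on Jetson) even when a usable GPU exists.
import logging
import shutil

logger = logging.getLogger(__name__)

def detect_gpu_id():
    # Return a GPU id only if nvidia-smi is available; None otherwise.
    if shutil.which("nvidia-smi") is None:
        return None
    return 0

if detect_gpu_id() is None:
    # Warn and continue instead of raising, so Jetson devices still use the GPU.
    logger.warning(
        "GPU is not found in current device by nvidia-smi. "
        "Please check your device or ignore it if you run on Jetson.")
```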