diff --git a/PPOCRLabel/README_ch.md b/PPOCRLabel/README_ch.md index d745c9734636e2637f5019a5dd5097dd88cf2e56..e50e7b7daf5ac203129cf80b5a926a988929af5c 100644 --- a/PPOCRLabel/README_ch.md +++ b/PPOCRLabel/README_ch.md @@ -71,6 +71,8 @@ pip3 install opencv-contrib-python-headless==4.2.0.32 # 如果下载过慢请添 PPOCRLabel --lang ch # 启动 ``` +> 如果上述安装出现问题,可以参考3.6节 错误提示 + #### 1.2.2 本地构建whl包并安装 ```bash diff --git a/PPOCRLabel/libs/canvas.py b/PPOCRLabel/libs/canvas.py index d5662ac79a85c07c79ed2b7df315f338a229535c..6ac1f28b85e65c3776d310136352b70c45628db6 100644 --- a/PPOCRLabel/libs/canvas.py +++ b/PPOCRLabel/libs/canvas.py @@ -704,8 +704,9 @@ class Canvas(QWidget): def keyPressEvent(self, ev): key = ev.key() - shapesBackup = [] shapesBackup = copy.deepcopy(self.shapes) + if len(shapesBackup) == 0: + return self.shapesBackups.pop() self.shapesBackups.append(shapesBackup) if key == Qt.Key_Escape and self.current: diff --git a/benchmark/run_benchmark_det.sh b/benchmark/run_benchmark_det.sh index 3ab3ad2b66bd052fa49a850562cff624c5ce2c22..54263e953f3f758b318df147d34ee942a247ed18 100644 --- a/benchmark/run_benchmark_det.sh +++ b/benchmark/run_benchmark_det.sh @@ -17,7 +17,7 @@ function _set_params(){ skip_steps=2 # 解析日志,有些模型前几个step耗时长,需要跳过 (必填) keyword="ips:" # 解析日志,筛选出数据所在行的关键字 (必填) index="1" - model_name=${model_item}_${run_mode}_bs${batch_size}_${fp_item} # model_item 用于yml文件名匹配,model_name 用于数据入库前端展示 + model_name=${model_item}_bs${batch_size}_${fp_item} # model_item 用于yml文件名匹配,model_name 用于数据入库前端展示 # 以下不用修改 device=${CUDA_VISIBLE_DEVICES//,/ } arr=(${device}) diff --git a/benchmark/run_det.sh b/benchmark/run_det.sh index c7755d5d76187faebc2be37a1a5bf64a3bc1381e..be0c141f7ee168d10eebb6efb57158d18ed02f72 100644 --- a/benchmark/run_det.sh +++ b/benchmark/run_det.sh @@ -2,6 +2,7 @@ # 提供可稳定复现性能的脚本,默认在标准docker环境内py37执行: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7 paddle=2.1.2 py=37 # 执行目录: ./PaddleOCR # 1 安装该模型需要的依赖 (如需开启优化策略请注明) +log_path=${LOG_PATH_INDEX_DIR:-$(pwd)} python -m pip install -r requirements.txt # 2 拷贝该模型需要数据、预训练模型 wget -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar && cd train_data && tar xf icdar2015.tar && cd ../ @@ -12,18 +13,22 @@ wget -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dyg model_mode_list=(det_res18_db_v2.0 det_r50_vd_east det_r50_vd_pse) fp_item_list=(fp32) -bs_list=(8 16) for model_mode in ${model_mode_list[@]}; do for fp_item in ${fp_item_list[@]}; do + if [ ${model_mode} == "det_r50_vd_east" ]; then + bs_list=(16) + else + bs_list=(8 16) + fi for bs_item in ${bs_list[@]}; do echo "index is speed, 1gpus, begin, ${model_name}" run_mode=sp - log_name=ocr_${model_mode}_${run_mode}_bs${bs_item}_${fp_item} + log_name=ocr_${model_mode}_bs${bs_item}_${fp_item}_${run_mode} CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark_det.sh ${run_mode} ${bs_item} ${fp_item} 1 ${model_mode} | tee ${log_path}/${log_name}_speed_1gpus 2>&1 # (5min) sleep 60 echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_name}" run_mode=mp - log_name=ocr_${model_mode}_${run_mode}_bs${bs_item}_${fp_item} + log_name=ocr_${model_mode}_bs${bs_item}_${fp_item}_${run_mode} CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark_det.sh ${run_mode} ${bs_item} ${fp_item} 2 ${model_mode} | tee ${log_path}/${log_name}_speed_8gpus8p 2>&1 sleep 60 done diff --git a/configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml b/configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml index 
ab484a44833a405513d7f2b4079a4da4c2e403c8..bb6a196864b6e9e7525f2b5217f0c90ea2ca05a4 100644
--- a/configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml
+++ b/configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml
@@ -18,6 +18,7 @@ Global:
 Architecture:
   name: DistillationModel
   algorithm: Distillation
+  model_type: det
   Models:
     Teacher:
       freeze_params: true
diff --git a/deploy/pdserving/README.md b/deploy/pdserving/README.md
index cb2845c581d244e80ca597e0eb485a16ad369f20..c461fd5e54d3a51ad3427f83a1fca35cbe3ab2d8 100644
--- a/deploy/pdserving/README.md
+++ b/deploy/pdserving/README.md
@@ -45,63 +45,67 @@ PaddleOCR operating environment and Paddle Serving operating environment are needed
    ```
 3. Install the client to send requests to the service
-   In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version.
-   The python3.7 version is recommended here:
-   ```
-   wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl
-   pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl
-   ```
-
-4. Install serving-app
-   ```
-   pip3 install paddle-serving-app==0.6.1
-   ```
+```bash
+# Install serving, used to start the service
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl
+pip3 install paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl
+# For a CUDA 10.1 environment, install paddle-serving-server with the following commands instead
+# wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl
+# pip3 install paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl
+
+# Install the client, used to send requests to the service
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.7.0-cp37-none-any.whl
+pip3 install paddle_serving_client-0.7.0-cp37-none-any.whl
+
+# Install serving-app
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_app-0.7.0-py3-none-any.whl
+pip3 install paddle_serving_app-0.7.0-py3-none-any.whl
+```
 
-   **note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
+   **Note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Latest_Packages_CN.md).
 
 ## Model conversion
 When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy.
-Firstly, download the [inference model](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-20-series-model-listupdate-on-dec-15) of PPOCR +Firstly, download the [inference model](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/README_ch.md#pp-ocr%E7%B3%BB%E5%88%97%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8%E6%9B%B4%E6%96%B0%E4%B8%AD) of PPOCR ``` # Download and unzip the OCR text detection model -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar -O ch_PP-OCRv2_det_infer.tar && tar -xf ch_PP-OCRv2_det_infer.tar # Download and unzip the OCR text recognition model -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar - +wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar -O ch_PP-OCRv2_rec_infer.tar && tar -xf ch_PP-OCRv2_rec_infer.tar ``` Then, you can use installed paddle_serving_client tool to convert inference model to mobile model. ``` # Detection model conversion -python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_det_infer/ \ +python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_det_infer/ \ --model_filename inference.pdmodel \ --params_filename inference.pdiparams \ - --serving_server ./ppocr_det_mobile_2.0_serving/ \ - --serving_client ./ppocr_det_mobile_2.0_client/ + --serving_server ./ppocrv2_det_serving/ \ + --serving_client ./ppocrv2_det_client/ # Recognition model conversion -python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_rec_infer/ \ +python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_rec_infer/ \ --model_filename inference.pdmodel \ --params_filename inference.pdiparams \ - --serving_server ./ppocr_rec_mobile_2.0_serving/ \ - --serving_client ./ppocr_rec_mobile_2.0_client/ + --serving_server ./ppocrv2_rec_serving/ \ + --serving_client ./ppocrv2_rec_client/ ``` After the detection model is converted, there will be additional folders of `ppocr_det_mobile_2.0_serving` and `ppocr_det_mobile_2.0_client` in the current folder, with the following format: ``` -|- ppocr_det_mobile_2.0_serving/ - |- __model__ - |- __params__ - |- serving_server_conf.prototxt - |- serving_server_conf.stream.prototxt - -|- ppocr_det_mobile_2.0_client - |- serving_client_conf.prototxt - |- serving_client_conf.stream.prototxt +|- ppocrv2_det_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- ppocrv2_det_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt ``` The recognition model is the same. diff --git a/deploy/pdserving/README_CN.md b/deploy/pdserving/README_CN.md index 067be8bbda10d971b709afdf822aea96a979d000..00024639b0b108225a0835499f62174b6618ae47 100644 --- a/deploy/pdserving/README_CN.md +++ b/deploy/pdserving/README_CN.md @@ -34,70 +34,66 @@ PaddleOCR提供2种服务部署方式: - 准备PaddleServing的运行环境,步骤如下 -1. 安装serving,用于启动服务 - ``` - pip3 install paddle-serving-server==0.6.1 # for CPU - pip3 install paddle-serving-server-gpu==0.6.1 # for GPU - # 其他GPU环境需要确认环境再选择执行如下命令 - pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 - pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 - ``` - -2. 
安装client,用于向服务发送请求 - 在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本: - - ``` - wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl - pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl - ``` - -3. 安装serving-app - ``` - pip3 install paddle-serving-app==0.6.1 - ``` +```bash +# 安装serving,用于启动服务 +wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl +pip3 install paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl +# 如果是cuda10.1环境,可以使用下面的命令安装paddle-serving-server +# wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl +# pip3 install paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl + +# 安装client,用于向服务发送请求 +wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.7.0-cp37-none-any.whl +pip3 install paddle_serving_client-0.7.0-cp37-none-any.whl + +# 安装serving-app +wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_app-0.7.0-py3-none-any.whl +pip3 install paddle_serving_app-0.7.0-py3-none-any.whl +``` - **Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。 +**Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Latest_Packages_CN.md)。 ## 模型转换 使用PaddleServing做服务化部署时,需要将保存的inference模型转换为serving易于部署的模型。 -首先,下载PPOCR的[inference模型](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-20-series-model-listupdate-on-dec-15) -``` +首先,下载PPOCR的[inference模型](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-series-model-listupdate-on-september-8th) + +```bash # 下载并解压 OCR 文本检测模型 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar -O ch_PP-OCRv2_det_infer.tar && tar -xf ch_PP-OCRv2_det_infer.tar # 下载并解压 OCR 文本识别模型 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar +wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar -O ch_PP-OCRv2_rec_infer.tar && tar -xf ch_PP-OCRv2_rec_infer.tar ``` 接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。 -``` +```bash # 转换检测模型 -python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_det_infer/ \ +python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_det_infer/ \ --model_filename inference.pdmodel \ --params_filename inference.pdiparams \ - --serving_server ./ppocr_det_mobile_2.0_serving/ \ - --serving_client ./ppocr_det_mobile_2.0_client/ + --serving_server ./ppocrv2_det_serving/ \ + --serving_client ./ppocrv2_det_client/ # 转换识别模型 -python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_rec_infer/ \ +python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_rec_infer/ \ --model_filename inference.pdmodel \ --params_filename inference.pdiparams \ - --serving_server ./ppocr_rec_mobile_2.0_serving/ \ - --serving_client ./ppocr_rec_mobile_2.0_client/ + --serving_server ./ppocrv2_rec_serving/ \ + --serving_client ./ppocrv2_rec_client/ ``` -检测模型转换完成后,会在当前文件夹多出`ppocr_det_mobile_2.0_serving` 和`ppocr_det_mobile_2.0_client`的文件夹,具备如下格式: +检测模型转换完成后,会在当前文件夹多出`ppocrv2_det_serving` 和`ppocrv2_det_client`的文件夹,具备如下格式: ``` -|- ppocr_det_mobile_2.0_serving/ +|- 
ppocrv2_det_serving/ |- __model__ |- __params__ |- serving_server_conf.prototxt |- serving_server_conf.stream.prototxt -|- ppocr_det_mobile_2.0_client +|- ppocrv2_det_client |- serving_client_conf.prototxt |- serving_client_conf.stream.prototxt diff --git a/deploy/pdserving/config.yml b/deploy/pdserving/config.yml index 2aae922dfa12f46d1c0ebd352e8d3a7077065cf8..f3b0f7ec5a47bb9c513ab3d75f7d2d4138f88c4a 100644 --- a/deploy/pdserving/config.yml +++ b/deploy/pdserving/config.yml @@ -34,7 +34,7 @@ op: client_type: local_predictor #det模型路径 - model_config: ./ppocr_det_mobile_2.0_serving + model_config: ./ppocrv2_det_serving #Fetch结果列表,以client_config中fetch_var的alias_name为准 fetch_list: ["save_infer_model/scale_0.tmp_1"] @@ -60,7 +60,7 @@ op: client_type: local_predictor #rec模型路径 - model_config: ./ppocr_rec_mobile_2.0_serving + model_config: ./ppocrv2_rec_serving #Fetch结果列表,以client_config中fetch_var的alias_name为准 fetch_list: ["save_infer_model/scale_0.tmp_1"] diff --git a/deploy/pdserving/web_service.py b/deploy/pdserving/web_service.py index 21db1e1411a8706dbbd9a22ce2ce7db8e16da5ec..b97c6e1f564a61bb9792542b9e9f1e88d782e80d 100644 --- a/deploy/pdserving/web_service.py +++ b/deploy/pdserving/web_service.py @@ -54,7 +54,7 @@ class DetOp(Op): _, self.new_h, self.new_w = det_img.shape return {"x": det_img[np.newaxis, :].copy()}, False, None, "" - def postprocess(self, input_dicts, fetch_dict, log_id): + def postprocess(self, input_dicts, fetch_dict, data_id, log_id): det_out = fetch_dict["save_infer_model/scale_0.tmp_1"] ratio_list = [ float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w @@ -129,7 +129,7 @@ class RecOp(Op): return feed_list, False, None, "" - def postprocess(self, input_dicts, fetch_data, log_id): + def postprocess(self, input_dicts, fetch_data, data_id, log_id): res_list = [] if isinstance(fetch_data, dict): if len(fetch_data) > 0: diff --git a/deploy/pdserving/web_service_det.py b/deploy/pdserving/web_service_det.py index 25ac2f37dbd3cdf05b3503abaab0c5651867fae9..ee39388425763d789ada76cf0a9db9f812fe8d2a 100644 --- a/deploy/pdserving/web_service_det.py +++ b/deploy/pdserving/web_service_det.py @@ -54,7 +54,7 @@ class DetOp(Op): _, self.new_h, self.new_w = det_img.shape return {"x": det_img[np.newaxis, :].copy()}, False, None, "" - def postprocess(self, input_dicts, fetch_dict, log_id): + def postprocess(self, input_dicts, fetch_dict, data_id, log_id): det_out = fetch_dict["save_infer_model/scale_0.tmp_1"] ratio_list = [ float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w diff --git a/deploy/pdserving/web_service_rec.py b/deploy/pdserving/web_service_rec.py index 6b3cf707f0f19034a0734fd27824feb4fb6cce20..f5cd8bf053c604786fecb9b71749b3c98f2552a2 100644 --- a/deploy/pdserving/web_service_rec.py +++ b/deploy/pdserving/web_service_rec.py @@ -56,7 +56,7 @@ class RecOp(Op): feed_list.append(feed) return feed_list, False, None, "" - def postprocess(self, input_dicts, fetch_data, log_id): + def postprocess(self, input_dicts, fetch_data, data_id, log_id): res_list = [] if isinstance(fetch_data, dict): if len(fetch_data) > 0: diff --git a/deploy/slim/prune/export_prune_model.py b/deploy/slim/prune/export_prune_model.py index 2c9d0a1831c3c0de321668dfdde55aecb825ab06..f4385972009e1b5382504754dc655381f0cc7717 100644 --- a/deploy/slim/prune/export_prune_model.py +++ b/deploy/slim/prune/export_prune_model.py @@ -52,12 +52,17 @@ def main(config, device, logger, vdl_writer): config['Architecture']["Head"]['out_channels'] = char_num model = build_model(config['Architecture']) - flops = 
paddle.flops(model, [1, 3, 640, 640])
-    logger.info(f"FLOPs before pruning: {flops}")
+    if config['Architecture']['model_type'] == 'det':
+        input_shape = [1, 3, 640, 640]
+    elif config['Architecture']['model_type'] == 'rec':
+        input_shape = [1, 3, 32, 320]
+
+    flops = paddle.flops(model, input_shape)
+    logger.info("FLOPs before pruning: {}".format(flops))
 
     from paddleslim.dygraph import FPGMFilterPruner
     model.train()
-    pruner = FPGMFilterPruner(model, [1, 3, 640, 640])
+    pruner = FPGMFilterPruner(model, input_shape)
 
     # build metric
     eval_class = build_metric(config['Metric'])
@@ -65,8 +70,13 @@ def main(config, device, logger, vdl_writer):
     def eval_fn():
         metric = program.eval(model, valid_dataloader, post_process_class,
                               eval_class)
-        logger.info(f"metric['hmean']: {metric['hmean']}")
-        return metric['hmean']
+        if config['Architecture']['model_type'] == 'det':
+            main_indicator = 'hmean'
+        else:
+            main_indicator = 'acc'
+        logger.info("metric[{}]: {}".format(main_indicator, metric[
+            main_indicator]))
+        return metric[main_indicator]
 
     params_sensitive = pruner.sensitive(
         eval_func=eval_fn,
@@ -81,18 +91,22 @@ def main(config, device, logger, vdl_writer):
     # calculate pruned params's ratio
     params_sensitive = pruner._get_ratios_by_loss(params_sensitive, loss=0.02)
     for key in params_sensitive.keys():
-        logger.info(f"{key}, {params_sensitive[key]}")
+        logger.info("{}, {}".format(key, params_sensitive[key]))
 
     plan = pruner.prune_vars(params_sensitive, [0])
 
-    flops = paddle.flops(model, [1, 3, 640, 640])
-    logger.info(f"FLOPs after pruning: {flops}")
+    flops = paddle.flops(model, input_shape)
+    logger.info("FLOPs after pruning: {}".format(flops))
 
     # load pretrain model
     load_model(config, model)
     metric = program.eval(model, valid_dataloader, post_process_class,
                           eval_class)
-    logger.info(f"metric['hmean']: {metric['hmean']}")
+    if config['Architecture']['model_type'] == 'det':
+        main_indicator = 'hmean'
+    else:
+        main_indicator = 'acc'
+    logger.info("metric[{}]: {}".format(main_indicator, metric[main_indicator]))
 
     # start export model
     from paddle.jit import to_static
diff --git a/deploy/slim/prune/sensitivity_anal.py b/deploy/slim/prune/sensitivity_anal.py
index c5d008779eaafef36f4264b45295ec7bc78e3d27..306f1a83ae0945614518514dcd00ca869254d5f8 100644
--- a/deploy/slim/prune/sensitivity_anal.py
+++ b/deploy/slim/prune/sensitivity_anal.py
@@ -73,13 +73,18 @@ def main(config, device, logger, vdl_writer):
     char_num = len(getattr(post_process_class, 'character'))
     config['Architecture']["Head"]['out_channels'] = char_num
     model = build_model(config['Architecture'])
+    if config['Architecture']['model_type'] == 'det':
+        input_shape = [1, 3, 640, 640]
+    elif config['Architecture']['model_type'] == 'rec':
+        input_shape = [1, 3, 32, 320]
+    flops = paddle.flops(model, input_shape)
 
-    flops = paddle.flops(model, [1, 3, 640, 640])
     logger.info("FLOPs before pruning: {}".format(flops))
 
     from paddleslim.dygraph import FPGMFilterPruner
     model.train()
-    pruner = FPGMFilterPruner(model, [1, 3, 640, 640])
+
+    pruner = FPGMFilterPruner(model, input_shape)
 
     # build loss
     loss_class = build_loss(config['Loss'])
@@ -107,8 +112,14 @@ def main(config, device, logger, vdl_writer):
     def eval_fn():
         metric = program.eval(model, valid_dataloader, post_process_class,
                               eval_class, False)
-        logger.info("metric['hmean']: {}".format(metric['hmean']))
-        return metric['hmean']
+        if config['Architecture']['model_type'] == 'det':
+            main_indicator = 'hmean'
+        else:
+            main_indicator = 'acc'
+
+        logger.info("metric[{}]: {}".format(main_indicator, metric[
+            main_indicator]))
+        return metric[main_indicator]
 
     run_sensitive_analysis = False
     """
@@ -149,7 +160,7 @@ def main(config, device, logger, vdl_writer):
 
     plan = pruner.prune_vars(params_sensitive, [0])
 
-    flops = paddle.flops(model, [1, 3, 640, 640])
+    flops = paddle.flops(model, input_shape)
     logger.info("FLOPs after pruning: {}".format(flops))
 
     # start train
diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py
index dddae923de223178665e3bfb55a2e7a8c0d5ba17..0cb86108d2275dc6ee1a74e118c27b94131975d3 100755
--- a/deploy/slim/quantization/export_model.py
+++ b/deploy/slim/quantization/export_model.py
@@ -111,7 +111,7 @@ def main():
     valid_dataloader = build_dataloader(config, 'Eval', device, logger)
 
     use_srn = config['Architecture']['algorithm'] == "SRN"
-    model_type = config['Architecture']['model_type']
+    model_type = config['Architecture'].get('model_type', None)
     # start eval
     metric = program.eval(model, valid_dataloader, post_process_class,
                           eval_class, model_type, use_srn)
@@ -120,8 +120,7 @@ def main():
     for k, v in metric.items():
         logger.info('{}:{}'.format(k, v))
 
-    infer_shape = [3, 32, 100] if config['Architecture'][
-        'model_type'] != "det" else [3, 640, 640]
+    infer_shape = [3, 32, 100] if model_type == "rec" else [3, 640, 640]
 
     save_path = config["Global"]["save_inference_dir"]
diff --git a/doc/datasets/ch_doc2.jpg b/doc/datasets/ch_doc2.jpg
deleted file mode 100644
index 23343b8dedbae7be025552e3a45f9b7af7cf49ee..0000000000000000000000000000000000000000
Binary files a/doc/datasets/ch_doc2.jpg and /dev/null differ
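The slim/prune and slim/quantization changes above all branch on `model_type` in the same way: detection models are probed with a 640x640 input and scored by `hmean`, recognition models with a 32x320 input and scored by `acc`. A condensed, standalone sketch of that dispatch (the helper name `probe_settings` is ours, not part of the patch):

```python
# Condensed view of the model_type dispatch added across the slim scripts:
# each script picks a probe shape for paddle.flops and a main metric key.
def probe_settings(model_type):
    if model_type == 'det':
        return [1, 3, 640, 640], 'hmean'  # detection: NCHW probe, H-mean
    elif model_type == 'rec':
        return [1, 3, 32, 320], 'acc'     # recognition: NCHW probe, accuracy
    raise ValueError('unsupported model_type: {}'.format(model_type))

input_shape, main_indicator = probe_settings('rec')
print(input_shape, main_indicator)  # [1, 3, 32, 320] acc
```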
+ """ + pass + ``` + + + +## 附录2:文档规范 + +### 2.1 总体说明 + +- 文档位置:如果您增加的新功能可以补充在原有的Markdown文件中,请**不要重新新建**一个文件。如果您对添加的位置不清楚,可以先PR代码,然后在commit中询问官方人员。 + +- 新增Markdown文档名称:使用英文描述文档内容,一般由小写字母与下划线组合而成,例如 `add_new_algorithm.md` + +- 新增Markdown文档格式:目录 - 正文 - FAQ + + > 目录生成方法可以使用 [此网站](https://ecotrust-canada.github.io/markdown-toc/) 将md内容复制之后自动提取目录,然后在md文件的每个标题前添加 `` + +- 中英双语:任何对文档的改动或新增都需要分别在中文和英文文档上进行。 + +### 2.2 格式规范 + +- 标题格式:文档标题格式按照:阿拉伯数字小数点组合 - 空格 - 标题的格式(例如 `2.1 XXXX` , `2. XXXX`) + +- 代码块:通过代码块格式展示需要运行的代码,在代码块前描述命令参数的含义。例如: + + > 检测+方向分类器+识别全流程:设置方向分类器参数 `--use_angle_cls true` 后可对竖排文本进行识别。 + > + > ``` + > paddleocr --image_dir ./imgs/11.jpg --use_angle_cls true + > ``` + +- 变量引用:如果在行内引用到代码变量或命令参数,需要用行内代码表示,例如上方 `--use_angle_cls true` ,并在前后各空一格 + +- 补充说明:通过引用格式 `>` 补充说明,或对注意事项进行说明 + +- 图片:如果在说明文档中增加了图片,请规范图片的命名形式(描述图片内容),并将图片添加在 `doc/` 下 + + + +## 附录3:Pull Request说明 + +### 3.1 PaddleOCR分支说明 + +PaddleOCR未来将维护2种分支,分别为: + +- release/x.x系列分支:为稳定的发行版本分支,也是默认分支。PaddleOCR会根据功能更新情况发布新的release分支,同时适配Paddle的release版本。随着版本迭代,release/x.x系列分支会越来越多,默认维护最新版本的release分支。 +- dygraph分支:为开发分支,适配Paddle动态图的dygraph版本,主要用于开发新功能。如果有同学需要进行二次开发,请选择dygraph分支。为了保证dygraph分支能在需要的时候拉出release/x.x分支,dygraph分支的代码只能使用Paddle最新release分支中有效的api。也就是说,如果Paddle dygraph分支中开发了新的api,但尚未出现在release分支代码中,那么请不要在PaddleOCR中使用。除此之外,对于不涉及api的性能优化、参数调整、策略更新等,都可以正常进行开发。 + +PaddleOCR的历史分支,未来将不再维护。考虑到一些同学可能仍在使用,这些分支还会继续保留: + +- develop分支:这个分支曾用于静态图的开发与测试,目前兼容>=1.7版本的Paddle。如果有特殊需求,要适配旧版本的Paddle,那还可以使用这个分支,但除了修复bug外不再更新代码。 + +PaddleOCR欢迎大家向repo中积极贡献代码,下面给出一些贡献代码的基本流程。 + +### 3.2 PaddleOCR代码提交流程与规范 + +> 如果你熟悉Git使用,可以直接跳转到 [3.2.10 提交代码的一些约定](#提交代码的一些约定) + +#### 3.2.1 创建你的 `远程仓库` + +- 在PaddleOCR的 [GitHub首页](https://github.com/PaddlePaddle/PaddleOCR),点击左上角 `Fork` 按钮,在你的个人目录下创建 `远程仓库`,比如`https://github.com/{your_name}/PaddleOCR`。 + +![banner](../banner.png) + +- 将 `远程仓库` Clone到本地 + +``` +# 拉取develop分支的代码 +git clone https://github.com/{your_name}/PaddleOCR.git -b dygraph +cd PaddleOCR +``` + +> 多数情况下clone失败是由于网络原因,请稍后重试或配置代理 + +#### 3.2.2 和 `远程仓库` 建立连接 + +首先查看当前 `远程仓库` 的信息。 + +``` +git remote -v +# origin https://github.com/{your_name}/PaddleOCR.git (fetch) +# origin https://github.com/{your_name}/PaddleOCR.git (push) +``` + +只有clone的 `远程仓库` 的信息,也就是自己用户名下的 PaddleOCR,接下来我们创建一个原始 PaddleOCR 仓库的远程主机,命名为 upstream。 + +``` +git remote add upstream https://github.com/PaddlePaddle/PaddleOCR.git +``` + +使用 `git remote -v` 查看当前 `远程仓库` 的信息,输出如下,发现包括了origin和upstream 2个 `远程仓库` 。 + +``` +origin https://github.com/{your_name}/PaddleOCR.git (fetch) +origin https://github.com/{your_name}/PaddleOCR.git (push) +upstream https://github.com/PaddlePaddle/PaddleOCR.git (fetch) +upstream https://github.com/PaddlePaddle/PaddleOCR.git (push) +``` + +这主要是为了后续在提交pull request(PR)时,始终保持本地仓库最新。 + +#### 3.2.3 创建本地分支 + +可以基于当前分支创建新的本地分支,命令如下。 + +``` +git checkout -b new_branch +``` + +也可以基于远程或者上游的分支创建新的分支,命令如下。 + +``` +# 基于用户远程仓库(origin)的develop创建new_branch分支 +git checkout -b new_branch origin/develop +# 基于上游远程仓库(upstream)的develop创建new_branch分支 +# 如果需要从upstream创建新的分支,需要首先使用git fetch upstream获取上游代码 +git checkout -b new_branch upstream/develop +``` + +最终会显示切换到新的分支,输出信息如下 + +``` +Branch new_branch set up to track remote branch develop from upstream. +Switched to a new branch 'new_branch' +``` + +#### 3.2.4 使用pre-commit勾子 + +Paddle 开发人员使用 pre-commit 工具来管理 Git 预提交钩子。 它可以帮助我们格式化源代码(C++,Python),在提交(commit)前自动检查一些基本事宜(如每个文件只有一个 EOL,Git 中不要添加大文件等)。 + +pre-commit测试是 Travis-CI 中单元测试的一部分,不满足钩子的 PR 不能被提交到 PaddleOCR,首先安装并在当前目录运行它: + +``` +pip install pre-commit +pre-commit install +``` + + > 1. 
Paddle 使用 clang-format 来调整 C/C++ 源代码格式,请确保 `clang-format` 版本在 3.8 以上。 + > + > 2. 通过pip install pre-commit和conda install -c conda-forge pre-commit安装的yapf稍有不同的,PaddleOCR 开发人员使用的是 `pip install pre-commit`。 + +#### 3.2.5 修改与提交代码 + + 假设对PaddleOCR的 `README.md` 做了一些修改,可以通过 `git status` 查看改动的文件,然后使用 `git add` 添加改动文件。 + +``` +git status # 查看改动文件 +git add README.md +pre-commit +``` + +重复上述步骤,直到pre-comit格式检查不报错。如下所示。 + +![img](../precommit_pass.png) + +使用下面的命令完成提交。 + +``` +git commit -m "your commit info" +``` + +#### 3.2.6 保持本地仓库最新 + +获取 upstream 的最新代码并更新当前分支。这里的upstream来自于2.2节的`和远程仓库建立连接`部分。 + +``` +git fetch upstream +# 如果是希望提交到其他分支,则需要从upstream的其他分支pull代码,这里是develop +git pull upstream develop +``` + +#### 3.2.7 push到远程仓库 + +``` +git push origin new_branch +``` + +#### 3.2.7 提交Pull Request + +点击new pull request,选择本地分支和目标分支,如下图所示。在PR的描述说明中,填写该PR所完成的功能。接下来等待review,如果有需要修改的地方,参照上述步骤更新 origin 中的对应分支即可。 + +![banner](../pr.png) + +#### 3.2.8 签署CLA协议和通过单元测试 + +- 签署CLA 在首次向PaddlePaddle提交Pull Request时,您需要您签署一次CLA(Contributor License Agreement)协议,以保证您的代码可以被合入,具体签署方式如下: + + 1. 请您查看PR中的Check部分,找到license/cla,并点击右侧detail,进入CLA网站 + + 2. 点击CLA网站中的“Sign in with GitHub to agree”,点击完成后将会跳转回您的Pull Request页面 + +#### 3.2.9 删除分支 + +- 删除远程分支 + + 在 PR 被 merge 进主仓库后,我们可以在 PR 的页面删除远程仓库的分支。 + + 也可以使用 `git push origin :分支名` 删除远程分支,如: + + ``` + git push origin :new_branch + ``` + +- 删除本地分支 + + ``` + # 切换到develop分支,否则无法删除当前分支 + git checkout develop + + # 删除new_branch分支 + git branch -D new_branch + ``` + + + +#### 3.2.10 提交代码的一些约定 + +为了使官方维护人员在评审代码时更好地专注于代码本身,请您每次提交代码时,遵守以下约定: + +1)请保证Travis-CI 中单元测试能顺利通过。如果没过,说明提交的代码存在问题,官方维护人员一般不做评审。 + +2)提交Pull Request前: + +- 请注意commit的数量。 + + 原因:如果仅仅修改一个文件但提交了十几个commit,每个commit只做了少量的修改,这会给评审人带来很大困扰。评审人需要逐一查看每个commit才能知道做了哪些修改,且不排除commit之间的修改存在相互覆盖的情况。 + + 建议:每次提交时,保持尽量少的commit,可以通过git commit --amend补充上次的commit。对已经Push到远程仓库的多个commit,可以参考[squash commits after push](https://stackoverflow.com/questions/5667884/how-to-squash-commits-in-git-after-they-have-been-pushed)。 + +- 请注意每个commit的名称:应能反映当前commit的内容,不能太随意。 + + +3)如果解决了某个Issue的问题,请在该Pull Request的第一个评论框中加上:fix #issue_number,这样当该Pull Request被合并后,会自动关闭对应的Issue。关键词包括:close, closes, closed, fix, fixes, fixed, resolve, resolves, resolved,请选择合适的词汇。详细可参考[Closing issues via commit messages](https://help.github.com/articles/closing-issues-via-commit-messages)。 + +此外,在回复评审人意见时,请您遵守以下约定: + +1)官方维护人员的每一个review意见都希望得到回复,这样会更好地提升开源社区的贡献。 + +- 对评审意见同意且按其修改完的,给个简单的Done即可; +- 对评审意见不同意的,请给出您自己的反驳理由。 + +2)如果评审意见比较多: + +- 请给出总体的修改情况。 +- 请采用`start a review`进行回复,而非直接回复的方式。原因是每个回复都会发送一封邮件,会造成邮件灾难。 \ No newline at end of file diff --git a/doc/doc_ch/datasets.md b/doc/doc_ch/datasets.md index 6d84dbbe484be1e2b19a4dedced90f61b7085148..d365fd711aff2dffcd30dd06028734cc707d5df0 100644 --- a/doc/doc_ch/datasets.md +++ b/doc/doc_ch/datasets.md @@ -49,7 +49,6 @@ https://aistudio.baidu.com/aistudio/datasetdetail/8429 - 每个样本固定10个字符,字符随机截取自语料库中的句子 - 图片分辨率统一为280x32 ![](../datasets/ch_doc1.jpg) - ![](../datasets/ch_doc2.jpg) ![](../datasets/ch_doc3.jpg) - **下载地址**:https://pan.baidu.com/s/1QkI7kjah8SPHwOQ40rS1Pw (密码:lu7m) diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md index cfc9d52bf280400982a9fcd9941ddc4cce3f5e5c..f76ae7f842fb6b7002e084be59dc7ccb31f39771 100644 --- a/doc/doc_ch/detection.md +++ b/doc/doc_ch/detection.md @@ -247,3 +247,7 @@ Q1: 训练模型转inference 模型之后预测效果不一致? 
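The detection.md FAQ below hinges on a concrete mismatch: evaluation resizes ICDAR2015 images to a fixed [736, 1280], while predict_det.py by default only caps the longest side at 960. A toy comparison of the two policies (illustrative only, not the actual preprocessing operator in ppocr/data/imaug/operators.py):

```python
# Toy versions of the two resize policies discussed in the FAQ below:
# evaluation uses a fixed target size, inference caps the longest side.
def eval_resize(h, w, target=(736, 1280)):
    return target  # fixed size used when evaluating DB on ICDAR2015

def infer_resize(h, w, limit_side_len=960):
    ratio = min(1.0, limit_side_len / max(h, w))
    # DB inputs must have sides divisible by 32
    return (int(round(h * ratio / 32)) * 32, int(round(w * ratio / 32)) * 32)

print(eval_resize(720, 1280))   # (736, 1280)
print(infer_resize(720, 1280))  # (544, 960): a different input scale, hence
                                # slightly different detection results
```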
diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md
index cfc9d52bf280400982a9fcd9941ddc4cce3f5e5c..f76ae7f842fb6b7002e084be59dc7ccb31f39771 100644
--- a/doc/doc_ch/detection.md
+++ b/doc/doc_ch/detection.md
@@ -247,3 +247,7 @@ Q1: 训练模型转inference 模型之后预测效果不一致?
 **A**:此类问题出现较多,问题多是trained model预测时候的预处理、后处理参数和inference model预测的时候的预处理、后处理参数不一致导致的。以det_mv3_db.yml配置文件训练的模型为例,训练模型、inference模型预测结果不一致问题解决方式如下:
 - 检查[trained model预处理](https://github.com/PaddlePaddle/PaddleOCR/blob/c1ed243fb68d5d466258243092e56cbae32e2c14/configs/det/det_mv3_db.yml#L116),和[inference model的预测预处理](https://github.com/PaddlePaddle/PaddleOCR/blob/c1ed243fb68d5d466258243092e56cbae32e2c14/tools/infer/predict_det.py#L42)函数是否一致。算法在评估的时候,输入图像大小会影响精度,为了和论文保持一致,训练icdar15配置文件中将图像resize到[736, 1280],但是在inference model预测的时候只有一套默认参数,会考虑到预测速度问题,默认限制图像最长边为960做resize的。训练模型预处理和inference模型的预处理函数位于[ppocr/data/imaug/operators.py](https://github.com/PaddlePaddle/PaddleOCR/blob/c1ed243fb68d5d466258243092e56cbae32e2c14/ppocr/data/imaug/operators.py#L147)
 - 检查[trained model后处理](https://github.com/PaddlePaddle/PaddleOCR/blob/c1ed243fb68d5d466258243092e56cbae32e2c14/configs/det/det_mv3_db.yml#L51),和[inference 后处理参数](https://github.com/PaddlePaddle/PaddleOCR/blob/c1ed243fb68d5d466258243092e56cbae32e2c14/tools/infer/utility.py#L50)是否一致。
+
+Q2: 训练EAST模型提示找不到lanms库?
+
+**A**:执行 `pip3 install lanms-nova` 即可。
diff --git a/doc/doc_ch/distributed_training.md b/doc/doc_ch/distributed_training.md
index 411ce5ba6aea26755cc65c405be6e0f0d5fd4738..e0251b21ea1157084e4e1b1d77429264d452aa20 100644
--- a/doc/doc_ch/distributed_training.md
+++ b/doc/doc_ch/distributed_training.md
@@ -13,7 +13,7 @@
 ```shell
 python3 -m paddle.distributed.launch \
     --log_dir=./log/ \
-    --gpus '0,1,2,3,4,5,6,7' \
+    --gpus "0,1,2,3,4,5,6,7" \
     tools/train.py \
     -c configs/rec/rec_mv3_none_bilstm_ctc.yml
 ```
diff --git a/doc/doc_ch/inference.md b/doc/doc_ch/inference.md
index 4e0f1d131e2547f0d4a8bdf35c0f4a6f8bf2e7a3..c02da14af495cd807668dca6d7f3823d1de6820d 100755
--- a/doc/doc_ch/inference.md
+++ b/doc/doc_ch/inference.md
@@ -34,6 +34,8 @@ inference 模型(`paddle.jit.save`保存的模型)
 - [1. 超轻量中文OCR模型推理](#超轻量中文OCR模型推理)
 - [2. 其他模型推理](#其他模型推理)
 
+- [六、参数解释](#参数解释)
+
 ## 一、训练模型转inference模型
@@ -394,3 +396,127 @@ python3 tools/infer/predict_system.py --image_dir="./doc/imgs_en/img_10.jpg" --d
 执行命令后,识别结果图像如下:
 
 ![](../imgs_results/img_10_east_starnet.jpg)
+
+
+
+
+# 六、参数解释
+
+更多关于预测过程的参数解释如下所示。
+
+* 全局信息
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| image_dir | str | 无,必须显式指定 | 图像或者文件夹路径 |
+| vis_font_path | str | "./doc/fonts/simfang.ttf" | 用于可视化的字体路径 |
+| drop_score | float | 0.5 | 识别得分小于该值的结果会被丢弃,不会作为返回结果 |
+| use_pdserving | bool | False | 是否使用Paddle Serving进行预测 |
+| warmup | bool | False | 是否开启warmup,在统计预测耗时的时候,可以使用这种方法 |
+| draw_img_save_dir | str | "./inference_results" | 系统串联预测OCR结果的保存文件夹 |
+| save_crop_res | bool | False | 是否保存OCR的识别文本图像 |
+| crop_res_save_dir | str | "./output" | 保存OCR识别出来的文本图像路径 |
+| use_mp | bool | False | 是否开启多进程预测 |
+| total_process_num | int | 6 | 开启的进程数,`use_mp`为`True`时生效 |
+| process_id | int | 0 | 当前进程的id号,无需自己修改 |
+| benchmark | bool | False | 是否开启benchmark,对预测速度、显存占用等进行统计 |
+| save_log_path | str | "./log_output/" | 开启`benchmark`时,日志结果的保存文件夹 |
+| show_log | bool | True | 是否显示预测中的日志信息 |
+| use_onnx | bool | False | 是否开启onnx预测 |
+
+
+* 预测引擎相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| use_gpu | bool | True | 是否使用GPU进行预测 |
+| ir_optim | bool | True | 是否对计算图进行分析与优化,开启后可以加速预测过程 |
+| use_tensorrt | bool | False | 是否开启tensorrt |
+| min_subgraph_size | int | 15 | tensorrt中最小子图size,当子图的size大于该值时,才会尝试对该子图使用trt engine计算 |
+| precision | str | fp32 | 预测的精度,支持`fp32`, `fp16`, `int8` 3种输入 |
+| enable_mkldnn | bool | True | 是否开启mkldnn |
+| cpu_threads | int | 10 | 开启mkldnn时,cpu预测的线程数 |
+
+* 文本检测模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_algorithm | str | "DB" | 文本检测算法名称,目前支持`DB`, `EAST`, `SAST`, `PSE` |
+| det_model_dir | str | xx | 检测inference模型路径 |
+| det_limit_side_len | int | 960 | 检测的图像边长限制 |
+| det_limit_type | str | "max" | 检测的边长限制类型,目前支持`min`, `max`,`min`表示保证图像最短边不小于`det_limit_side_len`,`max`表示保证图像最长边不大于`det_limit_side_len` |
+
+其中,DB算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_db_thresh | float | 0.3 | DB输出的概率图中,得分大于该阈值的像素点才会被认为是文字像素点 |
+| det_db_box_thresh | float | 0.6 | 检测结果边框内,所有像素点的平均得分大于该阈值时,该结果会被认为是文字区域 |
+| det_db_unclip_ratio | float | 1.5 | `Vatti clipping`算法的扩张系数,使用该方法对文字区域进行扩张 |
+| max_batch_size | int | 10 | 预测的batch size |
+| use_dilation | bool | False | 是否对分割结果进行膨胀以获取更优检测效果 |
+| det_db_score_mode | str | "fast" | DB的检测结果得分计算方法,支持`fast`和`slow`,`fast`是根据polygon的外接矩形边框内的所有像素计算平均得分,`slow`是根据原始polygon内的所有像素计算平均得分,计算速度相对较慢一些,但是更加准确一些。 |
+
+EAST算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_east_score_thresh | float | 0.8 | EAST后处理中score map的阈值 |
+| det_east_cover_thresh | float | 0.1 | EAST后处理中文本框的平均得分阈值 |
+| det_east_nms_thresh | float | 0.2 | EAST后处理中nms的阈值 |
+
+SAST算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_sast_score_thresh | float | 0.5 | SAST后处理中的得分阈值 |
+| det_sast_nms_thresh | float | 0.5 | SAST后处理中nms的阈值 |
+| det_sast_polygon | bool | False | 是否多边形检测,弯曲文本场景(如Total-Text)设置为True |
+
+PSE算法相关参数如下
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| det_pse_thresh | float | 0.0 | 对输出图做二值化的阈值 |
+| det_pse_box_thresh | float | 0.85 | 对box进行过滤的阈值,低于此阈值的丢弃 |
+| det_pse_min_area | float | 16 | box的最小面积,低于此阈值的丢弃 |
+| det_pse_box_type | str | "box" | 返回框的类型,box:四点坐标,poly: 弯曲文本的所有点坐标 |
+| det_pse_scale | int | 1 | 输入图像相对于进后处理的图的比例,如`640*640`的图像,网络输出为`160*160`,scale为2的情况下,进后处理的图片shape为`320*320`。这个值调大可以加快后处理速度,但是会带来精度的下降 |
+
+* 文本识别模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| rec_algorithm | str | "CRNN" | 文本识别算法名称,目前支持`CRNN`, `SRN`, `RARE`, `NRTR`, `SAR` |
+| rec_model_dir | str | 无,如果使用识别模型,该项是必填项 | 识别inference模型路径 |
+| rec_image_shape | list | [3, 32, 320] | 识别时的图像尺寸 |
+| rec_batch_num | int | 6 | 识别的batch size |
+| max_text_length | int | 25 | 识别结果最大长度,在`SRN`中有效 |
+| rec_char_dict_path | str | "./ppocr/utils/ppocr_keys_v1.txt" | 识别的字符字典文件 |
+| use_space_char | bool | True | 是否包含空格,如果为`True`,则会在最后字符字典中补充`空格`字符 |
+
+
+* 端到端文本检测与识别模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| e2e_algorithm | str | "PGNet" | 端到端算法名称,目前支持`PGNet` |
+| e2e_model_dir | str | 无,如果使用端到端模型,该项是必填项 | 端到端模型inference模型路径 |
+| e2e_limit_side_len | int | 768 | 端到端的输入图像边长限制 |
+| e2e_limit_type | str | "max" | 端到端的边长限制类型,目前支持`min`, `max`,`min`表示保证图像最短边不小于`e2e_limit_side_len`,`max`表示保证图像最长边不大于`e2e_limit_side_len` |
+| e2e_pgnet_score_thresh | float | 0.5 | 端到端得分阈值,小于该阈值的结果会被丢弃 |
+| e2e_char_dict_path | str | "./ppocr/utils/ic15_dict.txt" | 识别的字典文件路径 |
+| e2e_pgnet_valid_set | str | "totaltext" | 验证集名称,目前支持`totaltext`, `partvgg`,不同数据集对应的后处理方式不同,与训练过程保持一致即可 |
+| e2e_pgnet_mode | str | "fast" | PGNet的检测结果得分计算方法,支持`fast`和`slow`,`fast`是根据polygon的外接矩形边框内的所有像素计算平均得分,`slow`是根据原始polygon内的所有像素计算平均得分,计算速度相对较慢一些,但是更加准确一些。 |
+
+
+* 方向分类器模型相关
+
+| 参数名称 | 类型 | 默认值 | 含义 |
+| :--: | :--: | :--: | :--: |
+| use_angle_cls | bool | False | 是否使用方向分类器 |
+| cls_model_dir | str | 无,如果需要使用,则必须显式指定路径 | 方向分类器inference模型路径 |
+| cls_image_shape | list | [3, 48, 192] | 预测尺度 |
+| label_list | list | ['0', '180'] | class id对应的角度值 |
+| cls_batch_num | int | 6 | 方向分类器预测的batch size |
+| cls_thresh | float | 0.9 | 预测阈值,模型预测结果为180度,且得分大于该阈值时,认为最终预测结果为180度,需要翻转 |
diff --git a/doc/doc_ch/models_list.md b/doc/doc_ch/models_list.md
index 8f1a53bccacde8e478e67c7eae5df3c818bb4004..6843ffdc19d5bde205124c30f1d0a5fc2144ce99 100644
--- a/doc/doc_ch/models_list.md
+++ b/doc/doc_ch/models_list.md
@@ -1,4 +1,4 @@
-# OCR模型列表(V2.1,2021年9月6日更新)
+# PP-OCR系列模型列表(V2.1,2021年9月6日更新)
 
 > **说明**
 > 1. 2.1版模型相比2.0版模型,2.1的模型在模型精度上做了提升
diff --git a/doc/doc_ch/pgnet.md b/doc/doc_ch/pgnet.md
index 9aa7f255e54ce8dec3a20d475cccb71847d95cc7..0aee58ec1aca24d06305c47569fdf156df6ee874 100644
--- a/doc/doc_ch/pgnet.md
+++ b/doc/doc_ch/pgnet.md
@@ -66,13 +66,13 @@
 wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/e2e_server_pgnetA_infer.
### 单张图像或者图像集合预测
```bash
# 预测image_dir指定的单张图像
-python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_polygon=True
+python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_valid_set="totaltext"
 
 # 预测image_dir指定的图像集合
-python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_polygon=True
+python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_valid_set="totaltext"
 
 # 如果想使用CPU进行预测,需设置use_gpu参数为False
-python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_polygon=True --use_gpu=False
+python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_valid_set="totaltext" --use_gpu=False
```
### 可视化结果
可视化文本检测结果默认保存到./inference_results文件夹里面,结果文件的名称前缀为'e2e_res'。结果示例如下:
@@ -167,9 +167,9 @@ python3 tools/infer_e2e.py -c configs/e2e/e2e_r50_vd_pg.yml -o Global.infer_img=
 wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar && tar xf en_server_pgnetA.tar
 python3 tools/export_model.py -c configs/e2e/e2e_r50_vd_pg.yml -o Global.pretrained_model=./en_server_pgnetA/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/e2e
 ```
-**PGNet端到端模型推理,需要设置参数`--e2e_algorithm="PGNet"`**,可以执行如下命令:
+**PGNet端到端模型推理,需要设置参数`--e2e_algorithm="PGNet"` 和 `--e2e_pgnet_valid_set="partvgg"`**,可以执行如下命令:
 ```
-python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img_10.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_polygon=False
+python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img_10.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_valid_set="partvgg"
 ```
 可视化文本检测结果默认保存到`./inference_results`文件夹里面,结果文件的名称前缀为'e2e_res'。结果示例如下:
@@ -178,9 +178,9 @@ python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/im
 
 #### (2). 弯曲文本检测模型(Total-Text)
 对于弯曲文本样例
-**PGNet端到端模型推理,需要设置参数`--e2e_algorithm="PGNet"`,同时,还需要增加参数`--e2e_pgnet_polygon=True`,**可以执行如下命令:
+**PGNet端到端模型推理,需要设置参数`--e2e_algorithm="PGNet"`,同时,还需要增加参数`--e2e_pgnet_valid_set="totaltext"`,**可以执行如下命令:
 ```
-python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_polygon=True
+python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_valid_set="totaltext"
 ```
 可视化文本端到端结果默认保存到`./inference_results`文件夹里面,结果文件的名称前缀为'e2e_res'。结果示例如下:
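As the commands above show, `--e2e_pgnet_valid_set` takes over the role of the old boolean `--e2e_pgnet_polygon` switch: quadrangle-text data runs with "partvgg", curved-text data (Total-Text style) with "totaltext". A trivial helper capturing that pairing (our own illustration, not part of the tools):

```python
# Hypothetical helper mirroring the --e2e_pgnet_valid_set guidance above:
# the old --e2e_pgnet_polygon boolean maps onto the two dataset modes.
def pgnet_valid_set(curved_text: bool) -> str:
    return "totaltext" if curved_text else "partvgg"

assert pgnet_valid_set(True) == "totaltext"   # Total-Text style curved text
assert pgnet_valid_set(False) == "partvgg"    # quadrangle text
```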
diff --git a/doc/doc_ch/thirdparty.md b/doc/doc_ch/thirdparty.md
index d01f4b09c01d2c090c829bbb9c58c43557566118..b83b8fee8dbbf867d95c4cd0e087ebfde5f4bfc1 100644
--- a/doc/doc_ch/thirdparty.md
+++ b/doc/doc_ch/thirdparty.md
@@ -12,30 +12,37 @@ PaddleOCR希望可以通过AI的力量助力任何一位有梦想的开发者实
 
 ## 1. 社区贡献
 
-### 1.1 为PaddleOCR新增功能
+### 1.1 基于PaddleOCR的社区贡献
+
+- 【最新】 [FastOCRLabel](https://gitee.com/BaoJianQiang/FastOCRLabel):完整的C#版本标注工具 (@ [包建强](https://gitee.com/BaoJianQiang) )
+
+#### 1.1.1 通用工具
+
+- [DangoOCR离线版](https://github.com/PantsuDango/DangoOCR):通用型桌面级即时翻译工具 (@ [PantsuDango](https://github.com/PantsuDango))
+- [scr2txt](https://github.com/lstwzd/scr2txt):截屏转文字工具 (@ [lstwzd](https://github.com/lstwzd))
+- [AI Studio项目](https://aistudio.baidu.com/aistudio/projectdetail/1054614?channelType=0&channel=0):英文视频自动生成字幕( @ [叶月水狐](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/322052))
+
+#### 1.1.2 垂类场景工具
+
+- [id_card_ocr](https://github.com/baseli/id_card_ocr):身份证复印件识别(@ [baseli](https://github.com/baseli))
+- [Paddle_Table_Image_Reader](https://github.com/thunder95/Paddle_Table_Image_Reader):能看懂表格图片的数据助手(@ [thunder95](https://github.com/thunder95))
+
+#### 1.1.3 前后处理
+
+- [paddleOCRCorrectOutputs](https://github.com/yuranusduke/paddleOCRCorrectOutputs):获取OCR识别结果的key-value(@ [yuranusduke](https://github.com/yuranusduke))
+
+### 1.2 为PaddleOCR新增功能
 
 - 非常感谢 [authorfu](https://github.com/authorfu) 贡献Android([#340](https://github.com/PaddlePaddle/PaddleOCR/pull/340))和[xiadeye](https://github.com/xiadeye) 贡献IOS的demo代码([#325](https://github.com/PaddlePaddle/PaddleOCR/pull/325))
 - 非常感谢 [tangmq](https://gitee.com/tangmq) 给PaddleOCR增加Docker化部署服务,支持快速发布可调用的Restful API服务([#507](https://github.com/PaddlePaddle/PaddleOCR/pull/507))。
 - 非常感谢 [lijinhan](https://github.com/lijinhan) 给PaddleOCR增加java SpringBoot 调用OCR Hubserving接口完成对OCR服务化部署的使用([#1027](https://github.com/PaddlePaddle/PaddleOCR/pull/1027))。
 - 非常感谢 [Evezerest](https://github.com/Evezerest), [ninetailskim](https://github.com/ninetailskim), [edencfc](https://github.com/edencfc), [BeyondYourself](https://github.com/BeyondYourself), [1084667371](https://github.com/1084667371) 贡献了[PPOCRLabel](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/PPOCRLabel/README_ch.md) 的完整代码。
 
-### 1.2 基于PaddleOCR的社区贡献
-
-- 【最新】完整的C#版本标注工具 [FastOCRLabel](https://gitee.com/BaoJianQiang/FastOCRLabel) (@ [包建强](https://gitee.com/BaoJianQiang) )
-- 通用型桌面级即时翻译工具 [DangoOCR离线版](https://github.com/PantsuDango/DangoOCR) (@ [PantsuDango](https://github.com/PantsuDango))
-- 获取OCR识别结果的key-value [paddleOCRCorrectOutputs](https://github.com/yuranusduke/paddleOCRCorrectOutputs) (@ [yuranusduke](https://github.com/yuranusduke))
-- 截屏转文字工具 [scr2txt](https://github.com/lstwzd/scr2txt) (@ [lstwzd](https://github.com/lstwzd))
-- 身份证复印件识别 [id_card_ocr](https://github.com/baseli/id_card_ocr)(@ [baseli](https://github.com/baseli))
-- 能看懂表格图片的数据助手:[Paddle_Table_Image_Reader](https://github.com/thunder95/Paddle_Table_Image_Reader) (@ [thunder95][https://github.com/thunder95])
-- 英文视频自动生成字幕 [AI Studio项目](https://aistudio.baidu.com/aistudio/projectdetail/1054614?channelType=0&channel=0)( @ [叶月水狐](https://aistudio.baidu.com/aistudio/personalcenter/thirdview/322052))
-
 ### 1.3 代码与文档优化
-
 - 非常感谢 [zhangxin](https://github.com/ZhangXinNan)([Blog](https://blog.csdn.net/sdlypyzq)) 贡献新的可视化方式、添加.gitgnore、处理手动设置PYTHONPATH环境变量的问题([#210](https://github.com/PaddlePaddle/PaddleOCR/pull/210))。
 - 非常感谢 [lyl120117](https://github.com/lyl120117) 贡献打印网络结构的代码([#304](https://github.com/PaddlePaddle/PaddleOCR/pull/304))。
 - 非常感谢 [BeyondYourself](https://github.com/BeyondYourself) 给PaddleOCR提了很多非常棒的建议,并简化了PaddleOCR的部分代码风格([so many commits)](https://github.com/PaddlePaddle/PaddleOCR/commits?author=BeyondYourself)。
-
 - 非常感谢 [Khanh Tran](https://github.com/xxxpsyduck) 和 [Karl
Horky](https://github.com/karlhorky) 贡献修改英文文档。 ### 1.4 多语言语料 diff --git a/doc/doc_en/datasets_en.md b/doc/doc_en/datasets_en.md index 61d2033b4fe8f0077ad66fb9ae2cd559ce29fd65..0e6b6f381e9d008add802c5f8a30d5498a4f94b2 100644 --- a/doc/doc_en/datasets_en.md +++ b/doc/doc_en/datasets_en.md @@ -50,7 +50,6 @@ https://aistudio.baidu.com/aistudio/datasetdetail/8429 - Each sample is fixed with 10 characters, and the characters are randomly intercepted from the sentences in the corpus - Image resolution is 280x32 ![](../datasets/ch_doc1.jpg) - ![](../datasets/ch_doc2.jpg) ![](../datasets/ch_doc3.jpg) - **Download link**:https://pan.baidu.com/s/1QkI7kjah8SPHwOQ40rS1Pw (Password: lu7m) diff --git a/doc/doc_en/distributed_training.md b/doc/doc_en/distributed_training.md index 7a8b71ce308837568c84bf56292f78e9979d3907..519a42f0dc4b9bd4fa18f3f65019e4235282df92 100644 --- a/doc/doc_en/distributed_training.md +++ b/doc/doc_en/distributed_training.md @@ -13,7 +13,7 @@ Take recognition as an example. After the data is prepared locally, start the tr ```shell python3 -m paddle.distributed.launch \ --log_dir=./log/ \ - --gpus '0,1,2,3,4,5,6,7' \ + --gpus "0,1,2,3,4,5,6,7" \ tools/train.py \ -c configs/rec/rec_mv3_none_bilstm_ctc.yml ``` diff --git a/doc/doc_en/pgnet_en.md b/doc/doc_en/pgnet_en.md index d2c6b30248ebad920c41ca53ee38cce828dddb8c..e176a1260c734974e2dad843faeb3e5532176629 100644 --- a/doc/doc_en/pgnet_en.md +++ b/doc/doc_en/pgnet_en.md @@ -59,13 +59,13 @@ After decompression, there should be the following file structure: ### Single image or image set prediction ```bash # Prediction single image specified by image_dir -python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_polygon=True +python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_valid_set="totaltext" # Prediction the collection of images specified by image_dir -python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_polygon=True +python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_valid_set="totaltext" # If you want to use CPU for prediction, you need to set use_gpu parameter is false -python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --e2e_pgnet_polygon=True --use_gpu=False +python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e_server_pgnetA_infer/" --use_gpu=False --e2e_pgnet_valid_set="totaltext" ``` ### Visualization results The visualized end-to-end results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'e2e_res'. 
Examples of results are as follows: @@ -166,9 +166,9 @@ First, convert the model saved in the PGNet end-to-end training process into an wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar && tar xf en_server_pgnetA.tar python3 tools/export_model.py -c configs/e2e/e2e_r50_vd_pg.yml -o Global.pretrained_model=./en_server_pgnetA/best_accuracy Global.load_static_weights=False Global.save_inference_dir=./inference/e2e ``` -**For PGNet quadrangle end-to-end model inference, you need to set the parameter `--e2e_algorithm="PGNet"`**, run the following command: +**For PGNet quadrangle end-to-end model inference, you need to set the parameter `--e2e_algorithm="PGNet"` and `--e2e_pgnet_valid_set="partvgg"`**, run the following command: ``` -python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img_10.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_polygon=False +python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img_10.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_valid_set="partvgg" ``` The visualized text detection results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'e2e_res'. Examples of results are as follows: @@ -176,9 +176,9 @@ The visualized text detection results are saved to the `./inference_results` fol #### (2). Curved text detection model (Total-Text) For the curved text example, we use the same model as the quadrilateral -**For PGNet end-to-end curved text detection model inference, you need to set the parameter `--e2e_algorithm="PGNet"` and `--e2e_pgnet_polygon=True`**, run the following command: +**For PGNet end-to-end curved text detection model inference, you need to set the parameter `--e2e_algorithm="PGNet"` and `--e2e_pgnet_valid_set="totaltext"`**, run the following command: ``` -python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_polygon=True +python3 tools/infer/predict_e2e.py --e2e_algorithm="PGNet" --image_dir="./doc/imgs_en/img623.jpg" --e2e_model_dir="./inference/e2e/" --e2e_pgnet_valid_set="totaltext" ``` The visualized text detection results are saved to the `./inference_results` folder by default, and the name of the result file is prefixed with 'e2e_res'. 
Examples of results are as follows: diff --git a/doc/joinus.PNG b/doc/joinus.PNG index cd9de9c14beaf0be346a1f7f1d09450a0905a880..e2dd99383de10b5263c1ec9d255a8a31815b50b6 100644 Binary files a/doc/joinus.PNG and b/doc/joinus.PNG differ diff --git a/doc/precommit_pass.png b/doc/precommit_pass.png new file mode 100644 index 0000000000000000000000000000000000000000..067fb75ddb222ab0b9c71a46619c3fe7b239bc26 Binary files /dev/null and b/doc/precommit_pass.png differ diff --git a/ppocr/data/imaug/copy_paste.py b/ppocr/data/imaug/copy_paste.py index bbf62e2a3d813671551efa1a76c03754b1b764f5..0b3386c896792bd670cd2bfc757eb3b80f22bac4 100644 --- a/ppocr/data/imaug/copy_paste.py +++ b/ppocr/data/imaug/copy_paste.py @@ -32,6 +32,7 @@ class CopyPaste(object): self.aug = IaaAugment(augmenter_args) def __call__(self, data): + point_num = data['polys'].shape[1] src_img = data['image'] src_polys = data['polys'].tolist() src_ignores = data['ignore_tags'].tolist() @@ -57,6 +58,9 @@ class CopyPaste(object): src_img, box = self.paste_img(src_img, box_img, src_polys) if box is not None: + box = box.tolist() + for _ in range(len(box), point_num): + box.append(box[-1]) src_polys.append(box) src_ignores.append(tag) src_img = cv2.cvtColor(np.array(src_img), cv2.COLOR_RGB2BGR) diff --git a/ppocr/data/simple_dataset.py b/ppocr/data/simple_dataset.py index 6a33e1342506f26ccaa4a146f3f02fadfbd741a2..ee8571b8c452bbd834fc5dbcf01ce390562163d6 100644 --- a/ppocr/data/simple_dataset.py +++ b/ppocr/data/simple_dataset.py @@ -14,6 +14,7 @@ import numpy as np import os import random +import traceback from paddle.io import Dataset from .imaug import transform, create_operators @@ -93,7 +94,8 @@ class SimpleDataSet(Dataset): img = f.read() data['image'] = img data = transform(data, load_data_ops) - if data is None: + + if data is None or data['polys'].shape[1]!=4: continue ext_data.append(data) return ext_data @@ -115,10 +117,10 @@ class SimpleDataSet(Dataset): data['image'] = img data['ext_data'] = self.get_ext_data() outs = transform(data, self.ops) - except Exception as e: + except: self.logger.error( "When parsing line {}, error happened with msg: {}".format( - data_line, e)) + data_line, traceback.format_exc())) outs = None if outs is None: # during evaluation, we should fix the idx to get same results for many times of evaluation. 
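The copy_paste.py and simple_dataset.py changes above work as a pair: ext_data samples are filtered down to 4-point polygons, and every pasted box is padded by repeating its last vertex until it matches the sample's point count. A standalone sketch of that padding step (hypothetical helper name; the patch does this inline in `CopyPaste.__call__`):

```python
# Standalone sketch of the polygon padding added in copy_paste.py:
# a pasted 4-point box is grown to `point_num` points by repeating its
# last vertex, so every polygon in the sample has the same shape.
import numpy as np

def pad_box(box, point_num):
    box = box.tolist()
    for _ in range(len(box), point_num):
        box.append(box[-1])
    return box

quad = np.array([[0, 0], [10, 0], [10, 5], [0, 5]])
print(pad_box(quad, 16))  # 4 real vertices followed by 12 copies of [0, 5]
```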
diff --git a/ppocr/modeling/backbones/__init__.py b/ppocr/modeling/backbones/__init__.py index 4d6c2eb7241d642605a3552460590e374fa07b00..d10983487bedb0fc4278095db08d1f234ef5c595 100755 --- a/ppocr/modeling/backbones/__init__.py +++ b/ppocr/modeling/backbones/__init__.py @@ -16,7 +16,7 @@ __all__ = ["build_backbone"] def build_backbone(config, model_type): - if model_type == "det": + if model_type == "det" or model_type == "table": from .det_mobilenet_v3 import MobileNetV3 from .det_resnet_vd import ResNet from .det_resnet_vd_sast import ResNet_SAST diff --git a/ppocr/modeling/backbones/det_resnet_vd.py b/ppocr/modeling/backbones/det_resnet_vd.py index 3bb4a0d50501860d5e9df2971e93fba66c152187..a29cf1b5e1ff56e59984bc91226ef7e6b65d0da1 100644 --- a/ppocr/modeling/backbones/det_resnet_vd.py +++ b/ppocr/modeling/backbones/det_resnet_vd.py @@ -25,16 +25,14 @@ __all__ = ["ResNet"] class ConvBNLayer(nn.Layer): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - groups=1, - is_vd_mode=False, - act=None, - name=None, ): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + groups=1, + is_vd_mode=False, + act=None): super(ConvBNLayer, self).__init__() self.is_vd_mode = is_vd_mode @@ -47,19 +45,8 @@ class ConvBNLayer(nn.Layer): stride=stride, padding=(kernel_size - 1) // 2, groups=groups, - weight_attr=ParamAttr(name=name + "_weights"), bias_attr=False) - if name == "conv1": - bn_name = "bn_" + name - else: - bn_name = "bn" + name[3:] - self._batch_norm = nn.BatchNorm( - out_channels, - act=act, - param_attr=ParamAttr(name=bn_name + '_scale'), - bias_attr=ParamAttr(bn_name + '_offset'), - moving_mean_name=bn_name + '_mean', - moving_variance_name=bn_name + '_variance') + self._batch_norm = nn.BatchNorm(out_channels, act=act) def forward(self, inputs): if self.is_vd_mode: @@ -75,29 +62,25 @@ class BottleneckBlock(nn.Layer): out_channels, stride, shortcut=True, - if_first=False, - name=None): + if_first=False): super(BottleneckBlock, self).__init__() self.conv0 = ConvBNLayer( in_channels=in_channels, out_channels=out_channels, kernel_size=1, - act='relu', - name=name + "_branch2a") + act='relu') self.conv1 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=stride, - act='relu', - name=name + "_branch2b") + act='relu') self.conv2 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, - act=None, - name=name + "_branch2c") + act=None) if not shortcut: self.short = ConvBNLayer( @@ -105,8 +88,7 @@ class BottleneckBlock(nn.Layer): out_channels=out_channels * 4, kernel_size=1, stride=1, - is_vd_mode=False if if_first else True, - name=name + "_branch1") + is_vd_mode=False if if_first else True) self.shortcut = shortcut @@ -125,13 +107,13 @@ class BottleneckBlock(nn.Layer): class BasicBlock(nn.Layer): - def __init__(self, - in_channels, - out_channels, - stride, - shortcut=True, - if_first=False, - name=None): + def __init__( + self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, ): super(BasicBlock, self).__init__() self.stride = stride self.conv0 = ConvBNLayer( @@ -139,14 +121,12 @@ class BasicBlock(nn.Layer): out_channels=out_channels, kernel_size=3, stride=stride, - act='relu', - name=name + "_branch2a") + act='relu') self.conv1 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels, kernel_size=3, - act=None, - name=name + "_branch2b") + act=None) if not shortcut: self.short = ConvBNLayer( @@ -154,8 +134,7 @@ class BasicBlock(nn.Layer): 
out_channels=out_channels, kernel_size=1, stride=1, - is_vd_mode=False if if_first else True, - name=name + "_branch1") + is_vd_mode=False if if_first else True) self.shortcut = shortcut @@ -201,22 +180,19 @@ class ResNet(nn.Layer): out_channels=32, kernel_size=3, stride=2, - act='relu', - name="conv1_1") + act='relu') self.conv1_2 = ConvBNLayer( in_channels=32, out_channels=32, kernel_size=3, stride=1, - act='relu', - name="conv1_2") + act='relu') self.conv1_3 = ConvBNLayer( in_channels=32, out_channels=64, kernel_size=3, stride=1, - act='relu', - name="conv1_3") + act='relu') self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) self.stages = [] @@ -226,13 +202,6 @@ class ResNet(nn.Layer): block_list = [] shortcut = False for i in range(depth[block]): - if layers in [101, 152] and block == 2: - if i == 0: - conv_name = "res" + str(block + 2) + "a" - else: - conv_name = "res" + str(block + 2) + "b" + str(i) - else: - conv_name = "res" + str(block + 2) + chr(97 + i) bottleneck_block = self.add_sublayer( 'bb_%d_%d' % (block, i), BottleneckBlock( @@ -241,8 +210,7 @@ class ResNet(nn.Layer): out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, - if_first=block == i == 0, - name=conv_name)) + if_first=block == i == 0)) shortcut = True block_list.append(bottleneck_block) self.out_channels.append(num_filters[block] * 4) @@ -252,7 +220,6 @@ class ResNet(nn.Layer): block_list = [] shortcut = False for i in range(depth[block]): - conv_name = "res" + str(block + 2) + chr(97 + i) basic_block = self.add_sublayer( 'bb_%d_%d' % (block, i), BasicBlock( @@ -261,8 +228,7 @@ class ResNet(nn.Layer): out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, - if_first=block == i == 0, - name=conv_name)) + if_first=block == i == 0)) shortcut = True block_list.append(basic_block) self.out_channels.append(num_filters[block]) diff --git a/ppocr/modeling/backbones/rec_mobilenet_v3.py b/ppocr/modeling/backbones/rec_mobilenet_v3.py index c5dcfdd5a3ad1f2c356f488a89e0f1e660a4a832..917e000d94ea01ce0057e08c1f4839240561a368 100644 --- a/ppocr/modeling/backbones/rec_mobilenet_v3.py +++ b/ppocr/modeling/backbones/rec_mobilenet_v3.py @@ -26,8 +26,10 @@ class MobileNetV3(nn.Layer): scale=0.5, large_stride=None, small_stride=None, + disable_se=False, **kwargs): super(MobileNetV3, self).__init__() + self.disable_se = disable_se if small_stride is None: small_stride = [2, 2, 2, 2] if large_stride is None: @@ -101,6 +103,7 @@ class MobileNetV3(nn.Layer): block_list = [] inplanes = make_divisible(inplanes * scale) for (k, exp, c, se, nl, s) in cfg: + se = se and not self.disable_se block_list.append( ResidualUnit( in_channels=inplanes, diff --git a/ppocr/modeling/backbones/table_mobilenet_v3.py b/ppocr/modeling/backbones/table_mobilenet_v3.py deleted file mode 100644 index daa87f976038d8d5eeafadceb869b9232ba22cd9..0000000000000000000000000000000000000000 --- a/ppocr/modeling/backbones/table_mobilenet_v3.py +++ /dev/null @@ -1,287 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle -from paddle import nn -import paddle.nn.functional as F -from paddle import ParamAttr - -__all__ = ['MobileNetV3'] - - -def make_divisible(v, divisor=8, min_value=None): - if min_value is None: - min_value = divisor - new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) - if new_v < 0.9 * v: - new_v += divisor - return new_v - - -class MobileNetV3(nn.Layer): - def __init__(self, - in_channels=3, - model_name='large', - scale=0.5, - disable_se=False, - **kwargs): - """ - the MobilenetV3 backbone network for detection module. - Args: - params(dict): the super parameters for build network - """ - super(MobileNetV3, self).__init__() - - self.disable_se = disable_se - - if model_name == "large": - cfg = [ - # k, exp, c, se, nl, s, - [3, 16, 16, False, 'relu', 1], - [3, 64, 24, False, 'relu', 2], - [3, 72, 24, False, 'relu', 1], - [5, 72, 40, True, 'relu', 2], - [5, 120, 40, True, 'relu', 1], - [5, 120, 40, True, 'relu', 1], - [3, 240, 80, False, 'hardswish', 2], - [3, 200, 80, False, 'hardswish', 1], - [3, 184, 80, False, 'hardswish', 1], - [3, 184, 80, False, 'hardswish', 1], - [3, 480, 112, True, 'hardswish', 1], - [3, 672, 112, True, 'hardswish', 1], - [5, 672, 160, True, 'hardswish', 2], - [5, 960, 160, True, 'hardswish', 1], - [5, 960, 160, True, 'hardswish', 1], - ] - cls_ch_squeeze = 960 - elif model_name == "small": - cfg = [ - # k, exp, c, se, nl, s, - [3, 16, 16, True, 'relu', 2], - [3, 72, 24, False, 'relu', 2], - [3, 88, 24, False, 'relu', 1], - [5, 96, 40, True, 'hardswish', 2], - [5, 240, 40, True, 'hardswish', 1], - [5, 240, 40, True, 'hardswish', 1], - [5, 120, 48, True, 'hardswish', 1], - [5, 144, 48, True, 'hardswish', 1], - [5, 288, 96, True, 'hardswish', 2], - [5, 576, 96, True, 'hardswish', 1], - [5, 576, 96, True, 'hardswish', 1], - ] - cls_ch_squeeze = 576 - else: - raise NotImplementedError("mode[" + model_name + - "_model] is not implemented!") - - supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25] - assert scale in supported_scale, \ - "supported scale are {} but input scale is {}".format(supported_scale, scale) - inplanes = 16 - # conv1 - self.conv = ConvBNLayer( - in_channels=in_channels, - out_channels=make_divisible(inplanes * scale), - kernel_size=3, - stride=2, - padding=1, - groups=1, - if_act=True, - act='hardswish', - name='conv1') - - self.stages = [] - self.out_channels = [] - block_list = [] - i = 0 - inplanes = make_divisible(inplanes * scale) - for (k, exp, c, se, nl, s) in cfg: - se = se and not self.disable_se - start_idx = 2 if model_name == 'large' else 0 - if s == 2 and i > start_idx: - self.out_channels.append(inplanes) - self.stages.append(nn.Sequential(*block_list)) - block_list = [] - block_list.append( - ResidualUnit( - in_channels=inplanes, - mid_channels=make_divisible(scale * exp), - out_channels=make_divisible(scale * c), - kernel_size=k, - stride=s, - use_se=se, - act=nl, - name="conv" + str(i + 2))) - inplanes = make_divisible(scale * c) - i += 1 - block_list.append( - ConvBNLayer( - in_channels=inplanes, - out_channels=make_divisible(scale * cls_ch_squeeze), - kernel_size=1, - stride=1, - padding=0, - groups=1, - if_act=True, - act='hardswish', - name='conv_last')) - self.stages.append(nn.Sequential(*block_list)) - self.out_channels.append(make_divisible(scale * cls_ch_squeeze)) - for i, stage in 
enumerate(self.stages): - self.add_sublayer(sublayer=stage, name="stage{}".format(i)) - - def forward(self, x): - x = self.conv(x) - out_list = [] - for stage in self.stages: - x = stage(x) - out_list.append(x) - return out_list - - -class ConvBNLayer(nn.Layer): - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride, - padding, - groups=1, - if_act=True, - act=None, - name=None): - super(ConvBNLayer, self).__init__() - self.if_act = if_act - self.act = act - self.conv = nn.Conv2D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - groups=groups, - weight_attr=ParamAttr(name=name + '_weights'), - bias_attr=False) - - self.bn = nn.BatchNorm( - num_channels=out_channels, - act=None, - param_attr=ParamAttr(name=name + "_bn_scale"), - bias_attr=ParamAttr(name=name + "_bn_offset"), - moving_mean_name=name + "_bn_mean", - moving_variance_name=name + "_bn_variance") - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - if self.if_act: - if self.act == "relu": - x = F.relu(x) - elif self.act == "hardswish": - x = F.hardswish(x) - else: - print("The activation function({}) is selected incorrectly.". - format(self.act)) - exit() - return x - - -class ResidualUnit(nn.Layer): - def __init__(self, - in_channels, - mid_channels, - out_channels, - kernel_size, - stride, - use_se, - act=None, - name=''): - super(ResidualUnit, self).__init__() - self.if_shortcut = stride == 1 and in_channels == out_channels - self.if_se = use_se - - self.expand_conv = ConvBNLayer( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - if_act=True, - act=act, - name=name + "_expand") - self.bottleneck_conv = ConvBNLayer( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=int((kernel_size - 1) // 2), - groups=mid_channels, - if_act=True, - act=act, - name=name + "_depthwise") - if self.if_se: - self.mid_se = SEModule(mid_channels, name=name + "_se") - self.linear_conv = ConvBNLayer( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - if_act=False, - act=None, - name=name + "_linear") - - def forward(self, inputs): - x = self.expand_conv(inputs) - x = self.bottleneck_conv(x) - if self.if_se: - x = self.mid_se(x) - x = self.linear_conv(x) - if self.if_shortcut: - x = paddle.add(inputs, x) - return x - - -class SEModule(nn.Layer): - def __init__(self, in_channels, reduction=4, name=""): - super(SEModule, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2D(1) - self.conv1 = nn.Conv2D( - in_channels=in_channels, - out_channels=in_channels // reduction, - kernel_size=1, - stride=1, - padding=0, - weight_attr=ParamAttr(name=name + "_1_weights"), - bias_attr=ParamAttr(name=name + "_1_offset")) - self.conv2 = nn.Conv2D( - in_channels=in_channels // reduction, - out_channels=in_channels, - kernel_size=1, - stride=1, - padding=0, - weight_attr=ParamAttr(name + "_2_weights"), - bias_attr=ParamAttr(name=name + "_2_offset")) - - def forward(self, inputs): - outputs = self.avg_pool(inputs) - outputs = self.conv1(outputs) - outputs = F.relu(outputs) - outputs = self.conv2(outputs) - outputs = F.hardsigmoid(outputs, slope=0.2, offset=0.5) - return inputs * outputs \ No newline at end of file diff --git a/ppocr/modeling/backbones/table_resnet_vd.py b/ppocr/modeling/backbones/table_resnet_vd.py deleted file mode 100644 index 
1c07c2684eec8d0c4a445cc88c543bfe1da9c864..0000000000000000000000000000000000000000 --- a/ppocr/modeling/backbones/table_resnet_vd.py +++ /dev/null @@ -1,280 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle -from paddle import ParamAttr -import paddle.nn as nn -import paddle.nn.functional as F - -__all__ = ["ResNet"] - - -class ConvBNLayer(nn.Layer): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - groups=1, - is_vd_mode=False, - act=None, - name=None, ): - super(ConvBNLayer, self).__init__() - - self.is_vd_mode = is_vd_mode - self._pool2d_avg = nn.AvgPool2D( - kernel_size=2, stride=2, padding=0, ceil_mode=True) - self._conv = nn.Conv2D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=(kernel_size - 1) // 2, - groups=groups, - weight_attr=ParamAttr(name=name + "_weights"), - bias_attr=False) - if name == "conv1": - bn_name = "bn_" + name - else: - bn_name = "bn" + name[3:] - self._batch_norm = nn.BatchNorm( - out_channels, - act=act, - param_attr=ParamAttr(name=bn_name + '_scale'), - bias_attr=ParamAttr(bn_name + '_offset'), - moving_mean_name=bn_name + '_mean', - moving_variance_name=bn_name + '_variance') - - def forward(self, inputs): - if self.is_vd_mode: - inputs = self._pool2d_avg(inputs) - y = self._conv(inputs) - y = self._batch_norm(y) - return y - - -class BottleneckBlock(nn.Layer): - def __init__(self, - in_channels, - out_channels, - stride, - shortcut=True, - if_first=False, - name=None): - super(BottleneckBlock, self).__init__() - - self.conv0 = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - act='relu', - name=name + "_branch2a") - self.conv1 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - act='relu', - name=name + "_branch2b") - self.conv2 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels * 4, - kernel_size=1, - act=None, - name=name + "_branch2c") - - if not shortcut: - self.short = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels * 4, - kernel_size=1, - stride=1, - is_vd_mode=False if if_first else True, - name=name + "_branch1") - - self.shortcut = shortcut - - def forward(self, inputs): - y = self.conv0(inputs) - conv1 = self.conv1(y) - conv2 = self.conv2(conv1) - - if self.shortcut: - short = inputs - else: - short = self.short(inputs) - y = paddle.add(x=short, y=conv2) - y = F.relu(y) - return y - - -class BasicBlock(nn.Layer): - def __init__(self, - in_channels, - out_channels, - stride, - shortcut=True, - if_first=False, - name=None): - super(BasicBlock, self).__init__() - self.stride = stride - self.conv0 = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - act='relu', - name=name + 
"_branch2a") - self.conv1 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - act=None, - name=name + "_branch2b") - - if not shortcut: - self.short = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - is_vd_mode=False if if_first else True, - name=name + "_branch1") - - self.shortcut = shortcut - - def forward(self, inputs): - y = self.conv0(inputs) - conv1 = self.conv1(y) - - if self.shortcut: - short = inputs - else: - short = self.short(inputs) - y = paddle.add(x=short, y=conv1) - y = F.relu(y) - return y - - -class ResNet(nn.Layer): - def __init__(self, in_channels=3, layers=50, **kwargs): - super(ResNet, self).__init__() - - self.layers = layers - supported_layers = [18, 34, 50, 101, 152, 200] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format( - supported_layers, layers) - - if layers == 18: - depth = [2, 2, 2, 2] - elif layers == 34 or layers == 50: - depth = [3, 4, 6, 3] - elif layers == 101: - depth = [3, 4, 23, 3] - elif layers == 152: - depth = [3, 8, 36, 3] - elif layers == 200: - depth = [3, 12, 48, 3] - num_channels = [64, 256, 512, - 1024] if layers >= 50 else [64, 64, 128, 256] - num_filters = [64, 128, 256, 512] - - self.conv1_1 = ConvBNLayer( - in_channels=in_channels, - out_channels=32, - kernel_size=3, - stride=2, - act='relu', - name="conv1_1") - self.conv1_2 = ConvBNLayer( - in_channels=32, - out_channels=32, - kernel_size=3, - stride=1, - act='relu', - name="conv1_2") - self.conv1_3 = ConvBNLayer( - in_channels=32, - out_channels=64, - kernel_size=3, - stride=1, - act='relu', - name="conv1_3") - self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) - - self.stages = [] - self.out_channels = [] - if layers >= 50: - for block in range(len(depth)): - block_list = [] - shortcut = False - for i in range(depth[block]): - if layers in [101, 152] and block == 2: - if i == 0: - conv_name = "res" + str(block + 2) + "a" - else: - conv_name = "res" + str(block + 2) + "b" + str(i) - else: - conv_name = "res" + str(block + 2) + chr(97 + i) - bottleneck_block = self.add_sublayer( - 'bb_%d_%d' % (block, i), - BottleneckBlock( - in_channels=num_channels[block] - if i == 0 else num_filters[block] * 4, - out_channels=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut, - if_first=block == i == 0, - name=conv_name)) - shortcut = True - block_list.append(bottleneck_block) - self.out_channels.append(num_filters[block] * 4) - self.stages.append(nn.Sequential(*block_list)) - else: - for block in range(len(depth)): - block_list = [] - shortcut = False - for i in range(depth[block]): - conv_name = "res" + str(block + 2) + chr(97 + i) - basic_block = self.add_sublayer( - 'bb_%d_%d' % (block, i), - BasicBlock( - in_channels=num_channels[block] - if i == 0 else num_filters[block], - out_channels=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut, - if_first=block == i == 0, - name=conv_name)) - shortcut = True - block_list.append(basic_block) - self.out_channels.append(num_filters[block]) - self.stages.append(nn.Sequential(*block_list)) - - def forward(self, inputs): - y = self.conv1_1(inputs) - y = self.conv1_2(y) - y = self.conv1_3(y) - y = self.pool2d_max(y) - out = [] - for block in self.stages: - y = block(y) - out.append(y) - return out diff --git a/ppocr/modeling/transforms/tps_spatial_transformer.py b/ppocr/modeling/transforms/tps_spatial_transformer.py index 
4db34f7b4833c1c9b2901c68899bfb294b5843c4..043bb56b8a526c12b2e0799bf41e128c6499c1fc 100644 --- a/ppocr/modeling/transforms/tps_spatial_transformer.py +++ b/ppocr/modeling/transforms/tps_spatial_transformer.py @@ -53,7 +53,7 @@ def compute_partial_repr(input_points, control_points): 1] repr_matrix = 0.5 * pairwise_dist * paddle.log(pairwise_dist) # fix numerical error for 0 * log(0), substitute all nan with 0 - mask = repr_matrix != repr_matrix + mask = np.array(repr_matrix != repr_matrix) repr_matrix[mask] = 0 return repr_matrix diff --git a/ppocr/postprocess/east_postprocess.py b/ppocr/postprocess/east_postprocess.py index ec6bf663854d3391bf8c584aa749dc6d1805d344..c194c81c6911aac0f9210109c37b76b44532e9c4 100755 --- a/ppocr/postprocess/east_postprocess.py +++ b/ppocr/postprocess/east_postprocess.py @@ -20,7 +20,6 @@ import numpy as np from .locality_aware_nms import nms_locality import cv2 import paddle -import lanms import os import sys @@ -61,6 +60,7 @@ class EASTPostProcess(object): """ restore text boxes from score map and geo map """ + score_map = score_map[0] geo_map = np.swapaxes(geo_map, 1, 0) geo_map = np.swapaxes(geo_map, 1, 2) @@ -76,8 +76,15 @@ class EASTPostProcess(object): boxes = np.zeros((text_box_restored.shape[0], 9), dtype=np.float32) boxes[:, :8] = text_box_restored.reshape((-1, 8)) boxes[:, 8] = score_map[xy_text[:, 0], xy_text[:, 1]] - boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh) - # boxes = nms_locality(boxes.astype(np.float64), nms_thresh) + + try: + import lanms + boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh) + except: + print( + 'you should install lanms by pip3 install lanms-nova to speed up nms_locality' + ) + boxes = nms_locality(boxes.astype(np.float64), nms_thresh) if boxes.shape[0] == 0: return [] # Here we filter some low score boxes by the average score map, diff --git a/ppocr/utils/save_load.py b/ppocr/utils/save_load.py index 4b890f6fa352772e6ebe1614b798e1ce69cdd17c..f6013a406634ed110ea5af613a5f31e56ce90ead 100644 --- a/ppocr/utils/save_load.py +++ b/ppocr/utils/save_load.py @@ -67,6 +67,7 @@ def load_model(config, model, optimizer=None): if key not in params: logger.warning("{} not in loaded params {} !".format( key, params.keys())) + continue pre_value = params[key] if list(value.shape) == list(pre_value.shape): new_state_dict[key] = pre_value @@ -76,9 +77,14 @@ def load_model(config, model, optimizer=None): format(key, value.shape, pre_value.shape)) model.set_state_dict(new_state_dict) - optim_dict = paddle.load(checkpoints + '.pdopt') if optimizer is not None: - optimizer.set_state_dict(optim_dict) + if os.path.exists(checkpoints + '.pdopt'): + optim_dict = paddle.load(checkpoints + '.pdopt') + optimizer.set_state_dict(optim_dict) + else: + logger.warning( + "{}.pdopt is not exists, params of optimizer is not loaded". + format(checkpoints)) if os.path.exists(checkpoints + '.states'): with open(checkpoints + '.states', 'rb') as f: diff --git a/ppstructure/README.md b/ppstructure/README.md index 849c5c5667ff0532dfee35479715880192df0dc5..8994cdd46191a0fd4fb1beba2fcad91542e19b50 100644 --- a/ppstructure/README.md +++ b/ppstructure/README.md @@ -153,7 +153,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_in wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar cd .. 
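# Optional sanity check before running inference (a sketch, with directory
# names assumed from the tar files downloaded above):
ls inference/
# expected: ch_ppocr_mobile_v2.0_det_infer  ch_ppocr_mobile_v2.0_rec_infer  en_ppocr_mobile_v2.0_table_structure_infer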
-python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf +python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf ``` After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel and figure area will be cropped and saved, the excel and image file name will be the coordinates of the table in the image. diff --git a/ppstructure/README_ch.md b/ppstructure/README_ch.md index 821a6c3e36361abefa4d754537fdbd694e844efe..607efac1bf6bfaa58f0e96ceef1a0ee344189e9c 100644 --- a/ppstructure/README_ch.md +++ b/ppstructure/README_ch.md @@ -1,6 +1,12 @@ [English](README.md) | 简体中文 -# PP-Structure +## 简介 +PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包,旨在帮助开发者更好的完成文档理解相关任务。 + +## 近期更新 +* 2021.12.07 新增VQA任务-SER和RE。 + +## 特性 PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包,主要特性如下: - 支持对图片形式的文档进行版面分析,可以划分**文字、标题、表格、图片以及列表**5类区域(与Layout-Parser联合使用) @@ -8,181 +14,88 @@ PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包 - 支持表格区域进行结构化分析,最终结果输出Excel文件 - 支持python whl包和命令行两种方式,简单易用 - 支持版面分析和表格结构化两类任务自定义训练 +- 支持文档视觉问答(Document Visual Question Answering,DOC-VQA)任务-语义实体识别(Semantic Entity Recognition,SER)和关系抽取(Relation Extraction,RE) -## 1. 效果展示 - - - - - -## 2. 安装 - -### 2.1 安装依赖 - -- **(1) 安装PaddlePaddle** - -```bash -pip3 install --upgrade pip - -# GPU安装 -python3 -m pip install paddlepaddle-gpu==2.1.1 -i https://mirror.baidu.com/pypi/simple - -# CPU安装 - python3 -m pip install paddlepaddle==2.1.1 -i https://mirror.baidu.com/pypi/simple - -``` -更多需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 - -- **(2) 安装 Layout-Parser** - -```bash -pip3 install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl -``` - -### 2.2 安装PaddleOCR(包含PP-OCR和PP-Structure) - -- **(1) PIP快速安装PaddleOCR whl包(仅预测)** -```bash -pip install "paddleocr>=2.2" # 推荐使用2.2+版本 -``` - -- **(2) 完整克隆PaddleOCR源码(预测+训练)** - -```bash -【推荐】git clone https://github.com/PaddlePaddle/PaddleOCR - -#如果因为网络问题无法pull成功,也可选择使用码云上的托管: -git clone https://gitee.com/paddlepaddle/PaddleOCR - -#注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。 -``` - - -## 3. PP-Structure 快速开始 - -### 3.1 命令行使用(默认参数,极简) - -```bash -paddleocr --image_dir=../doc/table/1.png --type=structure -``` - -### 3.2 Python脚本使用(自定义参数,灵活) +## 1. 
效果展示 -```python -import os -import cv2 -from paddleocr import PPStructure,draw_structure_result,save_structure_res +### 1.1 版面分析和表格识别 -table_engine = PPStructure(show_log=True) + -save_folder = './output/table' -img_path = '../doc/table/1.png' -img = cv2.imread(img_path) -result = table_engine(img) -save_structure_res(result, save_folder,os.path.basename(img_path).split('.')[0]) +### 1.2 VQA -for line in result: - line.pop('img') - print(line) +* SER -from PIL import Image +![](./vqa/images/result_ser/zh_val_0_ser.jpg) | ![](./vqa/images/result_ser/zh_val_42_ser.jpg) +---|--- -font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包 -image = Image.open(img_path).convert('RGB') -im_show = draw_structure_result(image, result,font_path=font_path) -im_show = Image.fromarray(im_show) -im_show.save('result.jpg') -``` +图中不同颜色的框表示不同的类别,对于XFUN数据集,有`QUESTION`, `ANSWER`, `HEADER` 3种类别 -### 3.3 返回结果说明 -PP-Structure的返回结果为一个dict组成的list,示例如下 +* 深紫色:HEADER +* 浅紫色:QUESTION +* 军绿色:ANSWER -```shell -[ - { 'type': 'Text', - 'bbox': [34, 432, 345, 462], - 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]], - [('Tigure-6. The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)]) - } -] -``` -dict 里各个字段说明如下 +在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 -| 字段 | 说明 | -| --------------- | -------------| -|type|图片区域的类型| -|bbox|图片区域的在原图的坐标,分别[左上角x,左上角y,右下角x,右下角y]| -|res|图片区域的OCR或表格识别结果。
表格: 表格的HTML字符串;
OCR: 一个包含各个单行文字的检测坐标和识别结果的元组| +* RE +![](./vqa/images/result_re/zh_val_21_re.jpg) | ![](./vqa/images/result_re/zh_val_40_re.jpg) +---|--- -### 3.4 参数说明 -| 字段 | 说明 | 默认值 | -| --------------- | ---------------------------------------- | ------------------------------------------- | -| output | excel和识别结果保存的地址 | ./output/table | -| table_max_len | 表格结构模型预测时,图像的长边resize尺度 | 488 | -| table_model_dir | 表格结构模型 inference 模型地址 | None | -| table_char_type | 表格结构模型所用字典地址 | ../ppocr/utils/dict/table_structure_dict.tx | +图中红色框表示问题,蓝色框表示答案,问题和答案之间使用绿色线连接。在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 -大部分参数和paddleocr whl包保持一致,见 [whl包文档](../doc/doc_ch/whl.md) +## 2. 快速体验 -运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名名为表格在图片里的坐标。 +代码体验:从 [快速安装](./docs/quickstart.md) 开始 +## 3. PP-Structure Pipeline介绍 -## 4. PP-Structure Pipeline介绍 +### 3.1 版面分析+表格识别 ![pipeline](../doc/table/pipeline.jpg) 在PP-Structure中,图片会先经由Layout-Parser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过表格结构化处理后,表格图片转换为相同表格样式的Excel文件。 -### 4.1 版面分析 +#### 3.1.1 版面分析 版面分析对文档数据进行区域分类,其中包括版面分析工具的Python脚本使用、提取指定类别检测框、性能指标以及自定义训练版面分析模型,详细内容可以参考[文档](layout/README_ch.md)。 -### 4.2 表格识别 +#### 3.1.2 表格识别 表格识别将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md) -## 5. 预测引擎推理(与whl包效果相同) -使用如下命令即可完成预测引擎的推理 +### 3.2 VQA -```python -cd ppstructure +coming soon -# 下载模型 -mkdir inference && cd inference -# 下载超轻量级中文OCR模型的检测模型并解压 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar -# 下载超轻量级中文OCR模型的识别模型并解压 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar -# 下载超轻量级英文表格英寸模型并解压 -wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar -cd .. +## 4. 
模型库 -python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf -``` -运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名名为表格在图片里的坐标。 +PP-Structure系列模型列表(更新中) -**Model List** - -LayoutParser 模型 +* LayoutParser 模型 |模型名称|模型简介|下载地址| | --- | --- | --- | | ppyolov2_r50vd_dcn_365e_publaynet | PubLayNet 数据集训练的版面分析模型,可以划分**文字、标题、表格、图片以及列表**5类区域 | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) | -| ppyolov2_r50vd_dcn_365e_tableBank_word | TableBank Word 数据集训练的版面分析模型,只能检测表格 | [TableBank Word](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar) | -| ppyolov2_r50vd_dcn_365e_tableBank_latex | TableBank Latex 数据集训练的版面分析模型,只能检测表格 | [TableBank Latex](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar) | -OCR和表格识别模型 -|模型名称|模型简介|推理模型大小|下载地址| +* OCR和表格识别模型 + +|模型名称|模型简介|模型大小|下载地址| | --- | --- | --- | --- | |ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | |ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | -|en_ppocr_mobile_v2.0_table_det|PubLayNet数据集训练的英文表格场景的文字检测|4.7M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_det_train.tar) | -|en_ppocr_mobile_v2.0_table_rec|PubLayNet数据集训练的英文表格场景的文字识别|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_rec_train.tar) | |en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | -如需要使用其他模型,可以在 [model_list](../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到`det_model_dir`,`rec_model_dir`,`table_model_dir`三个字段即可。 +* VQA模型 + +|模型名称|模型简介|模型大小|下载地址| +| --- | --- | --- | --- | +|PP-Layout_v1.0_ser_pretrained|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar) | +|PP-Layout_v1.0_re_pretrained|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar) | + + +更多模型下载,可以参考 [模型库](./docs/model_list.md) diff --git a/ppstructure/docs/installation.md b/ppstructure/docs/installation.md new file mode 100644 index 0000000000000000000000000000000000000000..30c25d5dc92f6ccdb0d93dafe9707f30eca0c0a9 --- /dev/null +++ 
b/ppstructure/docs/installation.md @@ -0,0 +1,28 @@ +# 快速安装 + +## 1. PaddlePaddle 和 PaddleOCR + +可参考[PaddleOCR安装文档](../../doc/doc_ch/installation.md) + +## 2. 安装其他依赖 + +### 2.1 版面分析所需 Layout-Parser + +Layout-Parser 可通过如下命令安装 + +```bash +pip3 install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +``` +### 2.2 VQA所需依赖 +* paddleocr + +```bash +pip3 install paddleocr +``` + +* PaddleNLP +```bash +git clone https://github.com/PaddlePaddle/PaddleNLP -b develop +cd PaddleNLP +pip3 install -e . +``` diff --git a/ppstructure/docs/model_list.md b/ppstructure/docs/model_list.md new file mode 100644 index 0000000000000000000000000000000000000000..835d39a735462edb0d9f51493ec0529248aeadbf --- /dev/null +++ b/ppstructure/docs/model_list.md @@ -0,0 +1,28 @@ +# Model List + +## 1. LayoutParser 模型 + +|模型名称|模型简介|下载地址| +| --- | --- | --- | +| ppyolov2_r50vd_dcn_365e_publaynet | PubLayNet 数据集训练的版面分析模型,可以划分**文字、标题、表格、图片以及列表**5类区域 | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) | +| ppyolov2_r50vd_dcn_365e_tableBank_word | TableBank Word 数据集训练的版面分析模型,只能检测表格 | [TableBank Word](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar) | +| ppyolov2_r50vd_dcn_365e_tableBank_latex | TableBank Latex 数据集训练的版面分析模型,只能检测表格 | [TableBank Latex](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar) | + +## 2. OCR和表格识别模型 + +|模型名称|模型简介|推理模型大小|下载地址| +| --- | --- | --- | --- | +|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) | +|ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_train.tar) | +|en_ppocr_mobile_v2.0_table_det|PubLayNet数据集训练的英文表格场景的文字检测|4.7M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_det_train.tar) | +|en_ppocr_mobile_v2.0_table_rec|PubLayNet数据集训练的英文表格场景的文字识别|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_rec_train.tar) | +|en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) / [训练模型](https://paddleocr.bj.bcebos.com/dygraph_v2.1/table/en_ppocr_mobile_v2.0_table_structure_train.tar) | + +如需要使用其他OCR模型,可以在 [model_list](../../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到`det_model_dir`,`rec_model_dir`两个字段即可。 + +## 3. 
VQA模型 + +|模型名称|模型简介|推理模型大小|下载地址| +| --- | --- | --- | --- | +|PP-Layout_v1.0_ser_pretrained|基于LayoutXLM在xfun中文数据集上训练的SER模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar) | +|PP-Layout_v1.0_re_pretrained|基于LayoutXLM在xfun中文数据集上训练的RE模型|1.4G|[推理模型 coming soon]() / [训练模型](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar) | diff --git a/ppstructure/docs/quickstart.md b/ppstructure/docs/quickstart.md new file mode 100644 index 0000000000000000000000000000000000000000..446c577ec39cf24dd4b8699558c633a1308fa444 --- /dev/null +++ b/ppstructure/docs/quickstart.md @@ -0,0 +1,171 @@ +# PP-Structure 快速开始 + +* [1. 安装PaddleOCR whl包](#1) +* [2. 便捷使用](#2) + + [2.1 命令行使用](#21) + + [2.2 Python脚本使用](#22) + + [2.3 返回结果说明](#23) + + [2.4 参数说明](#24) +* [3. Python脚本使用](#3) + + + + +## 1. 安装依赖包 + +```bash +pip install "paddleocr>=2.3.0.2" # 推荐使用2.3.0.2+版本 +pip3 install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl + +# 安装 PaddleNLP +git clone https://github.com/PaddlePaddle/PaddleNLP -b develop +cd PaddleNLP +pip3 install -e . + +``` + + + +## 2. 便捷使用 + + + +### 2.1 命令行使用 + +* 版面分析+表格识别 +```bash +paddleocr --image_dir=../doc/table/1.png --type=structure +``` + +* VQA + +coming soon + + + +### 2.2 Python脚本使用 + +* 版面分析+表格识别 +```python +import os +import cv2 +from paddleocr import PPStructure,draw_structure_result,save_structure_res + +table_engine = PPStructure(show_log=True) + +save_folder = './output/table' +img_path = '../doc/table/1.png' +img = cv2.imread(img_path) +result = table_engine(img) +save_structure_res(result, save_folder,os.path.basename(img_path).split('.')[0]) + +for line in result: + line.pop('img') + print(line) + +from PIL import Image + +font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包 +image = Image.open(img_path).convert('RGB') +im_show = draw_structure_result(image, result,font_path=font_path) +im_show = Image.fromarray(im_show) +im_show.save('result.jpg') +``` + +* VQA + +comming soon + + + +### 2.3 返回结果说明 +PP-Structure的返回结果为一个dict组成的list,示例如下 + +* 版面分析+表格识别 +```shell +[ + { 'type': 'Text', + 'bbox': [34, 432, 345, 462], + 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]], + [('Tigure-6. The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)]) + } +] +``` +dict 里各个字段说明如下 + +| 字段 | 说明 | +| --------------- | -------------| +|type|图片区域的类型| +|bbox|图片区域的在原图的坐标,分别[左上角x,左上角y,右下角x,右下角y]| +|res|图片区域的OCR或表格识别结果。
表格: 表格的HTML字符串;
OCR: 一个包含各个单行文字的检测坐标和识别结果的元组| + +* VQA + +coming soon + +<a name="24"></a> + +### 2.4 参数说明 + +| 字段 | 说明 | 默认值 | +| --------------- | ---------------------------------------- | ------------------------------------------- | +| output | excel和识别结果保存的地址 | ./output/table | +| table_max_len | 表格结构模型预测时,图像的长边resize尺度 | 488 | +| table_model_dir | 表格结构模型 inference 模型地址 | None | +| table_char_dict_path | 表格结构模型所用字典地址 | ../ppocr/utils/dict/table_structure_dict.txt | +| model_name_or_path | VQA SER模型地址 | None | +| max_seq_length | VQA SER模型最大支持token长度 | 512 | +| label_map_path | VQA SER 标签文件地址 | ./vqa/labels/labels_ser.txt | +| mode | pipeline预测模式,structure: 版面分析+表格识别; vqa: ser文档信息抽取 | structure | + +大部分参数和paddleocr whl包保持一致,见 [whl包文档](../doc/doc_ch/whl.md) + +运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名为表格在图片里的坐标。 + +<a name="3"></a> + +## 3. Python脚本使用 + +* 版面分析+表格识别 + +```bash +cd ppstructure + +# 下载模型 +mkdir inference && cd inference +# 下载超轻量级中文OCR模型的检测模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar +# 下载超轻量级中文OCR模型的识别模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar +# 下载超轻量级英文表格识别模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar +cd .. + +python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer \ + --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer \ + --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer \ + --image_dir=../doc/table/1.png \ + --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt \ + --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt \ + --output=../output/table \ + --vis_font_path=../doc/fonts/simfang.ttf +``` +运行完成后,每张图片会在`output`字段指定的目录下的`table`目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片名为表格在图片里的坐标。 + +* VQA + +```bash +cd ppstructure + +# 下载模型 +mkdir inference && cd inference +# 下载SER xfun 模型并解压 +wget https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar && tar xf PP-Layout_v1.0_ser_pretrained.tar +cd ..
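+# 注意:按上述步骤,模型会被解压到 inference/ 目录下;下面命令中的
+# --model_name_or_path 需指向模型实际所在路径(若保持上述目录结构,
+# 即为 inference/PP-Layout_v1.0_ser_pretrained/)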
+ +python3 predict_system.py --model_name_or_path=vqa/PP-Layout_v1.0_ser_pretrained/ \ + --mode=vqa \ + --image_dir=vqa/images/input/zh_val_0.jpg \ + --vis_font_path=../doc/fonts/simfang.ttf +``` +运行完成后,每张图片会在`output`字段指定的目录下的`vqa`目录下存放可视化之后的图片,图片名和输入图片名一致。 diff --git a/ppstructure/predict_system.py b/ppstructure/predict_system.py index b2de3d4de80b39f046cf6cbc8a9ebbc52bf69334..e87499ccc410ae67a170f63301e5a99ef948b161 100644 --- a/ppstructure/predict_system.py +++ b/ppstructure/predict_system.py @@ -30,6 +30,7 @@ from ppocr.utils.utility import get_image_file_list, check_and_read_gif from ppocr.utils.logging import get_logger from tools.infer.predict_system import TextSystem from ppstructure.table.predict_table import TableSystem, to_excel +from ppstructure.vqa.infer_ser_e2e import SerPredictor, draw_ser_results from ppstructure.utility import parse_args, draw_structure_result logger = get_logger() @@ -37,53 +38,75 @@ logger = get_logger() class OCRSystem(object): def __init__(self, args): - import layoutparser as lp - # args.det_limit_type = 'resize_long' - args.drop_score = 0 - if not args.show_log: - logger.setLevel(logging.INFO) - self.text_system = TextSystem(args) - self.table_system = TableSystem(args, self.text_system.text_detector, self.text_system.text_recognizer) - - config_path = None - model_path = None - if os.path.isdir(args.layout_path_model): - model_path = args.layout_path_model - else: - config_path = args.layout_path_model - self.table_layout = lp.PaddleDetectionLayoutModel(config_path=config_path, - model_path=model_path, - threshold=0.5, enable_mkldnn=args.enable_mkldnn, - enforce_cpu=not args.use_gpu, thread_num=args.cpu_threads) - self.use_angle_cls = args.use_angle_cls - self.drop_score = args.drop_score + self.mode = args.mode + if self.mode == 'structure': + import layoutparser as lp + # args.det_limit_type = 'resize_long' + args.drop_score = 0 + if not args.show_log: + logger.setLevel(logging.INFO) + self.text_system = TextSystem(args) + self.table_system = TableSystem(args, + self.text_system.text_detector, + self.text_system.text_recognizer) + + config_path = None + model_path = None + if os.path.isdir(args.layout_path_model): + model_path = args.layout_path_model + else: + config_path = args.layout_path_model + self.table_layout = lp.PaddleDetectionLayoutModel( + config_path=config_path, + model_path=model_path, + threshold=0.5, + enable_mkldnn=args.enable_mkldnn, + enforce_cpu=not args.use_gpu, + thread_num=args.cpu_threads) + self.use_angle_cls = args.use_angle_cls + self.drop_score = args.drop_score + elif self.mode == 'vqa': + self.vqa_engine = SerPredictor(args) def __call__(self, img): - ori_im = img.copy() - layout_res = self.table_layout.detect(img[..., ::-1]) - res_list = [] - for region in layout_res: - x1, y1, x2, y2 = region.coordinates - x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) - roi_img = ori_im[y1:y2, x1:x2, :] - if region.type == 'Table': - res = self.table_system(roi_img) - else: - filter_boxes, filter_rec_res = self.text_system(roi_img) - filter_boxes = [x + [x1, y1] for x in filter_boxes] - filter_boxes = [x.reshape(-1).tolist() for x in filter_boxes] - # remove style char - style_token = ['', '', '', '', '', '', '', '', - '', '', '', '', '', ''] - filter_rec_res_tmp = [] - for rec_res in filter_rec_res: - rec_str, rec_conf = rec_res - for token in style_token: - if token in rec_str: - rec_str = rec_str.replace(token, '') - filter_rec_res_tmp.append((rec_str, rec_conf)) - res = (filter_boxes, filter_rec_res_tmp) - 
res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'img': roi_img, 'res': res}) + if self.mode == 'structure': + ori_im = img.copy() + layout_res = self.table_layout.detect(img[..., ::-1]) + res_list = [] + for region in layout_res: + x1, y1, x2, y2 = region.coordinates + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + roi_img = ori_im[y1:y2, x1:x2, :] + if region.type == 'Table': + res = self.table_system(roi_img) + else: + filter_boxes, filter_rec_res = self.text_system(roi_img) + filter_boxes = [x + [x1, y1] for x in filter_boxes] + filter_boxes = [ + x.reshape(-1).tolist() for x in filter_boxes + ] + # remove style char + style_token = [ + '<strike>', '</strike>', '<sup>', '</sup>', '<b>', + '</b>', '<sub>', '</sub>', '<overline>', '</overline>', + '<underline>', '</underline>', '<i>', '</i>' + ] + filter_rec_res_tmp = [] + for rec_res in filter_rec_res: + rec_str, rec_conf = rec_res + for token in style_token: + if token in rec_str: + rec_str = rec_str.replace(token, '') + filter_rec_res_tmp.append((rec_str, rec_conf)) + res = (filter_boxes, filter_rec_res_tmp) + res_list.append({ + 'type': region.type, + 'bbox': [x1, y1, x2, y2], + 'img': roi_img, + 'res': res + }) + elif self.mode == 'vqa': + res_list, _ = self.vqa_engine(img) return res_list @@ -91,29 +114,35 @@ def save_structure_res(res, save_folder, img_name): excel_save_folder = os.path.join(save_folder, img_name) os.makedirs(excel_save_folder, exist_ok=True) # save res - with open(os.path.join(excel_save_folder, 'res.txt'), 'w', encoding='utf8') as f: + with open( + os.path.join(excel_save_folder, 'res.txt'), 'w', + encoding='utf8') as f: for region in res: if region['type'] == 'Table': - excel_path = os.path.join(excel_save_folder, '{}.xlsx'.format(region['bbox'])) + excel_path = os.path.join(excel_save_folder, + '{}.xlsx'.format(region['bbox'])) to_excel(region['res'], excel_path) if region['type'] == 'Figure': roi_img = region['img'] - img_path = os.path.join(excel_save_folder, '{}.jpg'.format(region['bbox'])) + img_path = os.path.join(excel_save_folder, + '{}.jpg'.format(region['bbox'])) cv2.imwrite(img_path, roi_img) else: for box, rec_res in zip(region['res'][0], region['res'][1]): - f.write('{}\t{}\n'.format(np.array(box).reshape(-1).tolist(), rec_res)) + f.write('{}\t{}\n'.format( + np.array(box).reshape(-1).tolist(), rec_res)) def main(args): image_file_list = get_image_file_list(args.image_dir) image_file_list = image_file_list image_file_list = image_file_list[args.process_id::args.total_process_num] - save_folder = args.output - os.makedirs(save_folder, exist_ok=True) structure_sys = OCRSystem(args) img_num = len(image_file_list) + save_folder = os.path.join(args.output, structure_sys.mode) + os.makedirs(save_folder, exist_ok=True) + for i, image_file in enumerate(image_file_list): logger.info("[{}/{}] {}".format(i, img_num, image_file)) img, flag = check_and_read_gif(image_file) @@ -126,10 +155,16 @@ def main(args): continue starttime = time.time() res = structure_sys(img) - save_structure_res(res, save_folder, img_name) - draw_img = draw_structure_result(img, res, args.vis_font_path) - cv2.imwrite(os.path.join(save_folder, img_name, 'show.jpg'), draw_img) - logger.info('result save to {}'.format(os.path.join(save_folder, img_name))) + + if structure_sys.mode == 'structure': + save_structure_res(res, save_folder, img_name) + draw_img = draw_structure_result(img, res, args.vis_font_path) + img_save_path = os.path.join(save_folder, img_name, 'show.jpg') + elif structure_sys.mode == 'vqa': + draw_img = draw_ser_results(img, res, args.vis_font_path) + img_save_path = os.path.join(save_folder,
img_name + '.jpg') + cv2.imwrite(img_save_path, draw_img) + logger.info('result save to {}'.format(img_save_path)) elapse = time.time() - starttime logger.info("Predict time : {:.3f}s".format(elapse)) diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md index 67c4d8e26d5c615f4a930752005420ba1abcc834..30a11a20e5de90500d1408f671ba914f336a0b43 100644 --- a/ppstructure/table/README.md +++ b/ppstructure/table/README.md @@ -20,9 +20,9 @@ We evaluated the algorithm on the PubTabNet[1] eval dataset, and the |Method|[TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src)| -| --- | --- | -| EDD[2] | 88.3 | -| Ours | 93.32 | +| --- | --- | +| EDD[2] | 88.3 | +| Ours | 93.32 | ## 3. How to use @@ -41,7 +41,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_tab wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar cd .. # run -python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table +python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/en_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --output ../output/table ``` Note: The above model is trained on the PubLayNet dataset and only supports English scanning scenarios. If you need to identify other scenarios, you need to train the model yourself and replace the three fields `det_model_dir`, `rec_model_dir`, `table_model_dir`. @@ -82,8 +82,8 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo The table uses [TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) as the evaluation metric of the model. Before the model evaluation, the three models in the pipeline need to be exported as inference models (we have provided them), and the gt for evaluation needs to be prepared. Examples of gt are as follows: ```json {"PMC4289340_004_00.png": [ - ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"], - [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]], + ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"], + [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]], [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]] ]} ``` @@ -95,7 +95,7 @@ In gt json, the key is the image name, the value is the corresponding gt, and gt Use the following command to evaluate. After the evaluation is completed, the teds indicator will be output. ```python cd PaddleOCR/ppstructure -python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json +python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json ``` If the PubTabNet eval dataset is used, the following will be output @@ -113,4 +113,4 @@ After running, the excel sheet of each picture will be saved in the directory sp Reference 1. https://github.com/ibm-aur-nlp/PubTabNet -2. https://arxiv.org/pdf/1911.10683 \ No newline at end of file +2. https://arxiv.org/pdf/1911.10683 diff --git a/ppstructure/table/README_ch.md b/ppstructure/table/README_ch.md index 2e90ad33423da347b5a51444f2be53ed2eb67a7a..33276b36e4973e83d7efa673b90013cf5727dfe2 100644 --- a/ppstructure/table/README_ch.md +++ b/ppstructure/table/README_ch.md @@ -34,9 +34,9 @@ |算法|[TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src)| -| --- | --- | -| EDD[2] | 88.3 | -| Ours | 93.32 | +| --- | --- | +| EDD[2] | 88.3 | +| Ours | 93.32 | ## 3. 使用 @@ -56,7 +56,7 @@ wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_tab wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar cd ..
# 执行预测 -python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table +python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/dict/en_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --output ../output/table ``` 运行完成后,每张图片的excel表格会保存到output字段指定的目录下 @@ -94,8 +94,8 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo 表格使用 [TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下: ```json {"PMC4289340_004_00.png": [ - ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"], - [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]], + ["<html>", "<body>", "<table>", "<thead>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</thead>", "<tbody>", "<tr>", "<td>", "</td>", "<td>", "</td>", "<td>", "</td>", "</tr>", "</tbody>", "</table>", "</body>", "</html>"], + [[1, 4, 29, 13], [137, 4, 161, 13], [215, 4, 236, 13], [1, 17, 30, 27], [137, 17, 147, 27], [215, 17, 225, 27]], [["<b>", "F", "e", "a", "t", "u", "r", "e", "</b>"], ["<b>", "G", "b", "3", " ", "+", "</b>"], ["<b>", "G", "b", "3", " ", "-", "</b>"], ["<b>", "P", "a", "t", "i", "e", "n", "t", "s", "</b>"], ["6", "2"], ["4", "5"]] ]} ``` @@ -107,7 +107,7 @@ json 中,key为图片名,value为对应的gt,gt是一个由三个item组 准备完成后使用如下命令进行评估,评估完成后会输出teds指标。 ```python cd PaddleOCR/ppstructure -python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json +python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json ``` 如使用PubTabNet评估数据集,将会输出 ```bash @@ -123,4 +123,4 @@ python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model Reference 1. https://github.com/ibm-aur-nlp/PubTabNet -2. https://arxiv.org/pdf/1911.10683 \ No newline at end of file +2. https://arxiv.org/pdf/1911.10683 diff --git a/ppstructure/utility.py b/ppstructure/utility.py index 7d9fa76d0ada58e363243c114519d001de3fbf2a..ce7a801b1bb4094d3f4d2ba467332c6763ad6287 100644 --- a/ppstructure/utility.py +++ b/ppstructure/utility.py @@ -21,13 +21,31 @@ def init_args(): parser = infer_args() # params for output - parser.add_argument("--output", type=str, default='./output/table') + parser.add_argument("--output", type=str, default='./output') # params for table structure parser.add_argument("--table_max_len", type=int, default=488) parser.add_argument("--table_model_dir", type=str) parser.add_argument("--table_char_type", type=str, default='en') - parser.add_argument("--table_char_dict_path", type=str, default="../ppocr/utils/dict/table_structure_dict.txt") - parser.add_argument("--layout_path_model", type=str, default="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config") + parser.add_argument( + "--table_char_dict_path", + type=str, + default="../ppocr/utils/dict/table_structure_dict.txt") + parser.add_argument( + "--layout_path_model", + type=str, + default="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config") + + # params for ser + parser.add_argument("--model_name_or_path", type=str) + parser.add_argument("--max_seq_length", type=int, default=512) + parser.add_argument( + "--label_map_path", type=str, default='./vqa/labels/labels_ser.txt') + + parser.add_argument( + "--mode", + type=str, + default='structure', + help='structure and vqa are supported') return parser @@ -48,5 +66,6 @@ def draw_structure_result(image, result, font_path): boxes.append(np.array(box).reshape(-1, 2)) txts.append(rec_res[0]) scores.append(rec_res[1]) - im_show = draw_ocr_box_txt(image, boxes, txts, scores, font_path=font_path,drop_score=0) - return im_show \ No newline at end of file + im_show = draw_ocr_box_txt( + image, boxes, txts, scores, font_path=font_path, drop_score=0) + return im_show diff --git a/ppstructure/vqa/README.md b/ppstructure/vqa/README.md new file mode 100644 index
0000000000000000000000000000000000000000..23fe28f8494ce84e774c3dd21811003f772c41f8 --- /dev/null +++ b/ppstructure/vqa/README.md @@ -0,0 +1,246 @@ +# 文档视觉问答(DOC-VQA) + +VQA指视觉问答,主要针对图像内容进行提问和回答,DOC-VQA是VQA任务中的一种,DOC-VQA主要针对文本图像的文字内容提出问题。 + +PP-Structure 里的 DOC-VQA算法基于PaddleNLP自然语言处理算法库进行开发。 + +主要特性如下: + +- 集成[LayoutXLM](https://arxiv.org/pdf/2104.08836.pdf)模型以及PP-OCR预测引擎。 +- 支持基于多模态方法的语义实体识别 (Semantic Entity Recognition, SER) 以及关系抽取 (Relation Extraction, RE) 任务。基于 SER 任务,可以完成对图像中的文本识别与分类;基于 RE 任务,可以完成对图象中的文本内容的关系提取,如判断问题对(pair)。 +- 支持SER任务和RE任务的自定义训练。 +- 支持OCR+SER的端到端系统预测与评估。 +- 支持OCR+SER+RE的端到端系统预测。 + + +本项目是 [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/pdf/2104.08836.pdf) 在 Paddle 2.2上的开源实现, +包含了在 [XFUND数据集](https://github.com/doc-analysis/XFUND) 上的微调代码。 + +## 1 性能 + +我们在 [XFUN](https://github.com/doc-analysis/XFUND) 评估数据集上对算法进行了评估,性能如下 + +|任务| f1 | 模型下载地址| +|:---:|:---:| :---:| +|SER|0.9056| [链接](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_ser_pretrained.tar)| +|RE|0.7113| [链接](https://paddleocr.bj.bcebos.com/pplayout/PP-Layout_v1.0_re_pretrained.tar)| + + + +## 2. 效果演示 + +**注意:** 测试图片来源于XFUN数据集。 + +### 2.1 SER + +![](./images/result_ser/zh_val_0_ser.jpg) | ![](./images/result_ser/zh_val_42_ser.jpg) +---|--- + +图中不同颜色的框表示不同的类别,对于XFUN数据集,有`QUESTION`, `ANSWER`, `HEADER` 3种类别 + +* 深紫色:HEADER +* 浅紫色:QUESTION +* 军绿色:ANSWER + +在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 + + +### 2.2 RE + +![](./images/result_re/zh_val_21_re.jpg) | ![](./images/result_re/zh_val_40_re.jpg) +---|--- + + +图中红色框表示问题,蓝色框表示答案,问题和答案之间使用绿色线连接。在OCR检测框的左上方也标出了对应的类别和OCR识别结果。 + + +## 3. 安装 + +### 3.1 安装依赖 + +- **(1) 安装PaddlePaddle** + +```bash +pip3 install --upgrade pip + +# GPU安装 +python3 -m pip install paddlepaddle-gpu==2.2 -i https://mirror.baidu.com/pypi/simple + +# CPU安装 +python3 -m pip install paddlepaddle==2.2 -i https://mirror.baidu.com/pypi/simple + +``` +更多需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 + + +### 3.2 安装PaddleOCR(包含 PP-OCR 和 VQA ) + +- **(1)pip快速安装PaddleOCR whl包(仅预测)** + +```bash +pip install paddleocr +``` + +- **(2)下载VQA源码(预测+训练)** + +```bash +【推荐】git clone https://github.com/PaddlePaddle/PaddleOCR + +# 如果因为网络问题无法pull成功,也可选择使用码云上的托管: +git clone https://gitee.com/paddlepaddle/PaddleOCR + +# 注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。 +``` + +- **(3)安装PaddleNLP** + +```bash +# 需要使用PaddleNLP最新的代码版本进行安装 +git clone https://github.com/PaddlePaddle/PaddleNLP -b develop +cd PaddleNLP +pip install -e . +``` + + +- **(4)安装VQA的`requirements`** + +```bash +cd ppstructure/vqa +pip install -r requirements.txt +``` + +## 4. 
使用 + + +### 4.1 数据和预训练模型准备 + +处理好的XFUN中文数据集下载地址:[https://paddleocr.bj.bcebos.com/dataset/XFUND.tar](https://paddleocr.bj.bcebos.com/dataset/XFUND.tar)。 + + +下载并解压该数据集,解压后将数据集放置在当前目录下。 + +```shell +wget https://paddleocr.bj.bcebos.com/dataset/XFUND.tar +``` + +如果希望转换XFUN中其他语言的数据集,可以参考[XFUN数据转换脚本](helper/trans_xfun_data.py)。 + +如果希望直接体验预测过程,可以下载我们提供的预训练模型,跳过训练过程,直接预测即可。 + + +### 4.2 SER任务 + +* 启动训练 + +```shell +python3.7 train_ser.py \ + --model_name_or_path "layoutxlm-base-uncased" \ + --train_data_dir "XFUND/zh_train/image" \ + --train_label_path "XFUND/zh_train/xfun_normalize_train.json" \ + --eval_data_dir "XFUND/zh_val/image" \ + --eval_label_path "XFUND/zh_val/xfun_normalize_val.json" \ + --num_train_epochs 200 \ + --eval_steps 10 \ + --save_steps 500 \ + --output_dir "./output/ser/" \ + --learning_rate 5e-5 \ + --warmup_steps 50 \ + --evaluate_during_training \ + --seed 2048 +``` + +最终会打印出`precision`, `recall`, `f1`等指标,模型和训练日志会保存在`./output/ser/`文件夹中。 + +* 使用评估集合中提供的OCR识别结果进行预测 + +```shell +export CUDA_VISIBLE_DEVICES=0 +python3.7 infer_ser.py \ + --model_name_or_path "./PP-Layout_v1.0_ser_pretrained/" \ + --output_dir "output_res/" \ + --infer_imgs "XFUND/zh_val/image/" \ + --ocr_json_path "XFUND/zh_val/xfun_normalize_val.json" +``` + +最终会在`output_res`目录下保存预测结果可视化图像以及预测结果文本文件,文件名为`infer_results.txt`。 + +* 使用`OCR引擎 + SER`串联结果 + +```shell +export CUDA_VISIBLE_DEVICES=0 +python3.7 infer_ser_e2e.py \ + --model_name_or_path "./output/PP-Layout_v1.0_ser_pretrained/" \ + --max_seq_length 512 \ + --output_dir "output_res_e2e/" \ + --infer_imgs "images/input/zh_val_0.jpg" +``` + +* 对`OCR引擎 + SER`预测系统进行端到端评估 + +```shell +export CUDA_VISIBLE_DEVICES=0 +python3.7 helper/eval_with_label_end2end.py --gt_json_path XFUND/zh_val/xfun_normalize_val.json --pred_json_path output_res/infer_results.txt +``` + + +### 4.3 RE任务 + +* 启动训练 + +```shell +python3 train_re.py \ + --model_name_or_path "layoutxlm-base-uncased" \ + --train_data_dir "XFUND/zh_train/image" \ + --train_label_path "XFUND/zh_train/xfun_normalize_train.json" \ + --eval_data_dir "XFUND/zh_val/image" \ + --eval_label_path "XFUND/zh_val/xfun_normalize_val.json" \ + --label_map_path 'labels/labels_ser.txt' \ + --num_train_epochs 2 \ + --eval_steps 10 \ + --save_steps 500 \ + --output_dir "output/re/" \ + --learning_rate 5e-5 \ + --warmup_steps 50 \ + --per_gpu_train_batch_size 8 \ + --per_gpu_eval_batch_size 8 \ + --evaluate_during_training \ + --seed 2048 +``` + +最终会打印出`precision`, `recall`, `f1`等指标,模型和训练日志会保存在`./output/re/`文件夹中。 + +* 使用评估集合中提供的OCR识别结果进行预测 + +```shell +export CUDA_VISIBLE_DEVICES=0 +python3 infer_re.py \ + --model_name_or_path "./PP-Layout_v1.0_re_pretrained/" \ + --max_seq_length 512 \ + --eval_data_dir "XFUND/zh_val/image" \ + --eval_label_path "XFUND/zh_val/xfun_normalize_val.json" \ + --label_map_path 'labels/labels_ser.txt' \ + --output_dir "output_res" \ + --per_gpu_eval_batch_size 1 \ + --seed 2048 +``` + +最终会在`output_res`目录下保存预测结果可视化图像以及预测结果文本文件,文件名为`infer_results.txt`。 + +* 使用`OCR引擎 + SER + RE`串联结果 + +```shell +export CUDA_VISIBLE_DEVICES=0 +python3.7 infer_ser_re_e2e.py \ + --model_name_or_path "./PP-Layout_v1.0_ser_pretrained/" \ + --re_model_name_or_path "./PP-Layout_v1.0_re_pretrained/" \ + --max_seq_length 512 \ + --output_dir "output_ser_re_e2e_train/" \ + --infer_imgs "images/input/zh_val_21.jpg" +``` + +## 参考链接 + +- LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding, https://arxiv.org/pdf/2104.08836.pdf +- microsoft/unilm/layoutxlm,
+
+## References
+
+- LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding, https://arxiv.org/pdf/2104.08836.pdf
+- microsoft/unilm/layoutxlm, https://github.com/microsoft/unilm/tree/master/layoutxlm
+- XFUND dataset, https://github.com/doc-analysis/XFUND
diff --git a/ppstructure/vqa/data_collator.py b/ppstructure/vqa/data_collator.py new file mode 100644 index 0000000000000000000000000000000000000000..a969935b487e3d22ea5c4a3527028aa2cfe1a797 --- /dev/null +++ b/ppstructure/vqa/data_collator.py @@ -0,0 +1,38 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import numbers
+import numpy as np
+
+
+class DataCollator:
+    """
+    Collate a list of sample dicts into one batch dict; every field that
+    holds arrays, tensors or numbers is stacked into a single paddle Tensor.
+    """
+
+    def __call__(self, batch):
+        data_dict = {}
+        to_tensor_keys = []
+        for sample in batch:
+            for k, v in sample.items():
+                if k not in data_dict:
+                    data_dict[k] = []
+                # only numeric fields are stacked; the rest stay as lists
+                if isinstance(v, (np.ndarray, paddle.Tensor, numbers.Number)):
+                    if k not in to_tensor_keys:
+                        to_tensor_keys.append(k)
+                data_dict[k].append(v)
+        for k in to_tensor_keys:
+            data_dict[k] = paddle.to_tensor(data_dict[k])
+        return data_dict
diff --git a/ppstructure/vqa/helper/eval_with_label_end2end.py b/ppstructure/vqa/helper/eval_with_label_end2end.py new file mode 100644 index 0000000000000000000000000000000000000000..c8dd3e0ad437e51e21ebc53daeec9fdf9aa76b63 --- /dev/null +++ b/ppstructure/vqa/helper/eval_with_label_end2end.py @@ -0,0 +1,262 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
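+
+# End-to-end evaluation of the OCR + SER pipeline: ground-truth and predicted
+# text boxes are matched by polygon IoU, then text accuracy (edit distance)
+# and SER label agreement are accumulated into precision / recall / F1.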
+
+import os
+import re
+import sys
+import shapely
+from shapely.geometry import Polygon
+import numpy as np
+from collections import defaultdict
+import operator
+import editdistance
+import argparse
+import json
+import copy
+
+
+def parse_ser_results_fp(fp, fp_type="gt", ignore_background=True):
+    # Each input line looks like:
+    # img/zh_val_0.jpg	{
+    #     "height": 3508,
+    #     "width": 2480,
+    #     "ocr_info": [
+    #         {"text": "Maribyrnong", "label": "other", "bbox": [1958, 144, 2184, 198]},
+    #         {"text": "CITYCOUNCIL", "label": "other", "bbox": [2052, 183, 2171, 214]},
+    #     ]
+    # }
+    assert fp_type in ["gt", "pred"]
+    key = "label" if fp_type == "gt" else "pred"
+    res_dict = dict()
+    with open(fp, "r") as fin:
+        lines = fin.readlines()
+
+    for line in lines:
+        img_path, info = line.strip().split("\t")
+        # get key
+        image_name = os.path.basename(img_path)
+        res_dict[image_name] = []
+        # get infos
+        json_info = json.loads(info)
+        for single_ocr_info in json_info["ocr_info"]:
+            label = single_ocr_info[key].upper()
+            if label in ["O", "OTHERS", "OTHER"]:
+                label = "O"
+            if ignore_background and label == "O":
+                continue
+            single_ocr_info["label"] = label
+            res_dict[image_name].append(copy.deepcopy(single_ocr_info))
+    return res_dict
+
+
+def polygon_from_str(polygon_points):
+    """
+    Create a shapely polygon object from a gt or dt line.
+    """
+    polygon_points = np.array(polygon_points).reshape(4, 2)
+    polygon = Polygon(polygon_points).convex_hull
+    return polygon
+
+
+def polygon_iou(poly1, poly2):
+    """
+    Intersection over union between two shapely polygons.
+    """
+    if not poly1.intersects(
+            poly2):  # this test is fast and can accelerate calculation
+        iou = 0
+    else:
+        try:
+            inter_area = poly1.intersection(poly2).area
+            union_area = poly1.area + poly2.area - inter_area
+            iou = float(inter_area) / union_area
+        except shapely.geos.TopologicalError:
+            print('shapely.geos.TopologicalError occurred, iou set to 0')
+            iou = 0
+    return iou
+
+
+def ed(args, str1, str2):
+    if args.ignore_space:
+        str1 = str1.replace(" ", "")
+        str2 = str2.replace(" ", "")
+    if args.ignore_case:
+        str1 = str1.lower()
+        str2 = str2.lower()
+    return editdistance.eval(str1, str2)
+
+
+def convert_bbox_to_polygon(bbox):
+    """
+    bbox  : [x1, y1, x2, y2]
+    output: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
+    """
+    xmin, ymin, xmax, ymax = bbox
+    poly = [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]
+    return poly
+
+
+def eval_e2e(args):
+    # gt
+    gt_results = parse_ser_results_fp(args.gt_json_path, "gt",
+                                      args.ignore_background)
+    # pred
+    dt_results = parse_ser_results_fp(args.pred_json_path, "pred",
+                                      args.ignore_background)
+    assert set(gt_results.keys()) == set(dt_results.keys())
+
+    iou_thresh = args.iou_thres
+    num_gt_chars = 0
+    gt_count = 0
+    dt_count = 0
+    hit = 0
+    ed_sum = 0
+
+    for img_name in gt_results:
+        gt_info = gt_results[img_name]
+        gt_count += len(gt_info)
+
+        dt_info = dt_results[img_name]
+        dt_count += len(dt_info)
+
+        dt_match = [False] * len(dt_info)
+        gt_match = [False] * len(gt_info)
+
+        all_ious = defaultdict(tuple)
+        # gt: {text, label, bbox or poly}
+        for index_gt, gt in enumerate(gt_info):
+            if "poly" not in gt:
+                gt["poly"] = convert_bbox_to_polygon(gt["bbox"])
+            gt_poly = polygon_from_str(gt["poly"])
+            for index_dt, dt in enumerate(dt_info):
+                if "poly" not in dt:
+                    dt["poly"] = convert_bbox_to_polygon(dt["bbox"])
+                dt_poly = polygon_from_str(dt["poly"])
+                iou = polygon_iou(dt_poly, gt_poly)
+                if iou >= iou_thresh:
+                    all_ious[(index_gt, index_dt)] = iou
+
sorted_ious = sorted( + all_ious.items(), key=operator.itemgetter(1), reverse=True) + sorted_gt_dt_pairs = [item[0] for item in sorted_ious] + + # matched gt and dt + for gt_dt_pair in sorted_gt_dt_pairs: + index_gt, index_dt = gt_dt_pair + if gt_match[index_gt] == False and dt_match[index_dt] == False: + gt_match[index_gt] = True + dt_match[index_dt] = True + # ocr rec results + gt_text = gt_info[index_gt]["text"] + dt_text = dt_info[index_dt]["text"] + + # ser results + gt_label = gt_info[index_gt]["label"] + dt_label = dt_info[index_dt]["pred"] + + if True: # ignore_masks[index_gt] == '0': + ed_sum += ed(args, gt_text, dt_text) + num_gt_chars += len(gt_text) + if gt_text == dt_text: + if args.ignore_ser_prediction or gt_label == dt_label: + hit += 1 + +# unmatched dt + for tindex, dt_match_flag in enumerate(dt_match): + if dt_match_flag == False: + dt_text = dt_info[tindex]["text"] + gt_text = "" + ed_sum += ed(args, dt_text, gt_text) + +# unmatched gt + for tindex, gt_match_flag in enumerate(gt_match): + if gt_match_flag == False: + dt_text = "" + gt_text = gt_info[tindex]["text"] + ed_sum += ed(args, gt_text, dt_text) + num_gt_chars += len(gt_text) + + eps = 1e-9 + print("config: ", args) + print('hit, dt_count, gt_count', hit, dt_count, gt_count) + precision = hit / (dt_count + eps) + recall = hit / (gt_count + eps) + fmeasure = 2.0 * precision * recall / (precision + recall + eps) + avg_edit_dist_img = ed_sum / len(gt_results) + avg_edit_dist_field = ed_sum / (gt_count + eps) + character_acc = 1 - ed_sum / (num_gt_chars + eps) + + print('character_acc: %.2f' % (character_acc * 100) + "%") + print('avg_edit_dist_field: %.2f' % (avg_edit_dist_field)) + print('avg_edit_dist_img: %.2f' % (avg_edit_dist_img)) + print('precision: %.2f' % (precision * 100) + "%") + print('recall: %.2f' % (recall * 100) + "%") + print('fmeasure: %.2f' % (fmeasure * 100) + "%") + + return + + +def parse_args(): + """ + """ + + def str2bool(v): + return v.lower() in ("true", "t", "1") + + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument( + "--gt_json_path", + default=None, + type=str, + required=True, ) + parser.add_argument( + "--pred_json_path", + default=None, + type=str, + required=True, ) + + parser.add_argument("--iou_thres", default=0.5, type=float) + + parser.add_argument( + "--ignore_case", + default=False, + type=str2bool, + help="whether to do lower case for the strs") + + parser.add_argument( + "--ignore_space", + default=True, + type=str2bool, + help="whether to ignore space") + + parser.add_argument( + "--ignore_background", + default=True, + type=str2bool, + help="whether to ignore other label") + + parser.add_argument( + "--ignore_ser_prediction", + default=False, + type=str2bool, + help="whether to ignore ocr pred results") + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_args() + eval_e2e(args) diff --git a/ppstructure/vqa/helper/trans_xfun_data.py b/ppstructure/vqa/helper/trans_xfun_data.py new file mode 100644 index 0000000000000000000000000000000000000000..b5ebd5dfbd8addda0701a7cfd2387133f7a8776b --- /dev/null +++ b/ppstructure/vqa/helper/trans_xfun_data.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + + +def transfer_xfun_data(json_path=None, output_file=None): + with open(json_path, "r") as fin: + lines = fin.readlines() + + json_info = json.loads(lines[0]) + documents = json_info["documents"] + label_info = {} + with open(output_file, "w") as fout: + for idx, document in enumerate(documents): + img_info = document["img"] + document = document["document"] + image_path = img_info["fname"] + + label_info["height"] = img_info["height"] + label_info["width"] = img_info["width"] + + label_info["ocr_info"] = [] + + for doc in document: + label_info["ocr_info"].append({ + "text": doc["text"], + "label": doc["label"], + "bbox": doc["box"], + "id": doc["id"], + "linking": doc["linking"], + "words": doc["words"] + }) + + fout.write(image_path + "\t" + json.dumps( + label_info, ensure_ascii=False) + "\n") + + print("===ok====") + + +transfer_xfun_data("./xfun/zh.val.json", "./xfun_normalize_val.json") diff --git a/ppstructure/vqa/images/input/zh_val_0.jpg b/ppstructure/vqa/images/input/zh_val_0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..479b60bcd3a859b187ce5325dfc381c1b87ee27f Binary files /dev/null and b/ppstructure/vqa/images/input/zh_val_0.jpg differ diff --git a/ppstructure/vqa/images/input/zh_val_21.jpg b/ppstructure/vqa/images/input/zh_val_21.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35b572d7dd6a6b42cf43a8a4b33567c0af527d30 Binary files /dev/null and b/ppstructure/vqa/images/input/zh_val_21.jpg differ diff --git a/ppstructure/vqa/images/input/zh_val_40.jpg b/ppstructure/vqa/images/input/zh_val_40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a858cc33d54831335c209146853b6c302c734f8 Binary files /dev/null and b/ppstructure/vqa/images/input/zh_val_40.jpg differ diff --git a/ppstructure/vqa/images/input/zh_val_42.jpg b/ppstructure/vqa/images/input/zh_val_42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42151bdd94929ede9da1a63ce8d9339971094a46 Binary files /dev/null and b/ppstructure/vqa/images/input/zh_val_42.jpg differ diff --git a/ppstructure/vqa/images/result_re/zh_val_21_re.jpg b/ppstructure/vqa/images/result_re/zh_val_21_re.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bf248dd0e69057c4775ff9c205317044e94ee65 Binary files /dev/null and b/ppstructure/vqa/images/result_re/zh_val_21_re.jpg differ diff --git a/ppstructure/vqa/images/result_re/zh_val_40_re.jpg b/ppstructure/vqa/images/result_re/zh_val_40_re.jpg new file mode 100644 index 0000000000000000000000000000000000000000..242f9d6e80be39c595d98b57d59d48673ce62f20 Binary files /dev/null and b/ppstructure/vqa/images/result_re/zh_val_40_re.jpg differ diff --git a/ppstructure/vqa/images/result_ser/zh_val_0_ser.jpg b/ppstructure/vqa/images/result_ser/zh_val_0_ser.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4605c3a7f395e9868ba55cd31a99367694c78f5c Binary files /dev/null and b/ppstructure/vqa/images/result_ser/zh_val_0_ser.jpg differ diff --git a/ppstructure/vqa/images/result_ser/zh_val_42_ser.jpg b/ppstructure/vqa/images/result_ser/zh_val_42_ser.jpg 
new file mode 100644 index 0000000000000000000000000000000000000000..13bc7272e49a03115085d4a7420a7acfb92d3260 Binary files /dev/null and b/ppstructure/vqa/images/result_ser/zh_val_42_ser.jpg differ
diff --git a/ppstructure/vqa/infer_re.py b/ppstructure/vqa/infer_re.py new file mode 100644 index 0000000000000000000000000000000000000000..ae2f52550294b072179c3bdba28c3572369e11a3 --- /dev/null +++ b/ppstructure/vqa/infer_re.py @@ -0,0 +1,162 @@
+import os
+import sys
+
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(__dir__)
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
+
+import random
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
+import paddle
+
+from paddlenlp.transformers import LayoutXLMTokenizer, LayoutXLMModel, LayoutXLMForRelationExtraction
+
+from xfun import XFUNDataset
+from utils import parse_args, get_bio_label_maps, draw_re_results
+from data_collator import DataCollator
+
+from ppocr.utils.logging import get_logger
+
+
+def infer(args):
+    os.makedirs(args.output_dir, exist_ok=True)
+    logger = get_logger()
+    label2id_map, id2label_map = get_bio_label_maps(args.label_map_path)
+    pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index
+
+    tokenizer = LayoutXLMTokenizer.from_pretrained(args.model_name_or_path)
+
+    model = LayoutXLMForRelationExtraction.from_pretrained(
+        args.model_name_or_path)
+
+    eval_dataset = XFUNDataset(
+        tokenizer,
+        data_dir=args.eval_data_dir,
+        label_path=args.eval_label_path,
+        label2id_map=label2id_map,
+        img_size=(224, 224),
+        max_seq_len=args.max_seq_length,
+        pad_token_label_id=pad_token_label_id,
+        contains_re=True,
+        add_special_ids=False,
+        return_attention_mask=True,
+        load_mode='all')
+
+    eval_dataloader = paddle.io.DataLoader(
+        eval_dataset,
+        batch_size=args.per_gpu_eval_batch_size,
+        num_workers=8,
+        shuffle=False,
+        collate_fn=DataCollator())
+
+    # load the ground-truth OCR data
+    ocr_info_list = load_ocr(args.eval_data_dir, args.eval_label_path)
+
+    for idx, batch in enumerate(eval_dataloader):
+        logger.info("[Infer] process: {}/{}".format(idx, len(eval_dataloader)))
+        with paddle.no_grad():
+            outputs = model(**batch)
+        pred_relations = outputs['pred_relations']
+
+        ocr_info = ocr_info_list[idx]
+        image_path = ocr_info['image_path']
+        ocr_info = ocr_info['ocr_info']
+
+        # decode the tokens of each entity and filter out the ocr_info
+        # items that are not needed
+        ocr_info = filter_bg_by_txt(ocr_info, batch, tokenizer)
+
+        # convert predicted relations back to OCR info
+        result = []
+        used_tail_id = []
+        for relations in pred_relations:
+            for relation in relations:
+                if relation['tail_id'] in used_tail_id:
+                    continue
+                if relation['head_id'] not in ocr_info or relation[
+                        'tail_id'] not in ocr_info:
+                    continue
+                used_tail_id.append(relation['tail_id'])
+                ocr_info_head = ocr_info[relation['head_id']]
+                ocr_info_tail = ocr_info[relation['tail_id']]
+                result.append((ocr_info_head, ocr_info_tail))
+
+        img = cv2.imread(image_path)
+        img_show = draw_re_results(img, result)
+        save_path = os.path.join(args.output_dir, os.path.basename(image_path))
+        cv2.imwrite(save_path, img_show)
+
+
+def load_ocr(img_folder, json_path):
+    import json
+    d = []
+    with open(json_path, "r") as fin:
+        lines = fin.readlines()
+        for line in lines:
+            image_name, info_str = line.split("\t")
+            info_dict = json.loads(info_str)
+            info_dict['image_path'] = os.path.join(img_folder, image_name)
+            d.append(info_dict)
+    return d
+
+
+def filter_bg_by_txt(ocr_info, batch, tokenizer):
+    entities = batch['entities'][0]
+    input_ids = batch['input_ids'][0]
+
+    new_info_dict = {}
+    # decode the tokens of each entity span and keep only the ocr_info
+    # items whose text matches the decoded entity text
+    for i in range(len(entities['start'])):
+        entity_start = entities['start'][i]
+        entity_end = entities['end'][i]
+        word_input_ids = input_ids[entity_start:entity_end].numpy().tolist()
+        txt = tokenizer.convert_ids_to_tokens(word_input_ids)
+        txt = tokenizer.convert_tokens_to_string(txt)
+
+        # use a separate index so the outer loop variable is not shadowed
+        for j, info in enumerate(ocr_info):
+            if info['text'] == txt:
+                new_info_dict[j] = info
+    return new_info_dict
+
+
+def post_process(pred_relations, ocr_info, img):
+    result = []
+    for relations in pred_relations:
+        for relation in relations:
+            ocr_info_head = ocr_info[relation['head_id']]
+            ocr_info_tail = ocr_info[relation['tail_id']]
+            result.append((ocr_info_head, ocr_info_tail))
+    return result
+
+
+def draw_re(result, image_path, output_folder):
+    img = cv2.imread(image_path)
+
+    # head boxes in blue, tail boxes in red, links in green (BGR order)
+    for ocr_info_head, ocr_info_tail in result:
+        cv2.rectangle(
+            img,
+            tuple(ocr_info_head['bbox'][:2]),
+            tuple(ocr_info_head['bbox'][2:]), (255, 0, 0),
+            thickness=2)
+        cv2.rectangle(
+            img,
+            tuple(ocr_info_tail['bbox'][:2]),
+            tuple(ocr_info_tail['bbox'][2:]), (0, 0, 255),
+            thickness=2)
+        center_p1 = [(ocr_info_head['bbox'][0] + ocr_info_head['bbox'][2]) // 2,
+                     (ocr_info_head['bbox'][1] + ocr_info_head['bbox'][3]) // 2]
+        center_p2 = [(ocr_info_tail['bbox'][0] + ocr_info_tail['bbox'][2]) // 2,
+                     (ocr_info_tail['bbox'][1] + ocr_info_tail['bbox'][3]) // 2]
+        cv2.line(
+            img, tuple(center_p1), tuple(center_p2), (0, 255, 0), thickness=2)
+    # matplotlib is already imported at module level as plt
+    plt.imshow(img)
+    plt.savefig(
+        os.path.join(output_folder, os.path.basename(image_path)), dpi=600)
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    infer(args)
diff --git a/ppstructure/vqa/infer_ser.py b/ppstructure/vqa/infer_ser.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad220094a26b330555fbe9122a46fb56e64fe1e --- /dev/null +++ b/ppstructure/vqa/infer_ser.py @@ -0,0 +1,279 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
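+
+# SER inference using OCR results provided in a JSON file (--ocr_json_path):
+# builds LayoutXLM inputs from the texts and normalized boxes, predicts a
+# label for every OCR box, and writes visualizations plus infer_results.txt.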
+ +import os +import sys +import json +import cv2 +import numpy as np +from copy import deepcopy + +import paddle + +# relative reference +from utils import parse_args, get_image_file_list, draw_ser_results, get_bio_label_maps +from paddlenlp.transformers import LayoutXLMModel, LayoutXLMTokenizer, LayoutXLMForTokenClassification + + +def pad_sentences(tokenizer, + encoded_inputs, + max_seq_len=512, + pad_to_max_seq_len=True, + return_attention_mask=True, + return_token_type_ids=True, + return_overflowing_tokens=False, + return_special_tokens_mask=False): + # Padding with larger size, reshape is carried out + max_seq_len = ( + len(encoded_inputs["input_ids"]) // max_seq_len + 1) * max_seq_len + + needs_to_be_padded = pad_to_max_seq_len and \ + max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len + + if needs_to_be_padded: + difference = max_seq_len - len(encoded_inputs["input_ids"]) + if tokenizer.padding_side == 'right': + if return_attention_mask: + encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[ + "input_ids"]) + [0] * difference + if return_token_type_ids: + encoded_inputs["token_type_ids"] = ( + encoded_inputs["token_type_ids"] + + [tokenizer.pad_token_type_id] * difference) + if return_special_tokens_mask: + encoded_inputs["special_tokens_mask"] = encoded_inputs[ + "special_tokens_mask"] + [1] * difference + encoded_inputs["input_ids"] = encoded_inputs[ + "input_ids"] + [tokenizer.pad_token_id] * difference + encoded_inputs["bbox"] = encoded_inputs["bbox"] + [[0, 0, 0, 0] + ] * difference + else: + assert False, f"padding_side of tokenizer just supports [\"right\"] but got {tokenizer.padding_side}" + else: + if return_attention_mask: + encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[ + "input_ids"]) + + return encoded_inputs + + +def split_page(encoded_inputs, max_seq_len=512): + """ + truncate is often used in training process + """ + for key in encoded_inputs: + encoded_inputs[key] = paddle.to_tensor(encoded_inputs[key]) + if encoded_inputs[key].ndim <= 1: # for input_ids, att_mask and so on + encoded_inputs[key] = encoded_inputs[key].reshape([-1, max_seq_len]) + else: # for bbox + encoded_inputs[key] = encoded_inputs[key].reshape( + [-1, max_seq_len, 4]) + return encoded_inputs + + +def preprocess( + tokenizer, + ori_img, + ocr_info, + img_size=(224, 224), + pad_token_label_id=-100, + max_seq_len=512, + add_special_ids=False, + return_attention_mask=True, ): + ocr_info = deepcopy(ocr_info) + height = ori_img.shape[0] + width = ori_img.shape[1] + + img = cv2.resize(ori_img, + (224, 224)).transpose([2, 0, 1]).astype(np.float32) + + segment_offset_id = [] + words_list = [] + bbox_list = [] + input_ids_list = [] + token_type_ids_list = [] + + for info in ocr_info: + # x1, y1, x2, y2 + bbox = info["bbox"] + bbox[0] = int(bbox[0] * 1000.0 / width) + bbox[2] = int(bbox[2] * 1000.0 / width) + bbox[1] = int(bbox[1] * 1000.0 / height) + bbox[3] = int(bbox[3] * 1000.0 / height) + + text = info["text"] + encode_res = tokenizer.encode( + text, pad_to_max_seq_len=False, return_attention_mask=True) + + if not add_special_ids: + # TODO: use tok.all_special_ids to remove + encode_res["input_ids"] = encode_res["input_ids"][1:-1] + encode_res["token_type_ids"] = encode_res["token_type_ids"][1:-1] + encode_res["attention_mask"] = encode_res["attention_mask"][1:-1] + + input_ids_list.extend(encode_res["input_ids"]) + token_type_ids_list.extend(encode_res["token_type_ids"]) + bbox_list.extend([bbox] * len(encode_res["input_ids"])) + words_list.append(text) + 
segment_offset_id.append(len(input_ids_list)) + + encoded_inputs = { + "input_ids": input_ids_list, + "token_type_ids": token_type_ids_list, + "bbox": bbox_list, + "attention_mask": [1] * len(input_ids_list), + } + + encoded_inputs = pad_sentences( + tokenizer, + encoded_inputs, + max_seq_len=max_seq_len, + return_attention_mask=return_attention_mask) + + encoded_inputs = split_page(encoded_inputs) + + fake_bs = encoded_inputs["input_ids"].shape[0] + + encoded_inputs["image"] = paddle.to_tensor(img).unsqueeze(0).expand( + [fake_bs] + list(img.shape)) + + encoded_inputs["segment_offset_id"] = segment_offset_id + + return encoded_inputs + + +def postprocess(attention_mask, preds, label_map_path): + if isinstance(preds, paddle.Tensor): + preds = preds.numpy() + preds = np.argmax(preds, axis=2) + + _, label_map = get_bio_label_maps(label_map_path) + + preds_list = [[] for _ in range(preds.shape[0])] + + # keep batch info + for i in range(preds.shape[0]): + for j in range(preds.shape[1]): + if attention_mask[i][j] == 1: + preds_list[i].append(label_map[preds[i][j]]) + + return preds_list + + +def merge_preds_list_with_ocr_info(label_map_path, ocr_info, segment_offset_id, + preds_list): + # must ensure the preds_list is generated from the same image + preds = [p for pred in preds_list for p in pred] + label2id_map, _ = get_bio_label_maps(label_map_path) + for key in label2id_map: + if key.startswith("I-"): + label2id_map[key] = label2id_map["B" + key[1:]] + + id2label_map = dict() + for key in label2id_map: + val = label2id_map[key] + if key == "O": + id2label_map[val] = key + if key.startswith("B-") or key.startswith("I-"): + id2label_map[val] = key[2:] + else: + id2label_map[val] = key + + for idx in range(len(segment_offset_id)): + if idx == 0: + start_id = 0 + else: + start_id = segment_offset_id[idx - 1] + + end_id = segment_offset_id[idx] + + curr_pred = preds[start_id:end_id] + curr_pred = [label2id_map[p] for p in curr_pred] + + if len(curr_pred) <= 0: + pred_id = 0 + else: + counts = np.bincount(curr_pred) + pred_id = np.argmax(counts) + ocr_info[idx]["pred_id"] = int(pred_id) + ocr_info[idx]["pred"] = id2label_map[pred_id] + return ocr_info + + +@paddle.no_grad() +def infer(args): + os.makedirs(args.output_dir, exist_ok=True) + + # init token and model + tokenizer = LayoutXLMTokenizer.from_pretrained(args.model_name_or_path) + # model = LayoutXLMModel.from_pretrained(args.model_name_or_path) + model = LayoutXLMForTokenClassification.from_pretrained( + args.model_name_or_path) + model.eval() + + # load ocr results json + ocr_results = dict() + with open(args.ocr_json_path, "r") as fin: + lines = fin.readlines() + for line in lines: + img_name, json_info = line.split("\t") + ocr_results[os.path.basename(img_name)] = json.loads(json_info) + + # get infer img list + infer_imgs = get_image_file_list(args.infer_imgs) + + # loop for infer + with open(os.path.join(args.output_dir, "infer_results.txt"), "w") as fout: + for idx, img_path in enumerate(infer_imgs): + print("process: [{}/{}]".format(idx, len(infer_imgs), img_path)) + + img = cv2.imread(img_path) + + ocr_info = ocr_results[os.path.basename(img_path)]["ocr_info"] + inputs = preprocess( + tokenizer=tokenizer, + ori_img=img, + ocr_info=ocr_info, + max_seq_len=args.max_seq_length) + + outputs = model( + input_ids=inputs["input_ids"], + bbox=inputs["bbox"], + image=inputs["image"], + token_type_ids=inputs["token_type_ids"], + attention_mask=inputs["attention_mask"]) + + preds = outputs[0] + preds = postprocess(inputs["attention_mask"], 
+            preds = postprocess(inputs["attention_mask"], preds,
+                                args.label_map_path)
+            ocr_info = merge_preds_list_with_ocr_info(
+                args.label_map_path, ocr_info, inputs["segment_offset_id"],
+                preds)
+
+            fout.write(img_path + "\t" + json.dumps(
+                {
+                    "ocr_info": ocr_info,
+                }, ensure_ascii=False) + "\n")
+
+            img_res = draw_ser_results(img, ocr_info)
+            cv2.imwrite(
+                os.path.join(args.output_dir, os.path.basename(img_path)),
+                img_res)
+
+    return
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    infer(args)
diff --git a/ppstructure/vqa/infer_ser_e2e.py b/ppstructure/vqa/infer_ser_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..3ebb350fd9ce90fa5a5688c34f041f67105fcf86 --- /dev/null +++ b/ppstructure/vqa/infer_ser_e2e.py @@ -0,0 +1,132 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import json
+import cv2
+import numpy as np
+from copy import deepcopy
+from PIL import Image
+
+import paddle
+from paddlenlp.transformers import LayoutXLMModel, LayoutXLMTokenizer, LayoutXLMForTokenClassification
+
+# relative reference (run this script from within ppstructure/vqa)
+from utils import parse_args, get_image_file_list, draw_ser_results, get_bio_label_maps
+from utils import pad_sentences, split_page, preprocess, postprocess, merge_preds_list_with_ocr_info
+
+
+def trans_poly_to_bbox(poly):
+    x1 = np.min([p[0] for p in poly])
+    x2 = np.max([p[0] for p in poly])
+    y1 = np.min([p[1] for p in poly])
+    y2 = np.max([p[1] for p in poly])
+    return [x1, y1, x2, y2]
+
+
+def parse_ocr_info_for_ser(ocr_result):
+    ocr_info = []
+    for res in ocr_result:
+        ocr_info.append({
+            "text": res[1][0],
+            "bbox": trans_poly_to_bbox(res[0]),
+            "poly": res[0],
+        })
+    return ocr_info
+
+
+class SerPredictor(object):
+    def __init__(self, args):
+
+        self.max_seq_length = args.max_seq_length
+
+        # init ser token and model
+        self.tokenizer = LayoutXLMTokenizer.from_pretrained(
+            args.model_name_or_path)
+        self.model = LayoutXLMForTokenClassification.from_pretrained(
+            args.model_name_or_path)
+        self.model.eval()
+
+        # init ocr_engine
+        from paddleocr import PaddleOCR
+
+        self.ocr_engine = PaddleOCR(
+            rec_model_dir=args.rec_model_dir,
+            det_model_dir=args.det_model_dir,
+            use_angle_cls=False,
+            show_log=False)
+        # init dict
+        label2id_map, self.id2label_map = get_bio_label_maps(
+            args.label_map_path)
+        self.label2id_map_for_draw = dict()
+        for key in label2id_map:
+            if key.startswith("I-"):
+                self.label2id_map_for_draw[key] = label2id_map["B" + key[1:]]
+            else:
+                self.label2id_map_for_draw[key] = label2id_map[key]
+
+    def __call__(self, img):
+        ocr_result = self.ocr_engine.ocr(img, cls=False)
+
+        ocr_info = parse_ocr_info_for_ser(ocr_result)
+
+        inputs = preprocess(
+            tokenizer=self.tokenizer,
+            ori_img=img,
+            ocr_info=ocr_info,
+            max_seq_len=self.max_seq_length)
+
+        outputs = self.model(
+            input_ids=inputs["input_ids"],
+            bbox=inputs["bbox"],
+            image=inputs["image"],
+            token_type_ids=inputs["token_type_ids"],
+            attention_mask=inputs["attention_mask"])
+
+        preds = outputs[0]
+        preds = postprocess(inputs["attention_mask"], preds,
+                            self.id2label_map)
+        ocr_info = merge_preds_list_with_ocr_info(
+            ocr_info, inputs["segment_offset_id"], preds,
+            self.label2id_map_for_draw)
+        return ocr_info, inputs
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    os.makedirs(args.output_dir, exist_ok=True)
+
+    # get infer img list
+    infer_imgs = get_image_file_list(args.infer_imgs)
+
+    # loop for infer
+    ser_engine = SerPredictor(args)
+    with open(os.path.join(args.output_dir, "infer_results.txt"), "w") as fout:
+        for idx, img_path in enumerate(infer_imgs):
+            print("process: [{}/{}], {}".format(idx, len(infer_imgs), img_path))
+
+            img = cv2.imread(img_path)
+
+            result, _ = ser_engine(img)
+            fout.write(img_path + "\t" + json.dumps(
+                {
+                    "ser_result": result,
+                }, ensure_ascii=False) + "\n")
+
+            img_res = draw_ser_results(img, result)
+            cv2.imwrite(
+                os.path.join(args.output_dir,
+                             os.path.splitext(os.path.basename(img_path))[0] +
+                             "_ser.jpg"), img_res)
diff --git a/ppstructure/vqa/infer_ser_re_e2e.py b/ppstructure/vqa/infer_ser_re_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..a1d0f52eeecbc6c2ceba5964355008f638f371dd --- /dev/null +++ b/ppstructure/vqa/infer_ser_re_e2e.py @@ -0,0 +1,131 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
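+
+# End-to-end OCR + SER + RE inference: runs SerPredictor first, converts its
+# outputs into relation-extraction inputs, and links each predicted ANSWER
+# entity to its QUESTION entity.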
+
+import os
+import sys
+import json
+import cv2
+import numpy as np
+from copy import deepcopy
+from PIL import Image
+
+import paddle
+from paddlenlp.transformers import LayoutXLMModel, LayoutXLMTokenizer, LayoutXLMForRelationExtraction
+
+# relative reference
+from utils import parse_args, get_image_file_list, draw_re_results
+from infer_ser_e2e import SerPredictor
+
+
+def make_input(ser_input, ser_result, max_seq_len=512):
+    entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2}
+
+    entities = ser_input['entities'][0]
+    assert len(entities) == len(ser_result)
+
+    # entities
+    start = []
+    end = []
+    label = []
+    entity_idx_dict = {}
+    for i, (res, entity) in enumerate(zip(ser_result, entities)):
+        if res['pred'] == 'O':
+            continue
+        entity_idx_dict[len(start)] = i
+        start.append(entity['start'])
+        end.append(entity['end'])
+        label.append(entities_labels[res['pred']])
+    entities = dict(start=start, end=end, label=label)
+
+    # relations: every QUESTION entity is a candidate head, every ANSWER a
+    # candidate tail
+    head = []
+    tail = []
+    for i in range(len(entities["label"])):
+        for j in range(len(entities["label"])):
+            if entities["label"][i] == 1 and entities["label"][j] == 2:
+                head.append(i)
+                tail.append(j)
+
+    relations = dict(head=head, tail=tail)
+
+    batch_size = ser_input["input_ids"].shape[0]
+    entities_batch = []
+    relations_batch = []
+    for b in range(batch_size):
+        entities_batch.append(entities)
+        relations_batch.append(relations)
+
+    ser_input['entities'] = entities_batch
+    ser_input['relations'] = relations_batch
+
+    ser_input.pop('segment_offset_id')
+    return ser_input, entity_idx_dict
+
+
+class SerReSystem(object):
+    def __init__(self, args):
+        self.ser_engine = SerPredictor(args)
+        self.tokenizer = LayoutXLMTokenizer.from_pretrained(
+            args.re_model_name_or_path)
+        self.model = LayoutXLMForRelationExtraction.from_pretrained(
+            args.re_model_name_or_path)
+        self.model.eval()
+
+    def __call__(self, img):
+        ser_result, ser_inputs = self.ser_engine(img)
+        re_input, entity_idx_dict = make_input(ser_inputs, ser_result)
+
+        re_result = self.model(**re_input)
+
+        pred_relations = re_result['pred_relations'][0]
+        # convert predicted relations back to OCR info
+        result = []
+        used_tail_id = []
+        for relation in pred_relations:
+            if relation['tail_id'] in used_tail_id:
+                continue
+            used_tail_id.append(relation['tail_id'])
+            ocr_info_head = ser_result[entity_idx_dict[relation['head_id']]]
+            ocr_info_tail = ser_result[entity_idx_dict[relation['tail_id']]]
+            result.append((ocr_info_head, ocr_info_tail))
+
+        return result
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    os.makedirs(args.output_dir, exist_ok=True)
+
+    # get infer img list
+    infer_imgs = get_image_file_list(args.infer_imgs)
+
+    # loop for infer
+    ser_re_engine = SerReSystem(args)
+    with open(os.path.join(args.output_dir, "infer_results.txt"), "w") as fout:
+        for idx, img_path in enumerate(infer_imgs):
+            print("process: [{}/{}], {}".format(idx, len(infer_imgs), img_path))
+
+            img = cv2.imread(img_path)
+
+            result = ser_re_engine(img)
+            fout.write(img_path + "\t" + json.dumps(
+                {
+                    "result": result,
+                }, ensure_ascii=False) + "\n")
+
+            img_res = draw_re_results(img, result)
+            cv2.imwrite(
+                os.path.join(args.output_dir,
+                             os.path.splitext(os.path.basename(img_path))[0] +
+                             "_re.jpg"), img_res)
diff --git a/ppstructure/vqa/labels/labels_ser.txt b/ppstructure/vqa/labels/labels_ser.txt new file mode 100644 index 0000000000000000000000000000000000000000..508e48112412f62538baf0c78bcf99ec8945196e --- /dev/null +++ b/ppstructure/vqa/labels/labels_ser.txt @@ -0,0 +1,3 @@
+QUESTION
+ANSWER
+HEADER
diff --git
a/ppstructure/vqa/metric.py b/ppstructure/vqa/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..cb58370521296886670486982caf1202cf99a489 --- /dev/null +++ b/ppstructure/vqa/metric.py @@ -0,0 +1,175 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re + +import numpy as np + +import logging + +logger = logging.getLogger(__name__) + +PREFIX_CHECKPOINT_DIR = "checkpoint" +_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") + + +def get_last_checkpoint(folder): + content = os.listdir(folder) + checkpoints = [ + path for path in content + if _re_checkpoint.search(path) is not None and os.path.isdir( + os.path.join(folder, path)) + ] + if len(checkpoints) == 0: + return + return os.path.join( + folder, + max(checkpoints, + key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) + + +def re_score(pred_relations, gt_relations, mode="strict"): + """Evaluate RE predictions + + Args: + pred_relations (list) : list of list of predicted relations (several relations in each sentence) + gt_relations (list) : list of list of ground truth relations + + rel = { "head": (start_idx (inclusive), end_idx (exclusive)), + "tail": (start_idx (inclusive), end_idx (exclusive)), + "head_type": ent_type, + "tail_type": ent_type, + "type": rel_type} + + vocab (Vocab) : dataset vocabulary + mode (str) : in 'strict' or 'boundaries'""" + + assert mode in ["strict", "boundaries"] + + relation_types = [v for v in [0, 1] if not v == 0] + scores = { + rel: { + "tp": 0, + "fp": 0, + "fn": 0 + } + for rel in relation_types + ["ALL"] + } + + # Count GT relations and Predicted relations + n_sents = len(gt_relations) + n_rels = sum([len([rel for rel in sent]) for sent in gt_relations]) + n_found = sum([len([rel for rel in sent]) for sent in pred_relations]) + + # Count TP, FP and FN per type + for pred_sent, gt_sent in zip(pred_relations, gt_relations): + for rel_type in relation_types: + # strict mode takes argument types into account + if mode == "strict": + pred_rels = {(rel["head"], rel["head_type"], rel["tail"], + rel["tail_type"]) + for rel in pred_sent if rel["type"] == rel_type} + gt_rels = {(rel["head"], rel["head_type"], rel["tail"], + rel["tail_type"]) + for rel in gt_sent if rel["type"] == rel_type} + + # boundaries mode only takes argument spans into account + elif mode == "boundaries": + pred_rels = {(rel["head"], rel["tail"]) + for rel in pred_sent if rel["type"] == rel_type} + gt_rels = {(rel["head"], rel["tail"]) + for rel in gt_sent if rel["type"] == rel_type} + + scores[rel_type]["tp"] += len(pred_rels & gt_rels) + scores[rel_type]["fp"] += len(pred_rels - gt_rels) + scores[rel_type]["fn"] += len(gt_rels - pred_rels) + + # Compute per entity Precision / Recall / F1 + for rel_type in scores.keys(): + if scores[rel_type]["tp"]: + scores[rel_type]["p"] = scores[rel_type]["tp"] / ( + scores[rel_type]["fp"] + scores[rel_type]["tp"]) + scores[rel_type]["r"] = scores[rel_type]["tp"] / ( 
+ scores[rel_type]["fn"] + scores[rel_type]["tp"]) + else: + scores[rel_type]["p"], scores[rel_type]["r"] = 0, 0 + + if not scores[rel_type]["p"] + scores[rel_type]["r"] == 0: + scores[rel_type]["f1"] = ( + 2 * scores[rel_type]["p"] * scores[rel_type]["r"] / + (scores[rel_type]["p"] + scores[rel_type]["r"])) + else: + scores[rel_type]["f1"] = 0 + + # Compute micro F1 Scores + tp = sum([scores[rel_type]["tp"] for rel_type in relation_types]) + fp = sum([scores[rel_type]["fp"] for rel_type in relation_types]) + fn = sum([scores[rel_type]["fn"] for rel_type in relation_types]) + + if tp: + precision = tp / (tp + fp) + recall = tp / (tp + fn) + f1 = 2 * precision * recall / (precision + recall) + + else: + precision, recall, f1 = 0, 0, 0 + + scores["ALL"]["p"] = precision + scores["ALL"]["r"] = recall + scores["ALL"]["f1"] = f1 + scores["ALL"]["tp"] = tp + scores["ALL"]["fp"] = fp + scores["ALL"]["fn"] = fn + + # Compute Macro F1 Scores + scores["ALL"]["Macro_f1"] = np.mean( + [scores[ent_type]["f1"] for ent_type in relation_types]) + scores["ALL"]["Macro_p"] = np.mean( + [scores[ent_type]["p"] for ent_type in relation_types]) + scores["ALL"]["Macro_r"] = np.mean( + [scores[ent_type]["r"] for ent_type in relation_types]) + + # logger.info(f"RE Evaluation in *** {mode.upper()} *** mode") + + # logger.info( + # "processed {} sentences with {} relations; found: {} relations; correct: {}.".format( + # n_sents, n_rels, n_found, tp + # ) + # ) + # logger.info( + # "\tALL\t TP: {};\tFP: {};\tFN: {}".format(scores["ALL"]["tp"], scores["ALL"]["fp"], scores["ALL"]["fn"]) + # ) + # logger.info("\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)".format(precision, recall, f1)) + # logger.info( + # "\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n".format( + # scores["ALL"]["Macro_p"], scores["ALL"]["Macro_r"], scores["ALL"]["Macro_f1"] + # ) + # ) + + # for rel_type in relation_types: + # logger.info( + # "\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}".format( + # rel_type, + # scores[rel_type]["tp"], + # scores[rel_type]["fp"], + # scores[rel_type]["fn"], + # scores[rel_type]["p"], + # scores[rel_type]["r"], + # scores[rel_type]["f1"], + # scores[rel_type]["tp"] + scores[rel_type]["fp"], + # ) + # ) + + return scores diff --git a/ppstructure/vqa/requirements.txt b/ppstructure/vqa/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..c259fadc395335b336cb0ecdb5aa6bca48631987 --- /dev/null +++ b/ppstructure/vqa/requirements.txt @@ -0,0 +1,2 @@ +sentencepiece +yacs diff --git a/ppstructure/vqa/train_re.py b/ppstructure/vqa/train_re.py new file mode 100644 index 0000000000000000000000000000000000000000..ed19646cf57e69ac99e417ae27568655a4e00039 --- /dev/null +++ b/ppstructure/vqa/train_re.py @@ -0,0 +1,261 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
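+
+# Fine-tunes LayoutXLMForRelationExtraction on XFUND-style data and keeps the
+# checkpoint with the best evaluation F1 (see the RE section of the README).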
+ +import os +import sys + +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) + +import random +import numpy as np +import paddle + +from paddlenlp.transformers import LayoutXLMTokenizer, LayoutXLMModel, LayoutXLMForRelationExtraction + +from xfun import XFUNDataset +from utils import parse_args, get_bio_label_maps, print_arguments +from data_collator import DataCollator +from metric import re_score + +from ppocr.utils.logging import get_logger + + +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + paddle.seed(seed) + + +def cal_metric(re_preds, re_labels, entities): + gt_relations = [] + for b in range(len(re_labels)): + rel_sent = [] + for head, tail in zip(re_labels[b]["head"], re_labels[b]["tail"]): + rel = {} + rel["head_id"] = head + rel["head"] = (entities[b]["start"][rel["head_id"]], + entities[b]["end"][rel["head_id"]]) + rel["head_type"] = entities[b]["label"][rel["head_id"]] + + rel["tail_id"] = tail + rel["tail"] = (entities[b]["start"][rel["tail_id"]], + entities[b]["end"][rel["tail_id"]]) + rel["tail_type"] = entities[b]["label"][rel["tail_id"]] + + rel["type"] = 1 + rel_sent.append(rel) + gt_relations.append(rel_sent) + re_metrics = re_score(re_preds, gt_relations, mode="boundaries") + return re_metrics + + +def evaluate(model, eval_dataloader, logger, prefix=""): + # Eval! + logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = {}".format(len(eval_dataloader.dataset))) + + re_preds = [] + re_labels = [] + entities = [] + eval_loss = 0.0 + model.eval() + for idx, batch in enumerate(eval_dataloader): + with paddle.no_grad(): + outputs = model(**batch) + loss = outputs['loss'].mean().item() + if paddle.distributed.get_rank() == 0: + logger.info("[Eval] process: {}/{}, loss: {:.5f}".format( + idx, len(eval_dataloader), loss)) + + eval_loss += loss + re_preds.extend(outputs['pred_relations']) + re_labels.extend(batch['relations']) + entities.extend(batch['entities']) + re_metrics = cal_metric(re_preds, re_labels, entities) + re_metrics = { + "precision": re_metrics["ALL"]["p"], + "recall": re_metrics["ALL"]["r"], + "f1": re_metrics["ALL"]["f1"], + } + model.train() + return re_metrics + + +def train(args): + logger = get_logger(log_file=os.path.join(args.output_dir, "train.log")) + print_arguments(args, logger) + + # Added here for reproducibility (even between python 2 and 3) + set_seed(args.seed) + + label2id_map, id2label_map = get_bio_label_maps(args.label_map_path) + pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index + + # dist mode + if paddle.distributed.get_world_size() > 1: + paddle.distributed.init_parallel_env() + + tokenizer = LayoutXLMTokenizer.from_pretrained(args.model_name_or_path) + + model = LayoutXLMModel.from_pretrained(args.model_name_or_path) + model = LayoutXLMForRelationExtraction(model, dropout=None) + + # dist mode + if paddle.distributed.get_world_size() > 1: + model = paddle.distributed.DataParallel(model) + + train_dataset = XFUNDataset( + tokenizer, + data_dir=args.train_data_dir, + label_path=args.train_label_path, + label2id_map=label2id_map, + img_size=(224, 224), + max_seq_len=args.max_seq_length, + pad_token_label_id=pad_token_label_id, + contains_re=True, + add_special_ids=False, + return_attention_mask=True, + load_mode='all') + + eval_dataset = XFUNDataset( + tokenizer, + data_dir=args.eval_data_dir, + label_path=args.eval_label_path, + label2id_map=label2id_map, + img_size=(224, 
224), + max_seq_len=args.max_seq_length, + pad_token_label_id=pad_token_label_id, + contains_re=True, + add_special_ids=False, + return_attention_mask=True, + load_mode='all') + + train_sampler = paddle.io.DistributedBatchSampler( + train_dataset, batch_size=args.per_gpu_train_batch_size, shuffle=True) + args.train_batch_size = args.per_gpu_train_batch_size * \ + max(1, paddle.distributed.get_world_size()) + train_dataloader = paddle.io.DataLoader( + train_dataset, + batch_sampler=train_sampler, + num_workers=8, + use_shared_memory=True, + collate_fn=DataCollator()) + + eval_dataloader = paddle.io.DataLoader( + eval_dataset, + batch_size=args.per_gpu_eval_batch_size, + num_workers=8, + shuffle=False, + collate_fn=DataCollator()) + + t_total = len(train_dataloader) * args.num_train_epochs + + # build linear decay with warmup lr sch + lr_scheduler = paddle.optimizer.lr.PolynomialDecay( + learning_rate=args.learning_rate, + decay_steps=t_total, + end_lr=0.0, + power=1.0) + if args.warmup_steps > 0: + lr_scheduler = paddle.optimizer.lr.LinearWarmup( + lr_scheduler, + args.warmup_steps, + start_lr=0, + end_lr=args.learning_rate, ) + grad_clip = paddle.nn.ClipGradByNorm(clip_norm=10) + optimizer = paddle.optimizer.Adam( + learning_rate=args.learning_rate, + parameters=model.parameters(), + epsilon=args.adam_epsilon, + grad_clip=grad_clip, + weight_decay=args.weight_decay) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = {}".format(len(train_dataset))) + logger.info(" Num Epochs = {}".format(args.num_train_epochs)) + logger.info(" Instantaneous batch size per GPU = {}".format( + args.per_gpu_train_batch_size)) + logger.info( + " Total train batch size (w. parallel, distributed & accumulation) = {}". + format(args.train_batch_size * paddle.distributed.get_world_size())) + logger.info(" Total optimization steps = {}".format(t_total)) + + global_step = 0 + model.clear_gradients() + train_dataloader_len = len(train_dataloader) + best_metirc = {'f1': 0} + model.train() + + for epoch in range(int(args.num_train_epochs)): + for step, batch in enumerate(train_dataloader): + outputs = model(**batch) + # model outputs are always tuple in ppnlp (see doc) + loss = outputs['loss'] + loss = loss.mean() + + logger.info( + "epoch: [{}/{}], iter: [{}/{}], global_step:{}, train loss: {}, lr: {}". + format(epoch, args.num_train_epochs, step, train_dataloader_len, + global_step, np.mean(loss.numpy()), optimizer.get_lr())) + + loss.backward() + optimizer.step() + optimizer.clear_grad() + # lr_scheduler.step() # Update learning rate schedule + + global_step += 1 + + if (paddle.distributed.get_rank() == 0 and args.eval_steps > 0 and + global_step % args.eval_steps == 0): + # Log metrics + if (paddle.distributed.get_rank() == 0 and args. 
+ evaluate_during_training): # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(model, eval_dataloader, logger) + if results['f1'] > best_metirc['f1']: + best_metirc = results + output_dir = os.path.join(args.output_dir, + "checkpoint-best") + os.makedirs(output_dir, exist_ok=True) + model.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + paddle.save(args, + os.path.join(output_dir, + "training_args.bin")) + logger.info("Saving model checkpoint to {}".format( + output_dir)) + logger.info("eval results: {}".format(results)) + logger.info("best_metirc: {}".format(best_metirc)) + + if (paddle.distributed.get_rank() == 0 and args.save_steps > 0 and + global_step % args.save_steps == 0): + # Save model checkpoint + output_dir = os.path.join(args.output_dir, "checkpoint-latest") + os.makedirs(output_dir, exist_ok=True) + if paddle.distributed.get_rank() == 0: + model.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + paddle.save(args, + os.path.join(output_dir, "training_args.bin")) + logger.info("Saving model checkpoint to {}".format( + output_dir)) + logger.info("best_metirc: {}".format(best_metirc)) + + +if __name__ == "__main__": + args = parse_args() + os.makedirs(args.output_dir, exist_ok=True) + train(args) diff --git a/ppstructure/vqa/train_ser.py b/ppstructure/vqa/train_ser.py new file mode 100644 index 0000000000000000000000000000000000000000..d3144e7167c59b5883047a948abaedfd21ba9b1c --- /dev/null +++ b/ppstructure/vqa/train_ser.py @@ -0,0 +1,298 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
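+
+# Fine-tunes LayoutXLMForTokenClassification for SER on XFUND-style data;
+# evaluation reports entity-level precision / recall / F1 via seqeval.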
+ +import os +import sys + +__dir__ = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(__dir__) +sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) + +import random +import copy +import logging + +import argparse +import paddle +import numpy as np +from seqeval.metrics import classification_report, f1_score, precision_score, recall_score +from paddlenlp.transformers import LayoutXLMModel, LayoutXLMTokenizer, LayoutXLMForTokenClassification +from xfun import XFUNDataset +from utils import parse_args +from utils import get_bio_label_maps +from utils import print_arguments + +from ppocr.utils.logging import get_logger + + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + paddle.seed(args.seed) + + +def train(args): + os.makedirs(args.output_dir, exist_ok=True) + logger = get_logger(log_file=os.path.join(args.output_dir, "train.log")) + print_arguments(args, logger) + + label2id_map, id2label_map = get_bio_label_maps(args.label_map_path) + pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index + + # dist mode + if paddle.distributed.get_world_size() > 1: + paddle.distributed.init_parallel_env() + + tokenizer = LayoutXLMTokenizer.from_pretrained(args.model_name_or_path) + base_model = LayoutXLMModel.from_pretrained(args.model_name_or_path) + model = LayoutXLMForTokenClassification( + base_model, num_classes=len(label2id_map), dropout=None) + + # dist mode + if paddle.distributed.get_world_size() > 1: + model = paddle.DataParallel(model) + + train_dataset = XFUNDataset( + tokenizer, + data_dir=args.train_data_dir, + label_path=args.train_label_path, + label2id_map=label2id_map, + img_size=(224, 224), + pad_token_label_id=pad_token_label_id, + contains_re=False, + add_special_ids=False, + return_attention_mask=True, + load_mode='all') + + train_sampler = paddle.io.DistributedBatchSampler( + train_dataset, batch_size=args.per_gpu_train_batch_size, shuffle=True) + + args.train_batch_size = args.per_gpu_train_batch_size * max( + 1, paddle.distributed.get_world_size()) + + train_dataloader = paddle.io.DataLoader( + train_dataset, + batch_sampler=train_sampler, + num_workers=0, + use_shared_memory=True, + collate_fn=None, ) + + t_total = len(train_dataloader) * args.num_train_epochs + + # build linear decay with warmup lr sch + lr_scheduler = paddle.optimizer.lr.PolynomialDecay( + learning_rate=args.learning_rate, + decay_steps=t_total, + end_lr=0.0, + power=1.0) + if args.warmup_steps > 0: + lr_scheduler = paddle.optimizer.lr.LinearWarmup( + lr_scheduler, + args.warmup_steps, + start_lr=0, + end_lr=args.learning_rate, ) + + optimizer = paddle.optimizer.AdamW( + learning_rate=lr_scheduler, + parameters=model.parameters(), + epsilon=args.adam_epsilon, + weight_decay=args.weight_decay) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", + args.per_gpu_train_batch_size) + logger.info( + " Total train batch size (w. 
parallel, distributed) = %d", + args.train_batch_size * paddle.distributed.get_world_size(), ) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss = 0.0 + set_seed(args) + best_metrics = None + + for epoch_id in range(args.num_train_epochs): + for step, batch in enumerate(train_dataloader): + model.train() + outputs = model(**batch) + # model outputs are always tuple in ppnlp (see doc) + loss = outputs[0] + loss = loss.mean() + logger.info( + "epoch: [{}/{}], iter: [{}/{}], global_step:{}, train loss: {}, lr: {}". + format(epoch_id, args.num_train_epochs, step, + len(train_dataloader), global_step, + loss.numpy()[0], lr_scheduler.get_lr())) + + loss.backward() + tr_loss += loss.item() + optimizer.step() + lr_scheduler.step() # Update learning rate schedule + optimizer.clear_grad() + global_step += 1 + + if (paddle.distributed.get_rank() == 0 and args.eval_steps > 0 and + global_step % args.eval_steps == 0): + # Log metrics + # Only evaluate when single GPU otherwise metrics may not average well + if paddle.distributed.get_rank( + ) == 0 and args.evaluate_during_training: + results, _ = evaluate(args, model, tokenizer, label2id_map, + id2label_map, pad_token_label_id, + logger) + + if best_metrics is None or results["f1"] >= best_metrics[ + "f1"]: + best_metrics = copy.deepcopy(results) + output_dir = os.path.join(args.output_dir, "best_model") + os.makedirs(output_dir, exist_ok=True) + if paddle.distributed.get_rank() == 0: + model.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + paddle.save( + args, + os.path.join(output_dir, "training_args.bin")) + logger.info("Saving model checkpoint to %s", + output_dir) + + logger.info("[epoch {}/{}][iter: {}/{}] results: {}".format( + epoch_id, args.num_train_epochs, step, + len(train_dataloader), results)) + if best_metrics is not None: + logger.info("best metrics: {}".format(best_metrics)) + + if paddle.distributed.get_rank( + ) == 0 and args.save_steps > 0 and global_step % args.save_steps == 0: + # Save model checkpoint + output_dir = os.path.join(args.output_dir, + "checkpoint-{}".format(global_step)) + os.makedirs(output_dir, exist_ok=True) + if paddle.distributed.get_rank() == 0: + model.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + paddle.save(args, + os.path.join(output_dir, "training_args.bin")) + logger.info("Saving model checkpoint to %s", output_dir) + + return global_step, tr_loss / global_step + + +def evaluate(args, + model, + tokenizer, + label2id_map, + id2label_map, + pad_token_label_id, + logger, + prefix=""): + eval_dataset = XFUNDataset( + tokenizer, + data_dir=args.eval_data_dir, + label_path=args.eval_label_path, + label2id_map=label2id_map, + img_size=(224, 224), + pad_token_label_id=pad_token_label_id, + contains_re=False, + add_special_ids=False, + return_attention_mask=True, + load_mode='all') + + args.eval_batch_size = args.per_gpu_eval_batch_size * max( + 1, paddle.distributed.get_world_size()) + + eval_dataloader = paddle.io.DataLoader( + eval_dataset, + batch_size=args.eval_batch_size, + num_workers=0, + use_shared_memory=True, + collate_fn=None, ) + + # Eval! 
+ logger.info("***** Running evaluation %s *****", prefix) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + preds = None + out_label_ids = None + model.eval() + for idx, batch in enumerate(eval_dataloader): + with paddle.no_grad(): + outputs = model(**batch) + tmp_eval_loss, logits = outputs[:2] + + tmp_eval_loss = tmp_eval_loss.mean() + + if paddle.distributed.get_rank() == 0: + logger.info("[Eval]process: {}/{}, loss: {:.5f}".format( + idx, len(eval_dataloader), tmp_eval_loss.numpy()[0])) + + eval_loss += tmp_eval_loss.item() + nb_eval_steps += 1 + if preds is None: + preds = logits.numpy() + out_label_ids = batch["labels"].numpy() + else: + preds = np.append(preds, logits.numpy(), axis=0) + out_label_ids = np.append( + out_label_ids, batch["labels"].numpy(), axis=0) + + eval_loss = eval_loss / nb_eval_steps + preds = np.argmax(preds, axis=2) + + # label_map = {i: label.upper() for i, label in enumerate(labels)} + + out_label_list = [[] for _ in range(out_label_ids.shape[0])] + preds_list = [[] for _ in range(out_label_ids.shape[0])] + + for i in range(out_label_ids.shape[0]): + for j in range(out_label_ids.shape[1]): + if out_label_ids[i, j] != pad_token_label_id: + out_label_list[i].append(id2label_map[out_label_ids[i][j]]) + preds_list[i].append(id2label_map[preds[i][j]]) + + results = { + "loss": eval_loss, + "precision": precision_score(out_label_list, preds_list), + "recall": recall_score(out_label_list, preds_list), + "f1": f1_score(out_label_list, preds_list), + } + + with open(os.path.join(args.output_dir, "test_gt.txt"), "w") as fout: + for lbl in out_label_list: + for l in lbl: + fout.write(l + "\t") + fout.write("\n") + with open(os.path.join(args.output_dir, "test_pred.txt"), "w") as fout: + for lbl in preds_list: + for l in lbl: + fout.write(l + "\t") + fout.write("\n") + + report = classification_report(out_label_list, preds_list) + logger.info("\n" + report) + + logger.info("***** Eval results %s *****", prefix) + for key in sorted(results.keys()): + logger.info(" %s = %s", key, str(results[key])) + + return results, preds_list + + +if __name__ == "__main__": + args = parse_args() + train(args) diff --git a/ppstructure/vqa/utils.py b/ppstructure/vqa/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f4db20d5cbcb6cf510bb794bb0e7d836da028b2f --- /dev/null +++ b/ppstructure/vqa/utils.py @@ -0,0 +1,392 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
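+
+# Shared helpers for the VQA scripts: BIO label maps, image file listing and
+# visualization of SER / RE results.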
+
+import os
+import argparse
+import cv2
+import random
+import numpy as np
+import imghdr
+from copy import deepcopy
+
+import paddle
+
+from PIL import Image, ImageDraw, ImageFont
+
+
+def get_bio_label_maps(label_map_path):
+    with open(label_map_path, "r") as fin:
+        lines = fin.readlines()
+    lines = [line.strip() for line in lines]
+    if "O" not in lines:
+        lines.insert(0, "O")
+    labels = []
+    for line in lines:
+        if line == "O":
+            labels.append("O")
+        else:
+            labels.append("B-" + line)
+            labels.append("I-" + line)
+    label2id_map = {label: idx for idx, label in enumerate(labels)}
+    id2label_map = {idx: label for idx, label in enumerate(labels)}
+    return label2id_map, id2label_map
+
+
+def get_image_file_list(img_file):
+    imgs_lists = []
+    if img_file is None or not os.path.exists(img_file):
+        raise Exception("no image file found in {}".format(img_file))
+
+    img_end = {'jpg', 'bmp', 'png', 'jpeg', 'rgb', 'tif', 'tiff', 'gif', 'GIF'}
+    if os.path.isfile(img_file) and imghdr.what(img_file) in img_end:
+        imgs_lists.append(img_file)
+    elif os.path.isdir(img_file):
+        for single_file in os.listdir(img_file):
+            file_path = os.path.join(img_file, single_file)
+            if os.path.isfile(file_path) and imghdr.what(file_path) in img_end:
+                imgs_lists.append(file_path)
+    if len(imgs_lists) == 0:
+        raise Exception("no image file found in {}".format(img_file))
+    imgs_lists = sorted(imgs_lists)
+    return imgs_lists
+
+
+def draw_ser_results(image,
+                     ocr_results,
+                     font_path="../../doc/fonts/simfang.ttf",
+                     font_size=18):
+    np.random.seed(2021)
+    color = (np.random.permutation(range(255)),
+             np.random.permutation(range(255)),
+             np.random.permutation(range(255)))
+    color_map = {
+        idx: (color[0][idx], color[1][idx], color[2][idx])
+        for idx in range(1, 255)
+    }
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    img_new = image.copy()
+    draw = ImageDraw.Draw(img_new)
+
+    font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+    for ocr_info in ocr_results:
+        if ocr_info["pred_id"] not in color_map:
+            continue
+        color = color_map[ocr_info["pred_id"]]
+        text = "{}: {}".format(ocr_info["pred"], ocr_info["text"])
+
+        draw_box_txt(ocr_info["bbox"], text, draw, font, font_size, color)
+
+    img_new = Image.blend(image, img_new, 0.5)
+    return np.array(img_new)
+
+
+def draw_box_txt(bbox, text, draw, font, font_size, color):
+    # draw ocr results outline
+    bbox = ((bbox[0], bbox[1]), (bbox[2], bbox[3]))
+    draw.rectangle(bbox, fill=color)
+
+    # draw ocr results
+    start_y = max(0, bbox[0][1] - font_size)
+    tw = font.getsize(text)[0]
+    draw.rectangle(
+        [(bbox[0][0] + 1, start_y), (bbox[0][0] + tw + 1, start_y + font_size)],
+        fill=(0, 0, 255))
+    draw.text((bbox[0][0] + 1, start_y), text, fill=(255, 255, 255), font=font)
+
+
+def draw_re_results(image,
+                    result,
+                    font_path="../../doc/fonts/simfang.ttf",
+                    font_size=18):
+    np.random.seed(0)
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    img_new = image.copy()
+    draw = ImageDraw.Draw(img_new)
+
+    font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
+    color_head = (0, 0, 255)
+    color_tail = (255, 0, 0)
+    color_line = (0, 255, 0)
+
+    for ocr_info_head, ocr_info_tail in result:
+        draw_box_txt(ocr_info_head["bbox"], ocr_info_head["text"], draw, font,
+                     font_size, color_head)
+        draw_box_txt(ocr_info_tail["bbox"], ocr_info_tail["text"], draw, font,
+                     font_size, color_tail)
+
+        center_head = (
+            (ocr_info_head['bbox'][0] + ocr_info_head['bbox'][2]) // 2,
+            (ocr_info_head['bbox'][1] + ocr_info_head['bbox'][3]) // 2)
+        center_tail = (
+            (ocr_info_tail['bbox'][0] + ocr_info_tail['bbox'][2]) // 2,
+            (ocr_info_tail['bbox'][1] + ocr_info_tail['bbox'][3]) // 2)
+
+        draw.line([center_head, center_tail], fill=color_line, width=5)
+
+    img_new = Image.blend(image, img_new, 0.5)
+    return np.array(img_new)
+
+
+# pad sentences
+def pad_sentences(tokenizer,
+                  encoded_inputs,
+                  max_seq_len=512,
+                  pad_to_max_seq_len=True,
+                  return_attention_mask=True,
+                  return_token_type_ids=True,
+                  return_overflowing_tokens=False,
+                  return_special_tokens_mask=False):
+    # round max_seq_len up to the next multiple, so that the padded sequence
+    # can later be reshaped into pages of max_seq_len by split_page()
+    max_seq_len = (
+        len(encoded_inputs["input_ids"]) // max_seq_len + 1) * max_seq_len
+
+    needs_to_be_padded = pad_to_max_seq_len and \
+        max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
+
+    if needs_to_be_padded:
+        difference = max_seq_len - len(encoded_inputs["input_ids"])
+        if tokenizer.padding_side == 'right':
+            if return_attention_mask:
+                encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
+                    "input_ids"]) + [0] * difference
+            if return_token_type_ids:
+                encoded_inputs["token_type_ids"] = (
+                    encoded_inputs["token_type_ids"] +
+                    [tokenizer.pad_token_type_id] * difference)
+            if return_special_tokens_mask:
+                encoded_inputs["special_tokens_mask"] = encoded_inputs[
+                    "special_tokens_mask"] + [1] * difference
+            encoded_inputs["input_ids"] = encoded_inputs[
+                "input_ids"] + [tokenizer.pad_token_id] * difference
+            encoded_inputs["bbox"] = encoded_inputs["bbox"] + [[0, 0, 0, 0]
+                                                               ] * difference
+    else:
+        if return_attention_mask:
+            encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
+                "input_ids"])
+
+    return encoded_inputs
+
+
+def split_page(encoded_inputs, max_seq_len=512):
+    """
+    Split a padded long sequence into pages of max_seq_len tokens
+    (at training time truncation is usually used instead).
+    """
+    for key in encoded_inputs:
+        if key == 'entities':
+            encoded_inputs[key] = [encoded_inputs[key]]
+            continue
+        encoded_inputs[key] = paddle.to_tensor(encoded_inputs[key])
+        if encoded_inputs[key].ndim <= 1:  # for input_ids, att_mask and so on
+            encoded_inputs[key] = encoded_inputs[key].reshape([-1, max_seq_len])
+        else:  # for bbox
+            encoded_inputs[key] = encoded_inputs[key].reshape(
+                [-1, max_seq_len, 4])
+    return encoded_inputs
+
+
+def preprocess(
+        tokenizer,
+        ori_img,
+        ocr_info,
+        img_size=(224, 224),
+        pad_token_label_id=-100,
+        max_seq_len=512,
+        add_special_ids=False,
+        return_attention_mask=True, ):
+    ocr_info = deepcopy(ocr_info)
+    height = ori_img.shape[0]
+    width = ori_img.shape[1]
+
+    img = cv2.resize(ori_img, img_size).transpose([2, 0, 1]).astype(np.float32)
+
+    segment_offset_id = []
+    words_list = []
+    bbox_list = []
+    input_ids_list = []
+    token_type_ids_list = []
+    entities = []
+
+    for info in ocr_info:
+        # x1, y1, x2, y2
+        bbox = info["bbox"]
+        bbox[0] = int(bbox[0] * 1000.0 / width)
+        bbox[2] = int(bbox[2] * 1000.0 / width)
+        bbox[1] = int(bbox[1] * 1000.0 / height)
+        bbox[3] = int(bbox[3] * 1000.0 / height)
+
+        text = info["text"]
+        encode_res = tokenizer.encode(
+            text, pad_to_max_seq_len=False, return_attention_mask=True)
+
+        if not add_special_ids:
+            # TODO: use tok.all_special_ids to remove
+            encode_res["input_ids"] = encode_res["input_ids"][1:-1]
+            encode_res["token_type_ids"] = encode_res["token_type_ids"][1:-1]
+            encode_res["attention_mask"] = encode_res["attention_mask"][1:-1]
+
+        # for re
+        entities.append({
+            "start": len(input_ids_list),
+            "end": len(input_ids_list) + len(encode_res["input_ids"]),
+            "label": "O",
+        })
+
+        input_ids_list.extend(encode_res["input_ids"])
+        token_type_ids_list.extend(encode_res["token_type_ids"])
+        bbox_list.extend([bbox] * len(encode_res["input_ids"]))
+        words_list.append(text)
+        segment_offset_id.append(len(input_ids_list))
+
+    encoded_inputs = {
+        "input_ids": input_ids_list,
+        "token_type_ids": token_type_ids_list,
+        "bbox": bbox_list,
+        "attention_mask": [1] * len(input_ids_list),
+        "entities": entities
+    }
+
+    encoded_inputs = pad_sentences(
+        tokenizer,
+        encoded_inputs,
+        max_seq_len=max_seq_len,
+        return_attention_mask=return_attention_mask)
+
+    encoded_inputs = split_page(encoded_inputs)
+
+    fake_bs = encoded_inputs["input_ids"].shape[0]
+
+    encoded_inputs["image"] = paddle.to_tensor(img).unsqueeze(0).expand(
+        [fake_bs] + list(img.shape))
+
+    encoded_inputs["segment_offset_id"] = segment_offset_id
+
+    return encoded_inputs
+
+
+def postprocess(attention_mask, preds, id2label_map):
+    if isinstance(preds, paddle.Tensor):
+        preds = preds.numpy()
+    preds = np.argmax(preds, axis=2)
+
+    preds_list = [[] for _ in range(preds.shape[0])]
+
+    # keep batch info
+    for i in range(preds.shape[0]):
+        for j in range(preds.shape[1]):
+            if attention_mask[i][j] == 1:
+                preds_list[i].append(id2label_map[preds[i][j]])
+
+    return preds_list
+
+
+def merge_preds_list_with_ocr_info(ocr_info, segment_offset_id, preds_list,
+                                   label2id_map_for_draw):
+    # must ensure the preds_list is generated from the same image
+    preds = [p for pred in preds_list for p in pred]
+
+    id2label_map = dict()
+    for key in label2id_map_for_draw:
+        val = label2id_map_for_draw[key]
+        if key.startswith("B-") or key.startswith("I-"):
+            id2label_map[val] = key[2:]
+        else:  # "O" and any other label is kept as-is
+            id2label_map[val] = key
+
+    for idx in range(len(segment_offset_id)):
+        if idx == 0:
+            start_id = 0
+        else:
+            start_id = segment_offset_id[idx - 1]
+
+        end_id = segment_offset_id[idx]
+
+        curr_pred = preds[start_id:end_id]
+        curr_pred = [label2id_map_for_draw[p] for p in curr_pred]
+
+        if len(curr_pred) <= 0:
+            pred_id = 0
+        else:
+            counts = np.bincount(curr_pred)
+            pred_id = np.argmax(counts)
+        ocr_info[idx]["pred_id"] = int(pred_id)
+        ocr_info[idx]["pred"] = id2label_map[int(pred_id)]
+    return ocr_info
+
+
+def print_arguments(args, logger=None):
+    """print arguments"""
+    print_func = logger.info if logger is not None else print
+    print_func('----------- Configuration Arguments -----------')
+    for arg, value in sorted(vars(args).items()):
+        print_func('%s: %s' % (arg, value))
+    print_func('------------------------------------------------')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    # Required parameters
+    # yapf: disable
+    parser.add_argument("--model_name_or_path",
+                        default=None, type=str, required=True,)
+    parser.add_argument("--re_model_name_or_path",
+                        default=None, type=str, required=False,)
+    parser.add_argument("--train_data_dir", default=None,
+                        type=str, required=False,)
+    parser.add_argument("--train_label_path", default=None,
+                        type=str, required=False,)
+    parser.add_argument("--eval_data_dir", default=None,
+                        type=str, required=False,)
+    parser.add_argument("--eval_label_path", default=None,
+                        type=str, required=False,)
+    parser.add_argument("--output_dir", default=None, type=str, required=True,)
+    parser.add_argument("--max_seq_length", default=512, type=int,)
+    parser.add_argument("--evaluate_during_training", action="store_true",)
+    parser.add_argument("--per_gpu_train_batch_size", default=8,
+                        type=int, help="Batch size per GPU/CPU for training.",)
+    parser.add_argument("--per_gpu_eval_batch_size", default=8,
+
type=int, help="Batch size per GPU/CPU for eval.",) + parser.add_argument("--learning_rate", default=5e-5, + type=float, help="The initial learning rate for Adam.",) + parser.add_argument("--weight_decay", default=0.0, + type=float, help="Weight decay if we apply some.",) + parser.add_argument("--adam_epsilon", default=1e-8, + type=float, help="Epsilon for Adam optimizer.",) + parser.add_argument("--max_grad_norm", default=1.0, + type=float, help="Max gradient norm.",) + parser.add_argument("--num_train_epochs", default=3, type=int, + help="Total number of training epochs to perform.",) + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.",) + parser.add_argument("--eval_steps", type=int, default=10, + help="eval every X updates steps.",) + parser.add_argument("--save_steps", type=int, default=50, + help="Save checkpoint every X updates steps.",) + parser.add_argument("--seed", type=int, default=2048, + help="random seed for initialization",) + + parser.add_argument("--rec_model_dir", default=None, type=str, ) + parser.add_argument("--det_model_dir", default=None, type=str, ) + parser.add_argument( + "--label_map_path", default="./labels/labels_ser.txt", type=str, required=False, ) + parser.add_argument("--infer_imgs", default=None, type=str, required=False) + parser.add_argument("--ocr_json_path", default=None, + type=str, required=False, help="ocr prediction results") + # yapf: enable + args = parser.parse_args() + return args diff --git a/ppstructure/vqa/xfun.py b/ppstructure/vqa/xfun.py new file mode 100644 index 0000000000000000000000000000000000000000..d62cdb5da5514280b62687d80d345ede9484ee90 --- /dev/null +++ b/ppstructure/vqa/xfun.py @@ -0,0 +1,442 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
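+# XFUNDataset parses one "image_path\tjson_annotation" line per sample: each
+# OCR region is tokenized, its bbox is normalized to a 0-1000 grid, and token
+# labels are expanded BIO-style (B-XXX for the first sub-token, I-XXX for the
+# rest). With contains_re=True it additionally builds the entity/relation
+# annotations needed for relation extraction; long sequences are chunked into
+# 512-token pages by _chunk_ser/_chunk_re.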
+
+import json
+import os
+import cv2
+import numpy as np
+import paddle
+import copy
+from paddle.io import Dataset
+
+__all__ = ["XFUNDataset"]
+
+
+class XFUNDataset(Dataset):
+    """
+    Example:
+        print("=====begin to build dataset=====")
+        from paddlenlp.transformers import LayoutXLMTokenizer
+        tokenizer = LayoutXLMTokenizer.from_pretrained("/paddle/models/transformers/layoutxlm-base-paddle/")
+        tok_res = tokenizer.tokenize("Maribyrnong")
+        # res = tokenizer.convert_ids_to_tokens(val_data["input_ids"][0])
+        dataset = XFUNDataset(
+            tokenizer,
+            data_dir="./zh.val/",
+            label_path="zh.val/xfun_normalize_val.json",
+            img_size=(224,224))
+        print(len(dataset))
+
+        data = dataset[0]
+        print(data.keys())
+        print("input_ids: ", data["input_ids"])
+        print("labels: ", data["labels"])
+        print("token_type_ids: ", data["token_type_ids"])
+        print("words_list: ", data["words_list"])
+        print("image shape: ", data["image"].shape)
+    """
+
+    def __init__(self,
+                 tokenizer,
+                 data_dir,
+                 label_path,
+                 contains_re=False,
+                 label2id_map=None,
+                 img_size=(224, 224),
+                 pad_token_label_id=None,
+                 add_special_ids=False,
+                 return_attention_mask=True,
+                 load_mode='all',
+                 max_seq_len=512):
+        super().__init__()
+        self.tokenizer = tokenizer
+        self.data_dir = data_dir
+        self.label_path = label_path
+        self.contains_re = contains_re
+        self.label2id_map = label2id_map
+        self.img_size = img_size
+        self.pad_token_label_id = pad_token_label_id
+        self.add_special_ids = add_special_ids
+        self.return_attention_mask = return_attention_mask
+        self.load_mode = load_mode
+        self.max_seq_len = max_seq_len
+
+        if self.pad_token_label_id is None:
+            self.pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index
+
+        self.all_lines = self.read_all_lines()
+
+        self.entities_labels = {'HEADER': 0, 'QUESTION': 1, 'ANSWER': 2}
+        self.return_keys = {
+            'bbox': 'np',
+            'input_ids': 'np',
+            'labels': 'np',
+            'attention_mask': 'np',
+            'image': 'np',
+            'token_type_ids': 'np',
+            'entities': 'dict',
+            'relations': 'dict',
+        }
+
+        if load_mode == "all":
+            self.encoded_inputs_all = self._parse_label_file_all()
+
+    def pad_sentences(self,
+                      encoded_inputs,
+                      max_seq_len=512,
+                      pad_to_max_seq_len=True,
+                      return_attention_mask=True,
+                      return_token_type_ids=True,
+                      truncation_strategy="longest_first",
+                      return_overflowing_tokens=False,
+                      return_special_tokens_mask=False):
+        # Padding
+        needs_to_be_padded = pad_to_max_seq_len and \
+            max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
+
+        if needs_to_be_padded:
+            difference = max_seq_len - len(encoded_inputs["input_ids"])
+            if self.tokenizer.padding_side == 'right':
+                if return_attention_mask:
+                    encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
+                        "input_ids"]) + [0] * difference
+                if return_token_type_ids:
+                    encoded_inputs["token_type_ids"] = (
+                        encoded_inputs["token_type_ids"] +
+                        [self.tokenizer.pad_token_type_id] * difference)
+                if return_special_tokens_mask:
+                    encoded_inputs["special_tokens_mask"] = encoded_inputs[
+                        "special_tokens_mask"] + [1] * difference
+                encoded_inputs["input_ids"] = encoded_inputs[
+                    "input_ids"] + [self.tokenizer.pad_token_id] * difference
+                encoded_inputs["labels"] = encoded_inputs[
+                    "labels"] + [self.pad_token_label_id] * difference
+                encoded_inputs["bbox"] = encoded_inputs[
+                    "bbox"] + [[0, 0, 0, 0]] * difference
+            elif self.tokenizer.padding_side == 'left':
+                if return_attention_mask:
+                    encoded_inputs["attention_mask"] = (
+                        [0] * difference +
+                        [1] * len(encoded_inputs["input_ids"]))
+                if return_token_type_ids:
+                    encoded_inputs["token_type_ids"] = (
+                        [self.tokenizer.pad_token_type_id] * difference +
+                        encoded_inputs["token_type_ids"])
+                if return_special_tokens_mask:
+                    encoded_inputs["special_tokens_mask"] = (
+                        [1] * difference +
+                        encoded_inputs["special_tokens_mask"])
+                encoded_inputs["input_ids"] = (
+                    [self.tokenizer.pad_token_id] * difference +
+                    encoded_inputs["input_ids"])
+                encoded_inputs["labels"] = (
+                    [self.pad_token_label_id] * difference +
+                    encoded_inputs["labels"])
+                encoded_inputs["bbox"] = (
+                    [[0, 0, 0, 0]] * difference + encoded_inputs["bbox"])
+        else:
+            if return_attention_mask:
+                encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
+                    "input_ids"])
+
+        return encoded_inputs
+
+    def truncate_inputs(self, encoded_inputs, max_seq_len=512):
+        for key in encoded_inputs:
+            if key == "sample_id":
+                continue
+            length = min(len(encoded_inputs[key]), max_seq_len)
+            encoded_inputs[key] = encoded_inputs[key][:length]
+        return encoded_inputs
+
+    def read_all_lines(self):
+        with open(self.label_path, "r") as fin:
+            lines = fin.readlines()
+        return lines
+
+    def _parse_label_file_all(self):
+        """
+        parse all samples
+        """
+        encoded_inputs_all = []
+        for line in self.all_lines:
+            encoded_inputs_all.extend(self._parse_label_file(line))
+        return encoded_inputs_all
+
+    def _parse_label_file(self, line):
+        """
+        parse a single sample
+        """
+
+        image_name, info_str = line.split("\t")
+        image_path = os.path.join(self.data_dir, image_name)
+
+        def add_image_path(x):
+            x['image_path'] = image_path
+            return x
+
+        encoded_inputs = self._read_encoded_inputs_sample(info_str)
+        if self.contains_re:
+            encoded_inputs = self._chunk_re(encoded_inputs)
+        else:
+            encoded_inputs = self._chunk_ser(encoded_inputs)
+        encoded_inputs = list(map(add_image_path, encoded_inputs))
+        return encoded_inputs
+
+    def _read_encoded_inputs_sample(self, info_str):
+        """
+        parse label info
+        """
+        # read text info
+        info_dict = json.loads(info_str)
+        height = info_dict["height"]
+        width = info_dict["width"]
+
+        words_list = []
+        bbox_list = []
+        input_ids_list = []
+        token_type_ids_list = []
+        gt_label_list = []
+
+        if self.contains_re:
+            # for re
+            entities = []
+            relations = []
+            id2label = {}
+            entity_id_to_index_map = {}
+            empty_entity = set()
+        for info in info_dict["ocr_info"]:
+            if self.contains_re:
+                # for re
+                if len(info["text"]) == 0:
+                    empty_entity.add(info["id"])
+                    continue
+                id2label[info["id"]] = info["label"]
+                relations.extend([tuple(sorted(l)) for l in info["linking"]])
+
+            # x1, y1, x2, y2
+            bbox = info["bbox"]
+            label = info["label"]
+            bbox[0] = int(bbox[0] * 1000.0 / width)
+            bbox[2] = int(bbox[2] * 1000.0 / width)
+            bbox[1] = int(bbox[1] * 1000.0 / height)
+            bbox[3] = int(bbox[3] * 1000.0 / height)
+
+            text = info["text"]
+            encode_res = self.tokenizer.encode(
+                text, pad_to_max_seq_len=False, return_attention_mask=True)
+
+            gt_label = []
+            if not self.add_special_ids:
+                # TODO: use tok.all_special_ids to remove
+                encode_res["input_ids"] = encode_res["input_ids"][1:-1]
+                encode_res["token_type_ids"] = encode_res["token_type_ids"][
+                    1:-1]
+                encode_res["attention_mask"] = encode_res["attention_mask"][
+                    1:-1]
+            if label.lower() == "other":
+                gt_label.extend([0] * len(encode_res["input_ids"]))
+            else:
+                gt_label.append(self.label2id_map[("b-" + label).upper()])
+                gt_label.extend([self.label2id_map[("i-" + label).upper()]] *
+                                (len(encode_res["input_ids"]) - 1))
+            if self.contains_re:
+                if gt_label[0] != self.label2id_map["O"]:
+                    entity_id_to_index_map[info["id"]] = len(entities)
+                    entities.append({
+
"start": len(input_ids_list), + "end": + len(input_ids_list) + len(encode_res["input_ids"]), + "label": label.upper(), + }) + input_ids_list.extend(encode_res["input_ids"]) + token_type_ids_list.extend(encode_res["token_type_ids"]) + bbox_list.extend([bbox] * len(encode_res["input_ids"])) + gt_label_list.extend(gt_label) + words_list.append(text) + + encoded_inputs = { + "input_ids": input_ids_list, + "labels": gt_label_list, + "token_type_ids": token_type_ids_list, + "bbox": bbox_list, + "attention_mask": [1] * len(input_ids_list), + # "words_list": words_list, + } + encoded_inputs = self.pad_sentences( + encoded_inputs, + max_seq_len=self.max_seq_len, + return_attention_mask=self.return_attention_mask) + encoded_inputs = self.truncate_inputs(encoded_inputs) + + if self.contains_re: + relations = self._relations(entities, relations, id2label, + empty_entity, entity_id_to_index_map) + encoded_inputs['relations'] = relations + encoded_inputs['entities'] = entities + return encoded_inputs + + def _chunk_ser(self, encoded_inputs): + encoded_inputs_all = [] + seq_len = len(encoded_inputs['input_ids']) + chunk_size = 512 + for chunk_id, index in enumerate(range(0, seq_len, chunk_size)): + chunk_beg = index + chunk_end = min(index + chunk_size, seq_len) + encoded_inputs_example = {} + for key in encoded_inputs: + encoded_inputs_example[key] = encoded_inputs[key][chunk_beg: + chunk_end] + + encoded_inputs_all.append(encoded_inputs_example) + return encoded_inputs_all + + def _chunk_re(self, encoded_inputs): + # prepare data + entities = encoded_inputs.pop('entities') + relations = encoded_inputs.pop('relations') + encoded_inputs_all = [] + chunk_size = 512 + for chunk_id, index in enumerate( + range(0, len(encoded_inputs["input_ids"]), chunk_size)): + item = {} + for k in encoded_inputs: + item[k] = encoded_inputs[k][index:index + chunk_size] + + # select entity in current chunk + entities_in_this_span = [] + global_to_local_map = {} # + for entity_id, entity in enumerate(entities): + if (index <= entity["start"] < index + chunk_size and + index <= entity["end"] < index + chunk_size): + entity["start"] = entity["start"] - index + entity["end"] = entity["end"] - index + global_to_local_map[entity_id] = len(entities_in_this_span) + entities_in_this_span.append(entity) + + # select relations in current chunk + relations_in_this_span = [] + for relation in relations: + if (index <= relation["start_index"] < index + chunk_size and + index <= relation["end_index"] < index + chunk_size): + relations_in_this_span.append({ + "head": global_to_local_map[relation["head"]], + "tail": global_to_local_map[relation["tail"]], + "start_index": relation["start_index"] - index, + "end_index": relation["end_index"] - index, + }) + item.update({ + "entities": reformat(entities_in_this_span), + "relations": reformat(relations_in_this_span), + }) + item['entities']['label'] = [ + self.entities_labels[x] for x in item['entities']['label'] + ] + encoded_inputs_all.append(item) + return encoded_inputs_all + + def _relations(self, entities, relations, id2label, empty_entity, + entity_id_to_index_map): + """ + build relations + """ + relations = list(set(relations)) + relations = [ + rel for rel in relations + if rel[0] not in empty_entity and rel[1] not in empty_entity + ] + kv_relations = [] + for rel in relations: + pair = [id2label[rel[0]], id2label[rel[1]]] + if pair == ["question", "answer"]: + kv_relations.append({ + "head": entity_id_to_index_map[rel[0]], + "tail": entity_id_to_index_map[rel[1]] + }) + elif pair == 
["answer", "question"]: + kv_relations.append({ + "head": entity_id_to_index_map[rel[1]], + "tail": entity_id_to_index_map[rel[0]] + }) + else: + continue + relations = sorted( + [{ + "head": rel["head"], + "tail": rel["tail"], + "start_index": get_relation_span(rel, entities)[0], + "end_index": get_relation_span(rel, entities)[1], + } for rel in kv_relations], + key=lambda x: x["head"], ) + return relations + + def load_img(self, image_path): + # read img + img = cv2.imread(image_path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + resize_h, resize_w = self.img_size + im_shape = img.shape[0:2] + im_scale_y = resize_h / im_shape[0] + im_scale_x = resize_w / im_shape[1] + img_new = cv2.resize( + img, None, None, fx=im_scale_x, fy=im_scale_y, interpolation=2) + mean = np.array([0.485, 0.456, 0.406])[np.newaxis, np.newaxis, :] + std = np.array([0.229, 0.224, 0.225])[np.newaxis, np.newaxis, :] + img_new = img_new / 255.0 + img_new -= mean + img_new /= std + img = img_new.transpose((2, 0, 1)) + return img + + def __getitem__(self, idx): + if self.load_mode == "all": + data = copy.deepcopy(self.encoded_inputs_all[idx]) + else: + data = self._parse_label_file(self.all_lines[idx])[0] + + image_path = data.pop('image_path') + data["image"] = self.load_img(image_path) + + return_data = {} + for k, v in data.items(): + if k in self.return_keys: + if self.return_keys[k] == 'np': + v = np.array(v) + return_data[k] = v + return return_data + + def __len__(self, ): + if self.load_mode == "all": + return len(self.encoded_inputs_all) + else: + return len(self.all_lines) + + +def get_relation_span(rel, entities): + bound = [] + for entity_index in [rel["head"], rel["tail"]]: + bound.append(entities[entity_index]["start"]) + bound.append(entities[entity_index]["end"]) + return min(bound), max(bound) + + +def reformat(data): + new_data = {} + for item in data: + for k, v in item.items(): + if k not in new_data: + new_data[k] = [] + new_data[k].append(v) + return new_data diff --git a/requirements.txt b/requirements.txt index 1d9522aa0167c60ffce263a35b86640efb1438b2..9900588b25df99e0853ec4521f0632578c55f530 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,3 +13,4 @@ lxml premailer openpyxl fasttext==0.9.1 + diff --git a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index 2cd2ba5f1e8198cacadab653d3979d5a1662f9ea..fcac6e3984cf3fd45fec9f7b736f794289278b25 100644 --- a/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -1,16 +1,16 @@ -===========================ch_ppocr_mobile_v2.0=========================== +===========================ch_PP-OCRv2=========================== model_name:ch_PP-OCRv2 python:python3.7 infer_model:./inference/ch_PP-OCRv2_det_infer/ infer_export:null -infer_quant:True +infer_quant:False inference:tools/infer/predict_system.py ---use_gpu:False ---enable_mkldnn:False +--use_gpu:False|True +--enable_mkldnn:False|True --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False ---precision:int8 +--use_tensorrt:False|True +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --rec_model_dir:./inference/ch_PP-OCRv2_rec_infer/ diff --git a/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt index 
9520ede3acd33b0e12300ee2de1b715605c9a0eb..b8db0ff19287c6db3d48758b22602252b5b2c6cc 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det/train_infer_python.txt @@ -12,9 +12,9 @@ train_model_name:latest train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ null:null ## -trainer:norm_train|pact_train +trainer:norm_train norm_train:tools/train.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o -pact_train:deploy/slim/quantization/quant.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o +pact_train:null fpgm_train:null distill_train:null null:null @@ -26,9 +26,9 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o -quant_export:deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o +quant_export:null fpgm_export: distill_export:null export1:null diff --git a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index 0ff24cbccfe282c12982714b5d079b0031703a04..1aad65b687992155133ed11533a14f642510361d 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -1,15 +1,17 @@ ===========================kl_quant_params=========================== model_name:PPOCRv2_ocr_det_kl python:python3.7 +Global.pretrained_model:null +Global.save_inference_dir:null infer_model:./inference/ch_PP-OCRv2_det_infer/ infer_export:deploy/slim/quantization/quant_kl.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o infer_quant:True inference:tools/infer/predict_det.py ---use_gpu:False ---enable_mkldnn:False +--use_gpu:False|True +--enable_mkldnn:True --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False +--use_tensorrt:False|True --precision:int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt index b567c08185e084384c3883f1d602cec3f312ea53..70292f49c960c14cf390d0168a510f3f20a5631f 100644 --- a/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_det_PACT/train_infer_python.txt @@ -1,5 +1,5 @@ ===========================train_params=========================== -model_name:PPOCRv2_ocr_det +model_name:ch_PPOCRv2_det_PACT python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:null quant_export:deploy/slim/quantization/export_model.py -c configs/det/ch_PP-OCRv2/ch_PP-OCRv2_det_cml.yml -o fpgm_export: diff --git a/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt index b61dc8bbe36ac5b21ec5f3561d39997f992d6c58..467263bd2cd3b2b44d38a810fd9657dc0a2d1685 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec/train_infer_python.txt @@ -6,7 +6,7 @@ Global.use_gpu:True|True 
Global.auto_cast:fp32 Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=300 Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 Global.pretrained_model:null train_model_name:latest train_infer_img_dir:./inference/rec_inference @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o quant_export: fpgm_export: @@ -34,7 +34,7 @@ distill_export:null export1:null export2:null inference_dir:Student -infer_model:./inference/ch_PP-OCRv2_rec_infer/ +infer_model:./inference/ch_PP-OCRv2_rec_infer infer_export:null infer_quant:False inference:tools/infer/predict_rec.py @@ -45,7 +45,7 @@ inference:tools/infer/predict_rec.py --use_tensorrt:False|True --precision:fp32|fp16|int8 --rec_model_dir: ---image_dir:/inference/rec_inference +--image_dir:./inference/rec_inference null:null --benchmark:True null:null diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index 8826bb4f078d518a79748f9cb305268c5ec2c198..083a3ae26e726e290ffde4095821cbf3c40f7178 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -1,15 +1,17 @@ ===========================kl_quant_params=========================== model_name:PPOCRv2_ocr_rec_kl python:python3.7 +Global.pretrained_model:null +Global.save_inference_dir:null infer_model:./inference/ch_PP-OCRv2_rec_infer/ infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o infer_quant:True inference:tools/infer/predict_rec.py ---use_gpu:False ---enable_mkldnn:False +--use_gpu:False|True +--enable_mkldnn:False|True --cpu_threads:1|6 --rec_batch_num:1|6 ---use_tensorrt:False +--use_tensorrt:True --precision:int8 --rec_model_dir: --image_dir:./inference/rec_inference diff --git a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt index 914c1bc7575dfee3309493b9110afe8b9cb7e59b..57124e66356650ca9919976ed9f2efb2aa8bfb77 100644 --- a/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_PP-OCRv2_rec_PACT/train_infer_python.txt @@ -6,15 +6,15 @@ Global.use_gpu:True|True Global.auto_cast:fp32 Global.epoch_num:lite_train_lite_infer=3|whole_train_whole_infer=300 Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=128 Global.pretrained_model:null train_model_name:latest train_infer_img_dir:./inference/rec_inference null:null ## trainer:pact_train -norm_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o -pact_train:null +norm_train:null +pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o fpgm_train:null distill_train:null null:null @@ -26,15 
+26,15 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o -quant_export: -fpgm_export: +Global.checkpoints: +norm_export:null +quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_PP-OCRv2_rec/ch_PP-OCRv2_rec_distillation.yml -o +fpgm_export: null distill_export:null export1:null export2:null inference_dir:Student -infer_model:./inference/ch_PP-OCRv2_rec_infer/ +infer_model:./inference/ch_PP-OCRv2_rec_slim_quant_infer infer_export:null infer_quant:True inference:tools/infer/predict_rec.py @@ -45,7 +45,7 @@ inference:tools/infer/predict_rec.py --use_tensorrt:False|True --precision:fp32|fp16|int8 --rec_model_dir: ---image_dir:/inference/rec_inference +--image_dir:./inference/rec_inference null:null --benchmark:True null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_V2.0_det_FPGM/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_V2.0_det_FPGM/train_infer_python.txt index 77889729e61a4b859895ee0de52c92ed258ace31..92ac3e9d37460a7f299f5cc2929a9bcaabdc34ef 100644 --- a/test_tipc/configs/ch_ppocr_mobile_V2.0_det_FPGM/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_V2.0_det_FPGM/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -15,7 +15,7 @@ null:null trainer:fpgm_train norm_train:null pact_train:null -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +fpgm_train:deploy/slim/prune/sensitivity_anal.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy distill_train:null null:null null:null @@ -29,7 +29,7 @@ Global.save_inference_dir:./output/ Global.pretrained_model: norm_export:null quant_export:null -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o distill_export:null export1:null export2:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index eea9d789dd4919fe8112d337e48b82fabacfc57a..4a46f0cf09dcf2bb812910f0cf322dda0749b87c 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -3,14 +3,14 @@ model_name:ch_ppocr_mobile_v2.0 python:python3.7 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ infer_export:null -infer_quant:True +infer_quant:False inference:tools/infer/predict_system.py ---use_gpu:False ---enable_mkldnn:False +--use_gpu:False|True +--enable_mkldnn:False|True --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False ---precision:int8 
+--use_tensorrt:False|True +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt index a19c8ee3355b010b55d1dbf16aa0e21940ba546c..6e5cecf632a42294006cffdf4cf3a466a326260b 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -15,4 +15,4 @@ op.det.local_service_conf.thread_num:1|6 op.det.local_service_conf.use_trt:False|True op.det.local_service_conf.precision:fp32|fp16|int8 pipline:pipeline_rpc_client.py|pipeline_http_client.py ---image_dir:../../doc/imgs \ No newline at end of file +--image_dir:../../doc/imgs diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt index 46093302972aec7f0e0443b33482af06d98bbf4f..9a5dd76437b236389f9880fdc1726e18e2cafee4 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=100|whole_train_whole_infer=300 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt index 4001ca18284b703b92a6998d2218df3f003c74d3..014dad5fc9d87c08a0725f57127f8bf2cb248be3 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_mac_cpu_normal_normal_infer_python_mac_cpu.txt @@ -4,7 +4,7 @@ python:python gpu_list:-1 Global.use_gpu:False Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -12,10 +12,10 @@ train_model_name:latest train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ null:null ## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +trainer:norm_train +norm_train:tools/train.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:null +fpgm_train:null distill_train:null null:null null:null @@ -27,9 +27,9 @@ null:null 
===========================infer_params=========================== Global.save_inference_dir:./output/ Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o +norm_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +quant_export:null +fpgm_export:null distill_export:null export1:null export2:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt index 0f4faee4b32925b4d0780ece6838c176238c7000..6a63b39d976c0e9693deec097c37eb0ff212d8af 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_windows_gpu_normal_normal_infer_python_windows_cpu_gpu.txt @@ -4,7 +4,7 @@ python:python gpu_list:0 Global.use_gpu:True Global.auto_cast:fp32|amp -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=300 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -12,10 +12,10 @@ train_model_name:latest train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ null:null ## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +trainer:norm_train +norm_train:tools/train.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:null +fpgm_train:null distill_train:null null:null null:null @@ -27,9 +27,9 @@ null:null ===========================infer_params=========================== Global.save_inference_dir:./output/ Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o +norm_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +quant_export:null +fpgm_export:null distill_export:null export1:null export2:null @@ -49,63 +49,4 @@ inference:tools/infer/predict_det.py null:null --benchmark:True null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr det ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_det -python:python3.7 
-trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 -op.det.local_service_conf.devices:null|0 -op.det.local_service_conf.use_mkldnn:True|False -op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py|pipeline_rpc_client.py ---image_dir=../../doc/imgs -===========================kl_quant_params=========================== -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:True -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -null:null -===========================lite_params=========================== -inference:./ocr_db_crnn det -infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb ---cpu_threads:1|4 ---batch_size:1 ---power_mode:LITE_POWER_HIGH|LITE_POWER_LOW ---image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ---config_dir:./config.txt ---rec_dict_dir:./ppocr_keys_v1.txt ---benchmark:True + diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index bd58e964033243c00e7a270d642f97ced7659114..1039dcad06d63bb1fc1a47b7cc4760cd8d75ed63 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -1,15 +1,17 @@ ===========================kl_quant_params=========================== -model_name:PPOCRv2_ocr_det +model_name:ch_ppocr_mobile_v2.0_det_KL python:python3.7 +Global.pretrained_model:null +Global.save_inference_dir:null infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ infer_export:deploy/slim/quantization/quant_kl.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o infer_quant:True inference:tools/infer/predict_det.py ---use_gpu:False ---enable_mkldnn:False +--use_gpu:False|True +--enable_mkldnn:True --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False +--use_tensorrt:False|True --precision:int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt index 7328be25ffd0ffa0abac83ec80e46be42ff93185..372b8ad4137cc19a8c1dfc59b99a00d525ae466f 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_det_PACT/train_infer_python.txt @@ -1,10 +1,10 @@ ===========================train_params=========================== -model_name:ocr_det 
+model_name:ch_ppocr_mobile_v2.0_det_PACT python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.epoch_num:lite_train_lite_infer=20|whole_train_whole_infer=300 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 Global.pretrained_model:null @@ -14,7 +14,7 @@ null:null ## trainer:pact_train norm_train:null -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +pact_train:deploy/slim/quantization/quant.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o fpgm_train:null distill_train:null null:null @@ -26,15 +26,15 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:null -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o fpgm_export:null distill_export:null export1:null export2:null inference_dir:null -train_model:null +train_model:./inference/ch_ppocr_mobile_v2.0_det_prune_infer/ infer_export:null infer_quant:False inference:tools/infer/predict_det.py @@ -48,4 +48,4 @@ inference:tools/infer/predict_det.py --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null --benchmark:True -null:null \ No newline at end of file +null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt index c93b83e5dcab1aab56ea5fa1a178e3dc7ec3c2e4..92388f5ff78460f847893338eb428d4a8daae2b7 100644 --- a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec/train_infer_python.txt @@ -4,9 +4,9 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_infer=2|whole_train_infer=300 +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128 +Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 Global.pretrained_model:null train_model_name:latest train_infer_img_dir:./inference/rec_inference @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,10 +34,10 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/ch_ppocr_mobile_v2.0_rec_train/best_accuracy infer_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o infer_quant:False -inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE" +inference:tools/infer/predict_rec.py --use_gpu:True|False --enable_mkldnn:True|False --cpu_threads:1|6 diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/rec_chinese_lite_train_v2.0.yml b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/rec_chinese_lite_train_v2.0.yml new file mode 100644 index 0000000000000000000000000000000000000000..ee42dbfd0c80667ae5c3da4ee6df6416e1908388 --- /dev/null +++ 
b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/rec_chinese_lite_train_v2.0.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_chinese_lite_v2.0 + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 25 + infer_mode: False + use_space_char: True + save_res_path: ./output/rec/predicts_chinese_lite_v2.0.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + disable_se: True + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: train_data/ic15_data + label_file_list: ["train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ic15_data + label_file_list: ["train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..a968573d4410f3d474788cb5f6ab414c5d02aae3 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/train_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_FPGM +python:python3.7 +gpu_list:0 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/ic15_data/test/word_1.png +null:null +## +trainer:fpgm_train +norm_train:null +pact_train:null +fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/rec_chinese_lite_train_v2.0.yml -o Global.pretrained_model=./pretrain_models/ch_ppocr_mobile_v2.0_rec_train/best_accuracy +distill_train:null +null:null +null:null +## 
+===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:null +quant_export:null +fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_FPGM/rec_chinese_lite_train_v2.0.yml -o +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:null +infer_export:null +infer_quant:False +inference:tools/infer/predict_rec.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +null:null +--benchmark:True +null:null \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..92f33c58c9e97347e53b778bde5a21472b769f36 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,21 @@ +===========================kl_quant_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_KL +python:python3.7 +Global.pretrained_model:null +Global.save_inference_dir:null +infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +infer_export:deploy/slim/quantization/quant_kl.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/rec_chinese_lite_train_v2.0.yml -o +infer_quant:True +inference:tools/infer/predict_rec.py +--use_gpu:False|True +--enable_mkldnn:True +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:int8 +--det_model_dir: +--image_dir:./inference/rec_inference +null:null +--benchmark:True +null:null +null:null diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/rec_chinese_lite_train_v2.0.yml b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/rec_chinese_lite_train_v2.0.yml new file mode 100644 index 0000000000000000000000000000000000000000..b06dafe7fdc01eadeee51e70dfa4e8c675bda531 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_KL/rec_chinese_lite_train_v2.0.yml @@ -0,0 +1,101 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_chinese_lite_v2.0 + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 25 + infer_mode: False + use_space_char: True + save_res_path: ./output/rec/predicts_chinese_lite_v2.0.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + 
dataset: + name: SimpleDataSet + data_dir: train_data/ic15_data + label_file_list: ["train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ic15_data + label_file_list: ["train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml new file mode 100644 index 0000000000000000000000000000000000000000..b06dafe7fdc01eadeee51e70dfa4e8c675bda531 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml @@ -0,0 +1,101 @@ +Global: + use_gpu: true + epoch_num: 500 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec_chinese_lite_v2.0 + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: ppocr/utils/ppocr_keys_v1.txt + max_text_length: 25 + infer_mode: False + use_space_char: True + save_res_path: ./output/rec/predicts_chinese_lite_v2.0.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Cosine + learning_rate: 0.001 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: CRNN + Transform: + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: small + small_stride: [1, 2, 2, 2] + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 48 + Head: + name: CTCHead + fc_decay: 0.00001 + +Loss: + name: CTCLoss + +PostProcess: + name: CTCLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: train_data/ic15_data + label_file_list: ["train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - RecAug: + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: train_data/ic15_data + label_file_list: ["train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - CTCLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 320] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + 
num_workers: 8 diff --git a/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_infer_python.txt b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6f3a380bebf24785e9df4f0bb39f809a85b61b76 --- /dev/null +++ b/test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/train_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ch_ppocr_mobile_v2.0_rec_PACT +python:python3.7 +gpu_list:0 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=128|whole_train_whole_infer=128 +Global.checkpoints:null +train_model_name:latest +train_infer_img_dir:./train_data/ic15_data/test/word_1.png +null:null +## +trainer:pact_train +norm_train:null +pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:null +quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ch_ppocr_mobile_v2.0_rec_PACT/rec_chinese_lite_train_v2.0.yml -o +fpgm_export:null +distill_export:null +export1:null +export2:null +inference_dir:null +infer_model:./inference/ch_ppocr_mobile_v2.0_rec_slim_infer/ +infer_export:null +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ppocr_keys_v1.txt --rec_image_shape="3,32,100" +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1|6 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null \ No newline at end of file diff --git a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt index 5a93571a76366de191d2fb1736aa3ff4c71b1737..92d7031e884d10df3a5c98bf675d64d63b3cb335 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt @@ -1,16 +1,16 @@ -===========================ch_ppocr_mobile_v2.0=========================== +===========================ch_ppocr_server_v2.0=========================== model_name:ch_ppocr_server_v2.0 python:python3.7 infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ infer_export:null infer_quant:True inference:tools/infer/predict_system.py ---use_gpu:False ---enable_mkldnn:False +--use_gpu:False|True +--enable_mkldnn:False|True --cpu_threads:1|6 --rec_batch_num:1 --use_tensorrt:False ---precision:int8 +--precision:fp32 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ --rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt index bea918a7f366548056d7d62a5785353a4e689d01..ca52eeb1bc6a1853fa7015478fb9028d8dec71c3 100644 --- 
a/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_det/train_infer_python.txt @@ -12,22 +12,22 @@ train_model_name:latest train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ null:null ## -trainer:norm_train|pact_train|fpgm_export -norm_train:tools/train.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/ch_ppocr_server_v2.0_det/det_r50_vd_db.yml -o +quant_train:null +fpgm_train:null distill_train:null null:null null:null ## ===========================eval_params=========================== -eval:tools/eval.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +eval:tools/eval.py -c test_tipc/configs/ch_ppocr_server_v2.0_det/det_r50_vd_db.yml -o null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +norm_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_det/det_r50_vd_db.yml -o quant_export:null fpgm_export:null distill_export:null diff --git a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt index f35bb3876737606f58949968d5b43f3bd796be30..c42edbee4dd2a26afff94f6028ca7a8f4170648e 100644 --- a/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt +++ b/test_tipc/configs/ch_ppocr_server_v2.0_rec/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,16 +34,16 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/ch_ppocr_server_v2.0_rec_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/ch_ppocr_server_v2.0_rec/rec_icdar15_train.yml -o infer_quant:False -inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" +inference:tools/infer/predict_rec.py --use_gpu:True|False --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt b/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt index 7a3aced57aaf31bb54075d8ba3119d1626a2c58a..230a799f2e6d49b6bc5816fd53724259e1b881c3 100644 --- a/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_mv3_east_v2.0/train_infer_python.txt @@ -34,15 +34,15 @@ distill_export:null export1:null export2:null ## -train_model:./inference/det_mv3_east/best_accuracy -infer_export:tools/export_model.py -c test_tipc/cconfigs/det_mv3_east_v2.0/det_mv3_east.yml -o +train_model:./inference/det_mv3_east_v2.0_train/best_accuracy +infer_export:tools/export_model.py -c 
test_tipc/configs/det_mv3_east_v2.0/det_mv3_east.yml -o infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32|fp16|int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/det_mv3_pse_v2.0/det_mv3_pse.yml b/test_tipc/configs/det_mv3_pse_v2.0/det_mv3_pse.yml new file mode 100644 index 0000000000000000000000000000000000000000..d37fdcfbb5b27404403674d99c1b8abe8cd65e85 --- /dev/null +++ b/test_tipc/configs/det_mv3_pse_v2.0/det_mv3_pse.yml @@ -0,0 +1,135 @@ +Global: + use_gpu: true + epoch_num: 600 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_mv3_pse/ + save_epoch_step: 600 + # evaluation is run every 63 iterations + eval_batch_step: [ 0,1000 ] + cal_metric_during_train: False + pretrained_model: ./pretrain_models/MobileNetV3_large_x0_5_pretrained + checkpoints: #./output/det_r50_vd_pse_batch8_ColorJitter/best_accuracy + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_pse/predicts_pse.txt + +Architecture: + model_type: det + algorithm: PSE + Transform: null + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: FPN + out_channels: 96 + Head: + name: PSEHead + hidden_dim: 96 + out_channels: 7 + +Loss: + name: PSELoss + alpha: 0.7 + ohem_ratio: 3 + kernel_sample_mask: pred + reduction: none + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Step + learning_rate: 0.001 + step_size: 200 + gamma: 0.1 + regularizer: + name: 'L2' + factor: 0.0005 + +PostProcess: + name: PSEPostProcess + thresh: 0 + box_thresh: 0.85 + min_area: 16 + box_type: box # 'box' or 'poly' + scale: 1 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - ColorJitter: + brightness: 0.12549019607843137 + saturation: 0.5 + - IaaAugment: + augmenter_args: + - { 'type': Resize, 'args': { 'size': [ 0.5, 3 ] } } + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [ -10, 10 ] } } + - MakePseGt: + kernel_num: 7 + min_shrink_ratio: 0.4 + size: 640 + - RandomCropImgMask: + size: [ 640,640 ] + main_key: gt_text + crop_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] + - NormalizeImage: + scale: 1./255. + mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 16 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_side_len: 736 + limit_type: min + - NormalizeImage: + scale: 1./255. 
+ mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'shape', 'polys', 'ignore_tags' ] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 8 \ No newline at end of file diff --git a/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt b/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..0171a97ae6c88dd13e74d85eb59bb019dad954f7 --- /dev/null +++ b/test_tipc/configs/det_mv3_pse_v2.0/train_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:det_mv3_pse_v2.0 +python:python3.7 +gpu_list:0 +Global.use_gpu:True|True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/det_mv3_pse_v2.0/det_mv3_pse.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_pse_v2.0/det_mv3_pse.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/det_mv3_pse_v2.0_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/det_mv3_pse_v2.0/det_mv3_pse.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32|fp16 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--save_log_path:null +--benchmark:True +--det_algorithm:PSE diff --git a/test_tipc/configs/det_r50_vd_east_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_east_v2.0/train_infer_python.txt index e9eaa779520f78622509153482fd6a84322c9cc5..45023ae3eeebc925d61e1686e0c18c75085b2ab4 100644 --- a/test_tipc/configs/det_r50_vd_east_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_east_v2.0/train_infer_python.txt @@ -34,15 +34,15 @@ distill_export:null export1:null export2:null ## -train_model:./inference/det_mv3_east/best_accuracy -infer_export:tools/export_model.py -c test_tipc/cconfigs/det_r50_vd_east_v2.0/det_r50_vd_east.yml -o +train_model:./inference/det_r50_vd_east_v2.0_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_east_v2.0/det_r50_vd_east.yml -o infer_quant:False inference:tools/infer/predict_det.py --use_gpu:True|False --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32|fp16|int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ diff --git a/test_tipc/configs/det_r50_vd_pse_v2.0/det_r50_vd_pse.yml b/test_tipc/configs/det_r50_vd_pse_v2.0/det_r50_vd_pse.yml new file mode 100644 index 0000000000000000000000000000000000000000..5ebc4252718d5572837eac58061bf6f9eb35bf73 --- /dev/null +++ 
b/test_tipc/configs/det_r50_vd_pse_v2.0/det_r50_vd_pse.yml @@ -0,0 +1,134 @@ +Global: + use_gpu: true + epoch_num: 600 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/det_r50_vd_pse/ + save_epoch_step: 600 + # evaluation is run every 125 iterations + eval_batch_step: [ 0,1000 ] + cal_metric_during_train: False + pretrained_model: + checkpoints: #./output/det_r50_vd_pse_batch8_ColorJitter/best_accuracy + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_en/img_10.jpg + save_res_path: ./output/det_pse/predicts_pse.txt + +Architecture: + model_type: det + algorithm: PSE + Transform: + Backbone: + name: ResNet + layers: 50 + Neck: + name: FPN + out_channels: 256 + Head: + name: PSEHead + hidden_dim: 256 + out_channels: 7 + +Loss: + name: PSELoss + alpha: 0.7 + ohem_ratio: 3 + kernel_sample_mask: pred + reduction: none + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Step + learning_rate: 0.0001 + step_size: 200 + gamma: 0.1 + regularizer: + name: 'L2' + factor: 0.0005 + +PostProcess: + name: PSEPostProcess + thresh: 0 + box_thresh: 0.85 + min_area: 16 + box_type: box # 'box' or 'poly' + scale: 1 + +Metric: + name: DetMetric + main_indicator: hmean + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - ColorJitter: + brightness: 0.12549019607843137 + saturation: 0.5 + - IaaAugment: + augmenter_args: + - { 'type': Resize, 'args': { 'size': [ 0.5, 3 ] } } + - { 'type': Fliplr, 'args': { 'p': 0.5 } } + - { 'type': Affine, 'args': { 'rotate': [ -10, 10 ] } } + - MakePseGt: + kernel_num: 7 + min_shrink_ratio: 0.4 + size: 640 + - RandomCropImgMask: + size: [ 640,640 ] + main_key: gt_text + crop_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] + - NormalizeImage: + scale: 1./255. + mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'gt_text', 'gt_kernels', 'mask' ] # the order of the dataloader list + loader: + shuffle: True + drop_last: False + batch_size_per_card: 8 + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/icdar2015/text_localization/ + label_file_list: + - ./train_data/icdar2015/text_localization/test_icdar2015_label.txt + ratio_list: [ 1.0 ] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - DetLabelEncode: # Class handling label + - DetResizeForTest: + limit_side_len: 736 + limit_type: min + - NormalizeImage: + scale: 1./255. 
+ mean: [ 0.485, 0.456, 0.406 ] + std: [ 0.229, 0.224, 0.225 ] + order: 'hwc' + - ToCHWImage: + - KeepKeys: + keep_keys: [ 'image', 'shape', 'polys', 'ignore_tags' ] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 1 # must be 1 + num_workers: 8 \ No newline at end of file diff --git a/test_tipc/configs/det_r50_vd_pse_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_pse_v2.0/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d81542ea2e11fcddfc403fae686bbfab419de254 --- /dev/null +++ b/test_tipc/configs/det_r50_vd_pse_v2.0/train_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:det_r50_vd_pse_v2.0 +python:python3.7 +gpu_list:0 +Global.use_gpu:True|True +Global.auto_cast:fp32 +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/det_r50_vd_pse_v2.0/det_r50_vd_pse.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_pse_v2.0/det_r50_vd_pse.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/det_r50_vd_pse_v2.0_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_pse_v2.0/det_r50_vd_pse.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--save_log_path:null +--benchmark:True +--det_algorithm:PSE diff --git a/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/det_r50_vd_sast_icdar2015.yml b/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/det_r50_vd_sast_icdar2015.yml index 8e9315d2488ad187eb12708d094c5be57cb48eac..4b7340ac59851aa54effa49f73196ad863d02a95 100644 --- a/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/det_r50_vd_sast_icdar2015.yml +++ b/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/det_r50_vd_sast_icdar2015.yml @@ -62,7 +62,7 @@ Train: data_dir: ./train_data/icdar2015/text_localization/ label_file_list: - ./train_data/icdar2015/text_localization/train_icdar2015_label.txt - ratio_list: [0.1, 0.45, 0.3, 0.15] + ratio_list: [1.0] transforms: - DecodeImage: # load image img_mode: BGR diff --git a/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt index d9f15dded4b920cb93b2180aeb9e14e93ebab5cc..f6ff061ff5a1e0ba914bbe69684a1fa60cdfff5d 100644 --- a/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_sast_icdar15_v2.0/train_infer_python.txt @@ -42,10 +42,10 @@ inference:tools/infer/predict_det.py --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32|fp16|int8 
--det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null --benchmark:True -null:null +--det_algorithm:SAST diff --git a/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt b/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt index 602254f2f3b7eb6f5b1fc72fbaf212fbea43ca49..54921cb1a8d361cdaba7c7c5154cb2730ef0ec77 100644 --- a/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt +++ b/test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/train_infer_python.txt @@ -34,7 +34,7 @@ distill_export:null export1:null export2:null inference_dir:null -train_model:./inference/det_r50_vd_sast_totaltext_v2.0/best_accuracy +train_model:./inference/det_r50_vd_sast_totaltext_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/det_r50_vd_sast_totaltext_v2.0/det_r50_vd_sast_totaltext.yml -o infer_quant:False inference:tools/infer/predict_det.py @@ -42,10 +42,10 @@ inference:tools/infer/predict_det.py --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32|fp16|int8 --det_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null --benchmark:True -null:null +--det_algorithm:SAST diff --git a/test_tipc/configs/en_server_pgnetA/train_infer_python.txt b/test_tipc/configs/en_server_pgnetA/train_infer_python.txt index c7b2d1b0a712693b666cd0b40cff4a8871084aa6..1a25eccb3a192823d58af1c6cf089ea15b6d394c 100644 --- a/test_tipc/configs/en_server_pgnetA/train_infer_python.txt +++ b/test_tipc/configs/en_server_pgnetA/train_infer_python.txt @@ -4,7 +4,7 @@ python:python3.7 gpu_list:0|0,1 Global.use_gpu:True|True Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=500 +Global.epoch_num:lite_train_lite_infer=5|whole_train_whole_infer=500 Global.save_model_dir:./output/ Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=14 Global.pretrained_model:null @@ -42,9 +42,9 @@ inference:tools/infer/predict_e2e.py --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1 ---use_tensorrt:False|True +--use_tensorrt:False --precision:fp32|fp16|int8 ---det_model_dir: +--e2e_model_dir: --image_dir:./inference/ch_det_data_50/all-sum-510/ null:null --benchmark:True diff --git a/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml b/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml new file mode 100644 index 0000000000000000000000000000000000000000..15119bb2a9de02c19684d21ad5a1859db94895ce --- /dev/null +++ b/test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml @@ -0,0 +1,103 @@ +Global: + use_gpu: True + epoch_num: 21 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/nrtr/ + save_epoch_step: 1 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words_en/word_10.png + # for data or label process + character_dict_path: ppocr/utils/EN_symbol_dict.txt + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_nrtr.txt + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.99 + clip_norm: 5.0 + lr: + name: Cosine + learning_rate: 0.0005 + warmup_epoch: 2 + regularizer: + name: 'L2' + factor: 0. 
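The Global and Optimizer sections above pin down the NRTR recipe (cosine decay with a 2-epoch warmup, gradient clipping at norm 5.0); a minimal sketch of driving this config directly, assuming the repo's usual `tools/train.py` and `tools/eval.py` entry points and ppocr's default `best_accuracy` checkpoint name under the configured `save_model_dir`:

```bash
# Sketch: train NRTR from this config, then evaluate the saved checkpoint.
# The -o Global.* overrides follow the convention used throughout this patch;
# the checkpoint path is an assumption based on save_model_dir above.
python3.7 tools/train.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o Global.epoch_num=1
python3.7 tools/eval.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o Global.checkpoints=./output/rec/nrtr/best_accuracy
```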
+ +Architecture: + model_type: rec + algorithm: NRTR + in_channels: 1 + Transform: + Backbone: + name: MTB + cnn_num: 2 + Head: + name: Transformer + d_model: 512 + num_encoder_layers: 6 + beam_size: -1 # When Beam size is greater than 0, it means to use beam search when evaluation. + + +Loss: + name: NRTRLoss + smoothing: True + +PostProcess: + name: NRTRLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - NRTRLabelEncode: # Class handling label + - NRTRRecResizeImg: + image_shape: [100, 32] + resize_type: PIL # PIL or OpenCV + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 512 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - NRTRLabelEncode: # Class handling label + - NRTRRecResizeImg: + image_shape: [100, 32] + resize_type: PIL # PIL or OpenCV + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 1 + use_shared_memory: False diff --git a/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt b/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..2adca464a63d548f2b218ed1de91692ed25da89a --- /dev/null +++ b/test_tipc/configs/rec_mtb_nrtr/train_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:rec_mtb_nrtr +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/rec_mtb_nrtr_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/rec_mtb_nrtr/rec_mtb_nrtr.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/EN_symbol_dict.txt --rec_image_shape="1,32,100" --rec_algorithm="NRTR" +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1|6 +--use_tensorrt:True|False +--precision:fp32|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null + diff --git 
a/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt index 698c14ee66914bea7a6926650234ad7a979d01b4..ac565d8c55b1924e7a39fd8e36456a74fbbce042 100644 --- a/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,7 +34,7 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/rec_mv3_none_bilstm_ctc_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_none_bilstm_ctc_v2.0/rec_icdar15_train.yml -o infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" @@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt index 53250c57e49967b9816b62dae07c64c0d22d58af..947399a83cedc1f4262374e2c5ba5f3221561f0d 100644 --- a/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_none_none_ctc_v2.0/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/rec_mv3_none_none_ctc_v2.0/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,7 +34,7 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/rec_mv3_none_none_ctc_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_none_none_ctc_v2.0/rec_icdar15_train.yml -o infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" @@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml b/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml new file mode 100644 index 0000000000000000000000000000000000000000..2b14c047d4645104fb9532a1b391072dc341f3b7 --- /dev/null +++ b/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml @@ -0,0 +1,103 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/rec_mv3_tps_bilstm_att/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + 
save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_mv3_tps_bilstm_att.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0.00001 + +Architecture: + model_type: rec + algorithm: RARE + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: small + Backbone: + name: MobileNetV3 + scale: 0.5 + model_name: large + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 96 + Head: + name: AttentionHead + hidden_size: 96 + + +Loss: + name: AttentionLoss + +PostProcess: + name: AttnLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 1 diff --git a/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..5fcfeee5e1835504d08cf24b0180a5af105be092 --- /dev/null +++ b/test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/train_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:rec_mv3_tps_bilstm_att_v2.0 +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/rec_mv3_tps_bilstm_att_v2.0_train/best_accuracy +infer_export:tools/export_model.py -c 
test_tipc/configs/rec_mv3_tps_bilstm_att_v2.0/rec_mv3_tps_bilstm_att.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE" +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1|6 +--use_tensorrt:True|False +--precision:fp32|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null + diff --git a/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt index 5de24ae5835f91ee2b4a6d7f816197ce694002f6..ac3fce6141ccbf96169d862b8b92f59af597db56 100644 --- a/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,16 +34,16 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/rec_mv3_tps_bilstm_ctc_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_mv3_tps_bilstm_ctc_v2.0/rec_icdar15_train.yml -o infer_quant:False -inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="StarNet" --use_gpu:True|False --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r31_sar/rec_r31_sar.yml b/test_tipc/configs/rec_r31_sar/rec_r31_sar.yml new file mode 100644 index 0000000000000000000000000000000000000000..36bc3c5d12c55de574507cd613da772bbe0d2ced --- /dev/null +++ b/test_tipc/configs/rec_r31_sar/rec_r31_sar.yml @@ -0,0 +1,98 @@ +Global: + use_gpu: true + epoch_num: 5 + log_smooth_window: 20 + print_batch_step: 20 + save_model_dir: ./sar_rec + save_epoch_step: 1 + # evaluation is run every 2000 iterations + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: + # for data or label process + character_dict_path: ppocr/utils/dict90.txt + max_text_length: 30 + infer_mode: False + use_space_char: False + rm_symbol: True + save_res_path: ./output/rec/predicts_sar.txt + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + name: Piecewise + decay_epochs: [3, 4] + values: [0.001, 0.0001, 0.00001] + regularizer: + name: 'L2' + factor: 0 + +Architecture: + model_type: rec + algorithm: SAR + Transform: + Backbone: + name: ResNet31 + Head: + name: SARHead + +Loss: + name: SARLoss + +PostProcess: + name: SARLabelDecode + +Metric: + name: RecMetric + + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SARLabelEncode: # Class handling label + - SARRecResizeImg: + image_shape: [3, 48, 
48, 160] # h:48 w:[48,160] + width_downsample_ratio: 0.25 + - KeepKeys: + keep_keys: ['image', 'label', 'valid_ratio'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 64 + drop_last: True + num_workers: 8 + use_shared_memory: False + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SARLabelEncode: # Class handling label + - SARRecResizeImg: + image_shape: [3, 48, 48, 160] + width_downsample_ratio: 0.25 + - KeepKeys: + keep_keys: ['image', 'label', 'valid_ratio'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 64 + num_workers: 4 + use_shared_memory: False + diff --git a/test_tipc/configs/rec_r31_sar/train_infer_python.txt b/test_tipc/configs/rec_r31_sar/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..8ae29c056803cfa6e2d69abdad93aff473adc571 --- /dev/null +++ b/test_tipc/configs/rec_r31_sar/train_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:rec_r31_sar +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/rec_r31_sar_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/rec_r31_sar/rec_r31_sar.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/dict90.txt --rec_image_shape="3,48,48,160" --rec_algorithm="SAR" +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1|6 +--use_tensorrt:True|False +--precision:fp32|fp16|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null + diff --git a/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt index baf306e1897c9f0f65ebe45747738b18173dd286..99f86872574bc300d3447efc0e4c83eaa88aab6c 100644 --- a/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c 
test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,7 +34,7 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/rec_r34_vd_none_bilstm_ctc_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_none_bilstm_ctc_v2.0/rec_icdar15_train.yml -o infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" @@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt index 0d54bd5647d9cd4d8b1ffec5b2baa99874cff7f6..fb1ece49f71338307bfdf30714cd68cb382ea5e2 100644 --- a/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,7 +34,7 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/rec_r34_vd_none_none_ctc_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_none_none_ctc_v2.0/rec_icdar15_train.yml -o infer_quant:False inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" @@ -43,7 +43,7 @@ inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dic --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml b/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml new file mode 100644 index 0000000000000000000000000000000000000000..5dd797b0ec742932ca7f85353b9ea4c5eb637edd --- /dev/null +++ b/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml @@ -0,0 +1,102 @@ +Global: + use_gpu: True + epoch_num: 400 + log_smooth_window: 20 + print_batch_step: 10 + save_model_dir: ./output/rec/b3_rare_r34_none_gru/ + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 2000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + max_text_length: 25 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_b3_rare_r34_none_gru.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + lr: + learning_rate: 0.0005 + regularizer: + name: 'L2' + factor: 0.00000 + +Architecture: + model_type: rec + algorithm: RARE + Transform: + name: TPS + num_fiducial: 20 + loc_lr: 0.1 + model_name: large + Backbone: + name: ResNet + layers: 
34 + Neck: + name: SequenceEncoder + encoder_type: rnn + hidden_size: 256 #96 + Head: + name: AttentionHead # AttentionHead + hidden_size: 256 # + l2_decay: 0.00001 + +Loss: + name: AttentionLoss + +PostProcess: + name: AttnLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: True + batch_size_per_card: 256 + drop_last: True + num_workers: 8 + +Eval: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - AttnLabelEncode: # Class handling label + - RecResizeImg: + image_shape: [3, 32, 100] + - KeepKeys: + keep_keys: ['image', 'label', 'length'] # dataloader will return list in this order + loader: + shuffle: False + drop_last: False + batch_size_per_card: 256 + num_workers: 8 diff --git a/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..acc9749f08b42f7fa2200da7ef865f710afc77c3 --- /dev/null +++ b/test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/train_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:rec_r34_vd_tps_bilstm_att_v2.0 +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/rec_r34_vd_tps_bilstm_att_v2.0_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_att_v2.0/rec_r34_vd_tps_bilstm_att.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="RARE" +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1|6 +--use_tensorrt:True|False +--precision:fp32|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null + diff --git 
a/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt b/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt index 666c7d159b4cecb262197b3373ba3df125037d67..d11850528604074e9bb3d3d92b58ec709238b24b 100644 --- a/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt +++ b/test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/train_infer_python.txt @@ -26,7 +26,7 @@ null:null ## ===========================infer_params=========================== Global.save_inference_dir:./output/ -Global.pretrained_model: +Global.checkpoints: norm_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/rec_icdar15_train.yml -o quant_export:null fpgm_export:null @@ -34,16 +34,16 @@ distill_export:null export1:null export2:null ## -infer_model:null +train_model:./inference/rec_r34_vd_tps_bilstm_ctc_v2.0_train/best_accuracy infer_export:tools/export_model.py -c test_tipc/configs/rec_r34_vd_tps_bilstm_ctc_v2.0/rec_icdar15_train.yml -o infer_quant:False -inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="3,32,100" --rec_algorithm="StarNet" --use_gpu:True|False --enable_mkldnn:True|False --cpu_threads:1|6 --rec_batch_num:1|6 --use_tensorrt:True|False ---precision:fp32|fp16|int8 +--precision:fp32|int8 --rec_model_dir: --image_dir:./inference/rec_inference --save_log_path:./test/output/ diff --git a/test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml b/test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml new file mode 100644 index 0000000000000000000000000000000000000000..41e525205d2b047934a69a8b41a5e7d776990097 --- /dev/null +++ b/test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml @@ -0,0 +1,108 @@ +Global: + use_gpu: True + epoch_num: 72 + log_smooth_window: 20 + print_batch_step: 5 + save_model_dir: ./output/rec/srn_new + save_epoch_step: 3 + # evaluation is run every 5000 iterations after the 4000th iteration + eval_batch_step: [0, 5000] + cal_metric_during_train: True + pretrained_model: + checkpoints: + save_inference_dir: + use_visualdl: False + infer_img: doc/imgs_words/ch/word_1.jpg + # for data or label process + character_dict_path: + max_text_length: 25 + num_heads: 8 + infer_mode: False + use_space_char: False + save_res_path: ./output/rec/predicts_srn.txt + + +Optimizer: + name: Adam + beta1: 0.9 + beta2: 0.999 + clip_norm: 10.0 + lr: + learning_rate: 0.0001 + +Architecture: + model_type: rec + algorithm: SRN + in_channels: 1 + Transform: + Backbone: + name: ResNetFPN + Head: + name: SRNHead + max_text_length: 25 + num_heads: 8 + num_encoder_TUs: 2 + num_decoder_TUs: 4 + hidden_dims: 512 + +Loss: + name: SRNLoss + +PostProcess: + name: SRNLabelDecode + +Metric: + name: RecMetric + main_indicator: acc + +Train: + dataset: + name: SimpleDataSet + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SRNLabelEncode: # Class handling label + - SRNRecResizeImg: + image_shape: [1, 64, 256] + - KeepKeys: + keep_keys: ['image', + 'label', + 'length', + 'encoder_word_pos', + 'gsrm_word_pos', + 'gsrm_slf_attn_bias1', + 'gsrm_slf_attn_bias2'] # dataloader will return list in this order + loader: + shuffle: False + batch_size_per_card: 64 + drop_last: False + num_workers: 4 + +Eval: + dataset: + name: SimpleDataSet 
+ data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] + transforms: + - DecodeImage: # load image + img_mode: BGR + channel_first: False + - SRNLabelEncode: # Class handling label + - SRNRecResizeImg: + image_shape: [1, 64, 256] + - KeepKeys: + keep_keys: ['image', + 'label', + 'length', + 'encoder_word_pos', + 'gsrm_word_pos', + 'gsrm_slf_attn_bias1', + 'gsrm_slf_attn_bias2'] + loader: + shuffle: False + drop_last: False + batch_size_per_card: 32 + num_workers: 4 diff --git a/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt b/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb135df60b7716fd46a48482c0d7e8a3faca579a --- /dev/null +++ b/test_tipc/configs/rec_r50_fpn_vd_none_srn/train_infer_python.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:rec_r50_fpn_vd_none_srn +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=2|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=16|whole_train_whole_infer=64 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./inference/rec_inference +null:null +## +trainer:norm_train +norm_train:tools/train.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/rec_r50_vd_srn_train/best_accuracy +infer_export:tools/export_model.py -c test_tipc/configs/rec_r50_fpn_vd_none_srn/rec_r50_fpn_srn.yml -o +infer_quant:False +inference:tools/infer/predict_rec.py --rec_char_dict_path=./ppocr/utils/ic15_dict.txt --rec_image_shape="1,64,256" --rec_algorithm="SRN" --use_space_char=False +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1|6 +--use_tensorrt:True|False +--precision:fp32|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null + diff --git a/test_tipc/docs/jeston_test_train_inference_python.md b/test_tipc/docs/jeston_test_train_inference_python.md index e23aa7651da8b57c9f5e92338bb21dbde2ccda05..d96505985ea8a291b3579acb2aaee1b3d66c1baa 100644 --- a/test_tipc/docs/jeston_test_train_inference_python.md +++ b/test_tipc/docs/jeston_test_train_inference_python.md @@ -1,6 +1,6 @@ # Jeston端基础训练预测功能测试 -Jeston端基础训练预测功能测试的主程序为`test_train_inference_python.sh`,由于Jeston端CPU较差,Jeston只需要测试TIPC关于GPU和TensorRT预测推理的部分即可。 +Jeston端基础训练预测功能测试的主程序为`test_inference_inference.sh`,由于Jeston端CPU较差,Jeston只需要测试TIPC关于GPU和TensorRT预测推理的部分即可。 ## 1. 
测试结论汇总 @@ -40,21 +40,21 @@ Jeston端基础训练预测功能测试的主程序为`test_train_inference_pyth ### 2.2 功能测试 -先运行`prepare.sh`准备数据和模型,然后运行`test_train_inference_python.sh`进行测试,最终在```test_tipc/output```目录下生成`python_infer_*.log`格式的日志文件。 +先运行`prepare.sh`准备数据和模型,然后运行`test_inference_python.sh`进行测试,最终在```test_tipc/output```目录下生成`python_infer_*.log`格式的日志文件。 -`test_train_inference_python.sh`包含5种[运行模式](./test_train_inference_python.md),在Jeston端,仅需要测试预测推理的模式即可: +`test_inference_python.sh`仅有一个模式`whole_infer`,在Jetson端,仅需要测试预测推理的模式即可: ``` - 模式3:whole_infer,不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度; ```shell bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' # 用法1: -bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' +bash test_tipc/test_inference_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' # 用法2: 指定GPU卡预测,第三个传入参数为GPU卡号 bash test_tipc/test_inference_python.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1' ``` -运行相应指令后,在`test_tipc/output`文件夹下自动会保存运行日志。如`lite_train_lite_infer`模式下,会运行训练+inference的链条,因此,在`test_tipc/output`文件夹有以下文件: +运行相应指令后,在`test_tipc/output`文件夹下自动会保存运行日志。如`whole_infer`模式下,会运行动转静+inference的链条,因此,在`test_tipc/output`文件夹有以下文件: ``` test_tipc/output/ |- results_python.log # 运行指令状态的日志 diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh index 9854da77170e5c0a6a9ed5b8dee7c267b39ce833..6b9d3abb7e8e22ce45811bcdd7ea8791ebeb8312 100644 --- a/test_tipc/prepare.sh +++ b/test_tipc/prepare.sh @@ -25,7 +25,7 @@ if [ ${MODE} = "lite_train_lite_infer" ];then # pretrain lite train data wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate - if [ ${model_name} == "ch_PPOCRv2_det" ]; then + if [[ ${model_name} =~ "PPOCRv2_det" ]];then wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_distill_train.tar --no-check-certificate cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../ fi @@ -45,12 +45,12 @@ if [ ${MODE} = "lite_train_lite_infer" ];then wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../ - cd ./train_data && tar xf total_text_lite.tar && ln -s total_text && cd ../ + cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../ fi if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ] || [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate - wget -nc -P ./train_data/ wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate - cd ./train_data && tar xf total_text_lite.tar && ln -s total_text && cd ../ + wget -nc -P ./train_data/
https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate + cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../ fi if [ ${model_name} == "det_mv3_db_v2.0" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate @@ -61,6 +61,10 @@ if [ ${MODE} = "lite_train_lite_infer" ];then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar --no-check-certificate cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && cd ../ fi + if [ ${model_name} == "ch_ppocr_mobile_v2.0_rec_FPGM" ]; then + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar --no-check-certificate + cd ./pretrain_models/ && tar xf ch_ppocr_mobile_v2.0_rec_train.tar && cd ../ + fi elif [ ${MODE} = "whole_train_whole_infer" ];then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate @@ -74,15 +78,15 @@ elif [ ${MODE} = "whole_train_whole_infer" ];then cd ./pretrain_models/ && tar xf ch_PP-OCRv2_det_distill_train.tar && cd ../ fi if [ ${model_name} == "en_server_pgnetA" ]; then - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/total_text.tar --no-check-certificate + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate cd ./pretrain_models/ && tar xf en_server_pgnetA.tar && cd ../ - cd ./train_data && tar xf total_text.tar && ln -s total_text && cd ../ + cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../ fi if [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dataset/total_text.tar --no-check-certificate - cd ./train_data && tar xf total_text.tar && ln -s total_text && cd ../ + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/total_text_lite.tar --no-check-certificate + cd ./train_data && tar xf total_text_lite.tar && ln -s total_text_lite total_text && cd ../ fi elif [ ${MODE} = "lite_train_whole_infer" ];then wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate @@ -99,68 +103,145 @@ elif [ ${MODE} = "lite_train_whole_infer" ];then fi elif [ ${MODE} = "whole_infer" ];then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + cd ./inference && tar xf rec_inference.tar && tar xf ch_det_data_50.tar && cd ../ if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_train" rm -rf ./train_data/icdar2015 - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar --no-check-certificate wget -nc -P ./inference
https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_det_PACT" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_prune_infer" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ elif [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_server_v2.0_det_train.tar && tar xf ch_det_data_50.tar && cd ../ elif [ ${model_name} = "ch_ppocr_mobile_v2.0" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ elif [ ${model_name} = "ch_ppocr_server_v2.0" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar --no-check-certificate - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_det_data_50.tar && cd ../ - elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_rec_infer" - wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate - cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../ - elif [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then - eval_model_name="ch_ppocr_server_v2.0_rec_infer" - wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar --no-check-certificate - cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_PACT" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_rec_slim_infer" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + elif [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_FPGM" ]; then + eval_model_name="ch_PP-OCRv2_rec_infer" + wget -nc -P ./inference 
https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi - if [ ${model_name} = "ch_PPOCRv2_det" ]; then + if [[ ${model_name} =~ "ch_PPOCRv2_det" ]]; then eval_model_name="ch_PP-OCRv2_det_infer" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ fi - if [ ${model_name} = "ch_PPOCRv2_det" ]; then - wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate - wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/e2e_server_pgnetA_infer.tar --no-check-certificate - cd ./inference && tar xf e2e_server_pgnetA_infer.tar && tar xf ch_det_data_50.tar && cd ../ - fi + if [[ ${model_name} =~ "PPOCRv2_ocr_rec" ]]; then + eval_model_name="ch_PP-OCRv2_rec_infer" + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_slim_quant_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_PP-OCRv2_rec_slim_quant_infer.tar && cd ../ + fi if [ ${model_name} == "en_server_pgnetA" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/pgnet/en_server_pgnetA.tar --no-check-certificate - cd ./inference && tar xf en_server_pgnetA.tar && cd ../ + cd ./inference && tar xf en_server_pgnetA.tar && tar xf ch_det_data_50.tar && cd ../ fi if [ ${model_name} == "det_r50_vd_sast_icdar15_v2.0" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_icdar15_v2.0_train.tar --no-check-certificate - cd ./inference/ && tar xf det_r50_vd_sast_icdar15_v2.0_train.tar && cd ../ + cd ./inference/ && tar xf det_r50_vd_sast_icdar15_v2.0_train.tar && tar xf ch_det_data_50.tar && cd ../ + fi + if [ ${model_name} == "rec_mv3_none_none_ctc_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_none_ctc_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_mv3_none_none_ctc_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_r34_vd_none_none_ctc_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_none_ctc_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_r34_vd_none_none_ctc_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_mv3_none_bilstm_ctc_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_none_bilstm_ctc_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_mv3_none_bilstm_ctc_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_r34_vd_none_bilstm_ctc_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_none_bilstm_ctc_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_r34_vd_none_bilstm_ctc_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_mv3_tps_bilstm_ctc_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_ctc_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf 
rec_mv3_tps_bilstm_ctc_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_r34_vd_tps_bilstm_ctc_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_r34_vd_tps_bilstm_ctc_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "ch_ppocr_server_v2.0_rec" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/ch_ppocr_server_v2.0_rec_train.tar --no-check-certificate + cd ./inference/ && tar xf ch_ppocr_server_v2.0_rec_train.tar && cd ../ + fi + if [ ${model_name} == "ch_ppocr_mobile_v2.0_rec" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar --no-check-certificate + cd ./inference/ && tar xf ch_ppocr_mobile_v2.0_rec_train.tar && cd ../ + fi + if [ ${model_name} == "rec_mtb_nrtr" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mtb_nrtr_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_mtb_nrtr_train.tar && cd ../ + fi + if [ ${model_name} == "rec_mv3_tps_bilstm_att_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_mv3_tps_bilstm_att_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_mv3_tps_bilstm_att_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_r34_vd_tps_bilstm_att_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r34_vd_tps_bilstm_att_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_r34_vd_tps_bilstm_att_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "rec_r31_sar" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/rec/rec_r31_sar_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_r31_sar_train.tar && cd ../ + fi + if [ ${model_name} == "rec_r50_fpn_vd_none_srn" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/rec_r50_vd_srn_train.tar --no-check-certificate + cd ./inference/ && tar xf rec_r50_vd_srn_train.tar && cd ../ + fi + + if [ ${model_name} == "det_r50_vd_sast_totaltext_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_sast_totaltext_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf det_r50_vd_sast_totaltext_v2.0_train.tar && cd ../ fi if [ ${model_name} == "det_mv3_db_v2.0" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar --no-check-certificate - cd ./inference/ && tar xf det_mv3_db_v2.0_train.tar && cd ../ + cd ./inference/ && tar xf det_mv3_db_v2.0_train.tar && tar xf ch_det_data_50.tar && cd ../ fi if [ ${model_name} == "det_r50_db_v2.0" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_db_v2.0_train.tar --no-check-certificate - cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && cd ../ + cd ./inference/ && tar xf det_r50_vd_db_v2.0_train.tar && tar xf ch_det_data_50.tar && cd ../ + fi + if [ ${model_name} == "det_mv3_pse_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_mv3_pse_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf det_mv3_pse_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "det_r50_vd_pse_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.1/en_det/det_r50_vd_pse_v2.0_train.tar
--no-check-certificate + cd ./inference/ && tar xf det_r50_vd_pse_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "det_mv3_east_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_east_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf det_mv3_east_v2.0_train.tar && cd ../ + fi + if [ ${model_name} == "det_r50_vd_east_v2.0" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_r50_vd_east_v2.0_train.tar --no-check-certificate + cd ./inference/ && tar xf det_r50_vd_east_v2.0_train.tar && cd ../ fi fi + if [ ${MODE} = "klquant_whole_infer" ]; then - if [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ]; then + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar --no-check-certificate + cd ./train_data/ && tar xf icdar2015_lite.tar + ln -s ./icdar2015_lite ./icdar2015 && cd ../ + if [ ${model_name} = "ch_ppocr_mobile_v2.0_det_KL" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../ @@ -171,6 +252,13 @@ if [ ${MODE} = "klquant_whole_infer" ]; then wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar --no-check-certificate cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ fi + if [ ${model_name} = "ch_ppocr_mobile_v2.0_rec_KL" ]; then + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar --no-check-certificate + wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar --no-check-certificate + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar --no-check-certificate + cd ./train_data/ && tar xf ic15_data.tar && cd ../ + cd ./inference && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf rec_inference.tar && cd ../ + fi fi if [ ${MODE} = "cpp_infer" ];then @@ -192,8 +280,11 @@ fi if [ ${MODE} = "serving_infer" ];then # prepare serving env - python_name=$(func_parser_value "${lines[2]}") - wget https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl + python_name_list=$(func_parser_value "${lines[2]}") + IFS='|' + array=(${python_name_list}) + python_name=${array[0]} + wget -nc https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl ${python_name} -m pip install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl ${python_name} -m pip install paddle_serving_client==0.6.1 ${python_name} -m pip install paddle-serving-app==0.6.3 diff --git a/test_tipc/readme.md b/test_tipc/readme.md index a188b675a90a651588fdda08694bc30ca9e0f301..8b2489f3445ddfa87c1e587d6da81992fdb90e64 100644 --- a/test_tipc/readme.md +++ b/test_tipc/readme.md @@ -1,9 +1,9 @@ -# 飞桨训推一体认证(TIPC) +# 飞桨训推一体全流程(TIPC) ## 1. 简介 -飞桨除了基本的模型训练和预测,还提供了支持多端多平台的高性能推理部署工具。本文档提供了PaddleOCR中所有模型的飞桨训推一体认证 (Training and Inference Pipeline Certification(TIPC)) 信息和测试工具,方便用户查阅每种模型的训练推理部署打通情况,并可以进行一键测试。 +飞桨除了基本的模型训练和预测,还提供了支持多端多平台的高性能推理部署工具。本文档提供了PaddleOCR中所有模型的飞桨训推一体全流程(Training and Inference Pipeline Criterion(TIPC))信息和测试工具,方便用户查阅每种模型的训练推理部署打通情况,并可以进行一键测试。
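The `prepare.sh` hunk above (and the `test_serving.sh` change further below) lean on the `func_parser_key`/`func_parser_value` helpers sourced from `test_tipc/common_func.sh`. A minimal sketch of the convention they implement follows, assuming the usual TIPC layout (each config line is `key:value`, and multi-valued fields are further split on `|`); the function bodies here are an approximation for illustration, not the verbatim helpers.

```bash
#!/bin/bash
# Approximate sketch of the TIPC config-line parsing convention.
# The real helpers live in test_tipc/common_func.sh.
function func_parser_key(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[0]}
}
function func_parser_value(){
    strs=$1
    IFS=":"
    array=(${strs})
    echo ${array[1]}
}

line="python:python3.7|cpp"
python_name_list=$(func_parser_value "${line}")   # -> "python3.7|cpp"
IFS='|'
array=(${python_name_list})                        # split the multi-valued field
echo "first interpreter: ${array[0]}"              # -> python3.7
```

This is why the `serving_infer` branch now takes `${array[0]}` before installing wheels: `lines[2]` may carry several interpreters separated by `|`, and only the first one is needed to prepare the environment.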
diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh index d26954353ef1e81ae49364b7f9d20357768cff85..4787f83093b0040ae3da6d9efb9028d0cc28de00 100644 --- a/test_tipc/test_inference_cpp.sh +++ b/test_tipc/test_inference_cpp.sh @@ -64,10 +64,11 @@ function func_cpp_inference(){ set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}") set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}") set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}") + set_mkldnn=$(func_set_params "${cpp_use_mkldnn_key}" "${use_mkldnn}") set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}") set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}") set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}") - command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" diff --git a/test_tipc/test_inference_jeston.sh b/test_tipc/test_inference_jeston.sh deleted file mode 100644 index 2fd76e1e9e7e8c7b52d0b6838cd15840a59fe5c4..0000000000000000000000000000000000000000 --- a/test_tipc/test_inference_jeston.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash -source test_tipc/common_func.sh -source test_tipc/test_train_inference_python.sh - -FILENAME=$1 -# MODE be one of ['whole_infer'] -MODE=$2 - -dataline=$(awk 'NR==1, NR==17{print}' $FILENAME) - -# parser params -IFS=$'\n' -lines=(${dataline}) - -model_name=$(func_parser_value "${lines[1]}") -python=$(func_parser_value "${lines[2]}") - -infer_model_dir_list=$(func_parser_value "${lines[3]}") -infer_export_list=$(func_parser_value "${lines[4]}") -infer_is_quant=$(func_parser_value "${lines[5]}") -# parser inference -inference_py=$(func_parser_value "${lines[6]}") -use_gpu_key=$(func_parser_key "${lines[7]}") -use_gpu_list=$(func_parser_value "${lines[7]}") -use_mkldnn_key=$(func_parser_key "${lines[8]}") -use_mkldnn_list=$(func_parser_value "${lines[8]}") -cpu_threads_key=$(func_parser_key "${lines[9]}") -cpu_threads_list=$(func_parser_value "${lines[9]}") -batch_size_key=$(func_parser_key "${lines[10]}") -batch_size_list=$(func_parser_value "${lines[10]}") -use_trt_key=$(func_parser_key "${lines[11]}") -use_trt_list=$(func_parser_value "${lines[11]}") -precision_key=$(func_parser_key "${lines[12]}") -precision_list=$(func_parser_value "${lines[12]}") -infer_model_key=$(func_parser_key "${lines[13]}") -image_dir_key=$(func_parser_key "${lines[14]}") -infer_img_dir=$(func_parser_value "${lines[14]}") -save_log_key=$(func_parser_key "${lines[15]}") -benchmark_key=$(func_parser_key "${lines[16]}") -benchmark_value=$(func_parser_value "${lines[16]}") -infer_key1=$(func_parser_key "${lines[17]}") -infer_value1=$(func_parser_value "${lines[17]}") - - -LOG_PATH="./test_tipc/output" -mkdir -p ${LOG_PATH} -status_log="${LOG_PATH}/results_python.log" - - -if [ ${MODE} = "whole_infer" ]; then - GPUID=$3 - if [ ${#GPUID} -le 0 ];then - env=" " - else - env="export CUDA_VISIBLE_DEVICES=${GPUID}" - fi - # set CUDA_VISIBLE_DEVICES - eval $env - export Count=0 - IFS="|" - infer_run_exports=(${infer_export_list}) - 
infer_quant_flag=(${infer_is_quant}) - for infer_model in ${infer_model_dir_list[*]}; do - # run export - if [ ${infer_run_exports[Count]} != "null" ];then - save_infer_dir=$(dirname $infer_model) - set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") - set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") - export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" - echo ${infer_run_exports[Count]} - echo $export_cmd - eval $export_cmd - status_export=$? - status_check $status_export "${export_cmd}" "${status_log}" - else - save_infer_dir=${infer_model} - fi - #run inference - is_quant=${infer_quant_flag[Count]} - if [ ${MODE} = "klquant_infer" ]; then - is_quant="True" - fi - func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} - Count=$(($Count + 1)) - done -fi - diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh new file mode 100644 index 0000000000000000000000000000000000000000..27276d55b95051e167432600308f42127d784ee6 --- /dev/null +++ b/test_tipc/test_inference_python.sh @@ -0,0 +1,170 @@ +#!/bin/bash +source test_tipc/common_func.sh +#source test_tipc/test_train_inference_python.sh + +FILENAME=$1 +# MODE be one of ['whole_infer'] +MODE=$2 + +dataline=$(awk 'NR==1, NR==20{print}' $FILENAME) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") + +infer_model_dir_list=$(func_parser_value "${lines[3]}") +infer_export_list=$(func_parser_value "${lines[4]}") +infer_is_quant=$(func_parser_value "${lines[5]}") +# parser inference +inference_py=$(func_parser_value "${lines[6]}") +use_gpu_key=$(func_parser_key "${lines[7]}") +use_gpu_list=$(func_parser_value "${lines[7]}") +use_mkldnn_key=$(func_parser_key "${lines[8]}") +use_mkldnn_list=$(func_parser_value "${lines[8]}") +cpu_threads_key=$(func_parser_key "${lines[9]}") +cpu_threads_list=$(func_parser_value "${lines[9]}") +batch_size_key=$(func_parser_key "${lines[10]}") +batch_size_list=$(func_parser_value "${lines[10]}") +use_trt_key=$(func_parser_key "${lines[11]}") +use_trt_list=$(func_parser_value "${lines[11]}") +precision_key=$(func_parser_key "${lines[12]}") +precision_list=$(func_parser_value "${lines[12]}") +infer_model_key=$(func_parser_key "${lines[13]}") +image_dir_key=$(func_parser_key "${lines[14]}") +infer_img_dir=$(func_parser_value "${lines[14]}") +rec_model_key=$(func_parser_key "${lines[15]}") +rec_model_value=$(func_parser_value "${lines[15]}") +benchmark_key=$(func_parser_key "${lines[16]}") +benchmark_value=$(func_parser_value "${lines[16]}") +infer_key1=$(func_parser_key "${lines[17]}") +infer_value1=$(func_parser_value "${lines[17]}") + + + +LOG_PATH="./test_tipc/output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results_python.log" + + +function func_inference(){ + IFS='|' + _python=$1 + _script=$2 + _model_dir=$3 + _log_path=$4 + _img_dir=$5 + _flag_quant=$6 + # inference + for use_gpu in ${use_gpu_list[*]}; do + if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then + continue + fi + for threads in ${cpu_threads_list[*]}; do + for batch_size in ${batch_size_list[*]}; do + for precision in ${precision_list[*]}; do + if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then + continue + fi # skip when enable fp16 but disable 
mkldnn + if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then + continue + fi # skip when quant model inference but precision is not int8 + set_precision=$(func_set_params "${precision_key}" "${precision}") + + _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}") + set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + done + done + done + done + elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${precision_key}" "${precision}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + + done + done + done + else + echo "Currently, hardware other than CPU and GPU is not supported!"
+ fi + done +} + +if [ ${MODE} = "whole_infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir=$(dirname $infer_model) + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" + echo ${infer_run_exports[Count]} + eval $export_cmd + status_export=$? + status_check $status_export "${export_cmd}" "${status_log}" + else + save_infer_dir=${infer_model} + fi + #run inference + is_quant=${infer_quant_flag[Count]} + if [ ${MODE} = "klquant_infer" ]; then + is_quant="True" + fi + func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} + Count=$(($Count + 1)) + done +fi + + diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh index c36935a60fecacea672fd932773a8fb0bdcd619b..1318d012d401c4f4e8540a5d0d227ea75f677004 100644 --- a/test_tipc/test_serving.sh +++ b/test_tipc/test_serving.sh @@ -10,7 +10,7 @@ lines=(${dataline}) # parser serving model_name=$(func_parser_value "${lines[1]}") -python=$(func_parser_value "${lines[2]}") +python_list=$(func_parser_value "${lines[2]}") trans_model_py=$(func_parser_value "${lines[3]}") infer_model_dir_key=$(func_parser_key "${lines[4]}") infer_model_dir_value=$(func_parser_value "${lines[4]}") @@ -54,14 +54,15 @@ function func_serving(){ set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}") set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}") set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}") - trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" + python_list=(${python_list}) + trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}" eval $trans_model_cmd cd ${serving_dir_value} echo $PWD unset https_proxy unset http_proxy - for python in ${python[*]}; do - if [ ${python} = "cpp"]; then + for python in ${python_list[*]}; do + if [ ${python} = "cpp" ]; then for use_gpu in ${web_use_gpu_list[*]}; do if [ ${use_gpu} = "null" ]; then web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293" @@ -91,9 +92,6 @@ function func_serving(){ echo ${ues_gpu} if [ ${use_gpu} = "null" ]; then for use_mkldnn in ${web_use_mkldnn_list[*]}; do - if [ ${use_mkldnn} = "False" ]; then - continue - fi for threads in ${web_cpu_threads_list[*]}; do set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}") web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &" @@ -124,6 +122,9 @@ function func_serving(){ continue fi set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}") + if [ ${use_trt} = True ]; then + device_type=2 + fi set_precision=$(func_set_params "${web_precision_key}" "${precision}") 
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & " eval $web_service_cmd diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index 7d035256527e01f31a4a1bc113caff3c744d859d..b69c0f278f2886eeb7c01847bab5d54ff7a18af6 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -90,36 +90,39 @@ infer_value1=$(func_parser_value "${lines[50]}") # parser klquant_infer if [ ${MODE} = "klquant_whole_infer" ]; then - dataline=$(awk 'NR==1 NR==17{print}' $FILENAME) + dataline=$(awk 'NR==1, NR==17{print}' $FILENAME) lines=(${dataline}) model_name=$(func_parser_value "${lines[1]}") python=$(func_parser_value "${lines[2]}") + export_weight=$(func_parser_key "${lines[3]}") + save_infer_key=$(func_parser_key "${lines[4]}") # parser inference model - infer_model_dir_list=$(func_parser_value "${lines[3]}") - infer_export_list=$(func_parser_value "${lines[4]}") - infer_is_quant=$(func_parser_value "${lines[5]}") + infer_model_dir_list=$(func_parser_value "${lines[5]}") + infer_export_list=$(func_parser_value "${lines[6]}") + infer_is_quant=$(func_parser_value "${lines[7]}") # parser inference - inference_py=$(func_parser_value "${lines[6]}") - use_gpu_key=$(func_parser_key "${lines[7]}") - use_gpu_list=$(func_parser_value "${lines[7]}") - use_mkldnn_key=$(func_parser_key "${lines[8]}") - use_mkldnn_list=$(func_parser_value "${lines[8]}") - cpu_threads_key=$(func_parser_key "${lines[9]}") - cpu_threads_list=$(func_parser_value "${lines[9]}") - batch_size_key=$(func_parser_key "${lines[10]}") - batch_size_list=$(func_parser_value "${lines[10]}") - use_trt_key=$(func_parser_key "${lines[11]}") - use_trt_list=$(func_parser_value "${lines[11]}") - precision_key=$(func_parser_key "${lines[12]}") - precision_list=$(func_parser_value "${lines[12]}") - infer_model_key=$(func_parser_key "${lines[13]}") - image_dir_key=$(func_parser_key "${lines[14]}") - infer_img_dir=$(func_parser_value "${lines[14]}") - save_log_key=$(func_parser_key "${lines[15]}") - benchmark_key=$(func_parser_key "${lines[16]}") - benchmark_value=$(func_parser_value "${lines[16]}") - infer_key1=$(func_parser_key "${lines[17]}") - infer_value1=$(func_parser_value "${lines[17]}") + inference_py=$(func_parser_value "${lines[8]}") + use_gpu_key=$(func_parser_key "${lines[9]}") + use_gpu_list=$(func_parser_value "${lines[9]}") + use_mkldnn_key=$(func_parser_key "${lines[10]}") + use_mkldnn_list=$(func_parser_value "${lines[10]}") + cpu_threads_key=$(func_parser_key "${lines[11]}") + cpu_threads_list=$(func_parser_value "${lines[11]}") + batch_size_key=$(func_parser_key "${lines[12]}") + batch_size_list=$(func_parser_value "${lines[12]}") + use_trt_key=$(func_parser_key "${lines[13]}") + use_trt_list=$(func_parser_value "${lines[13]}") + precision_key=$(func_parser_key "${lines[14]}") + precision_list=$(func_parser_value "${lines[14]}") + infer_model_key=$(func_parser_key "${lines[15]}") + image_dir_key=$(func_parser_key "${lines[16]}") + infer_img_dir=$(func_parser_value "${lines[16]}") + save_log_key=$(func_parser_key "${lines[17]}") + save_log_value=$(func_parser_value "${lines[17]}") + benchmark_key=$(func_parser_key "${lines[18]}") + benchmark_value=$(func_parser_value "${lines[18]}") + infer_key1=$(func_parser_key "${lines[19]}") + infer_value1=$(func_parser_value "${lines[19]}") fi LOG_PATH="./test_tipc/output" @@ -157,10 +160,12 @@ function func_inference(){ set_infer_data=$(func_set_params 
"${image_dir_key}" "${_img_dir}") set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}") set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" @@ -189,8 +194,9 @@ function func_inference(){ set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") set_precision=$(func_set_params "${precision_key}" "${precision}") set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" @@ -235,7 +241,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then fi #run inference is_quant=${infer_quant_flag[Count]} - if [ ${MODE} = "klquant_infer" ]; then + if [ ${MODE} = "klquant_whole_infer" ]; then is_quant="True" fi func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} @@ -316,10 +322,6 @@ else save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}" fi - # load pretrain from norm training if current trainer is pact or fpgm trainer - if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then - set_pretrain="${load_norm_train_model}" - fi set_save_model=$(func_set_params "${save_model_key}" "${save_log}") if [ ${#gpu} -le 2 ];then # train with cpu or single gpu @@ -335,10 +337,7 @@ else status_check $? 
"${cmd}" "${status_log}" set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") - # save norm trained models to set pretrain for pact training and fpgm training - if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then - load_norm_train_model=${set_eval_pretrain} - fi + # run eval if [ ${eval_py} != "null" ]; then set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}") diff --git a/tools/infer/predict_cls.py b/tools/infer/predict_cls.py index a25cac2600e67667badc76c648c1fcda12981a0f..ab3f4b04f0c306aaf7e26eb98e781938b7528275 100755 --- a/tools/infer/predict_cls.py +++ b/tools/infer/predict_cls.py @@ -145,8 +145,6 @@ def main(args): for ino in range(len(img_list)): logger.info("Predicts of {}:{}".format(valid_image_file_list[ino], cls_res[ino])) - logger.info( - "The predict time about text angle classify module is as follows: ") if __name__ == "__main__": diff --git a/tools/infer/utility.py b/tools/infer/utility.py index bd9e14a65749f4223eeb6cf79a37546909854d17..21bbee098ef19456d05165969a9ad400400f1264 100644 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -195,6 +195,7 @@ def create_predictor(args, mode, logger): max_batch_size=args.max_batch_size, min_subgraph_size=args.min_subgraph_size) # skip the minmum trt subgraph + use_dynamic_shape = True if mode == "det": min_input_shape = { "x": [1, 3, 50, 50], @@ -211,7 +212,7 @@ def create_predictor(args, mode, logger): "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2] } max_input_shape = { - "x": [1, 3, 1280, 1280], + "x": [1, 3, 1536, 1536], "conv2d_92.tmp_0": [1, 120, 400, 400], "conv2d_91.tmp_0": [1, 24, 200, 200], "conv2d_59.tmp_0": [1, 96, 400, 400], @@ -260,19 +261,20 @@ def create_predictor(args, mode, logger): max_input_shape.update(max_pact_shape) opt_input_shape.update(opt_pact_shape) elif mode == "rec": + if args.rec_algorithm != "CRNN": + use_dynamic_shape = False min_input_shape = {"x": [1, 3, 32, 10]} - max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]} + max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1536]} opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]} elif mode == "cls": min_input_shape = {"x": [1, 3, 48, 10]} max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]} opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]} else: - min_input_shape = {"x": [1, 3, 10, 10]} - max_input_shape = {"x": [1, 3, 512, 512]} - opt_input_shape = {"x": [1, 3, 256, 256]} - config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape, - opt_input_shape) + use_dynamic_shape = False + if use_dynamic_shape: + config.set_trt_dynamic_shape_info( + min_input_shape, max_input_shape, opt_input_shape) else: config.disable_gpu() @@ -311,7 +313,10 @@ def create_predictor(args, mode, logger): def get_infer_gpuid(): - cmd = "env | grep CUDA_VISIBLE_DEVICES" + if not paddle.fluid.core.is_compiled_with_rocm(): + cmd = "env | grep CUDA_VISIBLE_DEVICES" + else: + cmd = "env | grep HIP_VISIBLE_DEVICES" env_cuda = os.popen(cmd).readlines() if len(env_cuda) == 0: return 0 diff --git a/tools/infer_det.py b/tools/infer_det.py index bb2cca7362e81494018aa3471664d60bef1b852c..1c679e0faf0d3ebdb6ca7ed4c317ce3eecfa910f 100755 --- a/tools/infer_det.py +++ b/tools/infer_det.py @@ -53,6 +53,7 @@ def draw_det_res(dt_boxes, config, img, img_name, save_path): logger.info("The detected Image saved in {}".format(save_path)) +@paddle.no_grad() def main(): global_config = config['Global']