diff --git a/MANIFEST.in b/MANIFEST.in index d674fabc5d714f7e31ed00b32be2d44d6dd10871..1fcf184dacee9dcaf3d5b2e62d12c7b156e068c7 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,5 +5,6 @@ recursive-include ppocr/utils *.txt utility.py logging.py network.py recursive-include ppocr/data *.py recursive-include ppocr/postprocess *.py recursive-include tools/infer *.py +recursive-include tools __init__.py recursive-include ppocr/utils/e2e_utils *.py recursive-include ppstructure *.py \ No newline at end of file diff --git a/PPOCRLabel/README.md b/PPOCRLabel/README.md index 7a936543277e2e1d5687681ac78eca96f5f3f400..9375559f3b10baaa52d034badaee40214b7c31ff 100644 --- a/PPOCRLabel/README.md +++ b/PPOCRLabel/README.md @@ -207,6 +207,24 @@ For some data that are difficult to recognize, the recognition results will not pip install opencv-contrib-python-headless==4.2.0.32 ``` +### Dataset division + +- Enter the following command in the terminal to execute the dataset division script: + ``` + cd ./PPOCRLabel # Change the directory to the PPOCRLabel folder + python gen_ocr_train_val_test.py --trainValTestRatio 6:2:2 --labelRootPath ../train_data/label --detRootPath ../train_data/det --recRootPath ../train_data/rec + ``` + +- Parameter Description: + + trainValTestRatio is the division ratio of the number of images in the training set, validation set, and test set, set according to your actual situation, the default is 6:2:2 + + labelRootPath is the storage path of the dataset labeled by PPOCRLabel, the default is ../train_data/label + + detRootPath is the path where the text detection dataset is divided according to the dataset marked by PPOCRLabel. The default is ../train_data/det + + recRootPath is the path where the character recognition dataset is divided according to the dataset marked by PPOCRLabel. The default is ../train_data/rec + ### Related 1.[Tzutalin. LabelImg. Git code (2015)](https://github.com/tzutalin/labelImg) \ No newline at end of file diff --git a/PPOCRLabel/README_ch.md b/PPOCRLabel/README_ch.md index 17bb95c08267bfac5e2cecde2e1d88c4a7cb1b60..7abf2f5d8b193b895e1993a8e1628d29c371df0a 100644 --- a/PPOCRLabel/README_ch.md +++ b/PPOCRLabel/README_ch.md @@ -193,7 +193,23 @@ PPOCRLabel支持三种导出方式: ``` pip install opencv-contrib-python-headless==4.2.0.32 ``` +### 数据集划分 +- 在终端中输入以下命令执行数据集划分脚本: + ``` + cd ./PPOCRLabel # 将目录切换到PPOCRLabel文件夹下 + python gen_ocr_train_val_test.py --trainValTestRatio 6:2:2 --labelRootPath ../train_data/label --detRootPath ../train_data/det --recRootPath ../train_data/rec + ``` +- 参数说明: + + trainValTestRatio是训练集、验证集、测试集的图像数量划分比例,根据你的实际情况设定,默认是6:2:2 + + labelRootPath是PPOCRLabel标注的数据集存放路径,默认是../train_data/label + + detRootPath是根据PPOCRLabel标注的数据集划分后的文本检测数据集存放的路径,默认是../train_data/det + recRootPath是根据PPOCRLabel标注的数据集划分后的字符识别数据集存放的路径,默认是../train_data/rec + + ### 4. 参考资料 1.[Tzutalin. LabelImg. 
Git code (2015)](https://github.com/tzutalin/labelImg) diff --git a/PPOCRLabel/gen_ocr_train_val_test.py b/PPOCRLabel/gen_ocr_train_val_test.py new file mode 100644 index 0000000000000000000000000000000000000000..64cba612ae267835dd47aedc2b0356c9df462038 --- /dev/null +++ b/PPOCRLabel/gen_ocr_train_val_test.py @@ -0,0 +1,147 @@ +# coding:utf8 +import os +import shutil +import random +import argparse + + +# 删除划分的训练集、验证集、测试集文件夹,重新创建一个空的文件夹 +def isCreateOrDeleteFolder(path, flag): + flagPath = os.path.join(path, flag) + + if os.path.exists(flagPath): + shutil.rmtree(flagPath) + + os.makedirs(flagPath) + flagAbsPath = os.path.abspath(flagPath) + return flagAbsPath + + +def splitTrainVal(root, dir, absTrainRootPath, absValRootPath, absTestRootPath, trainTxt, valTxt, testTxt, flag): + # 按照指定的比例划分训练集、验证集、测试集 + labelPath = os.path.join(root, dir) + labelAbsPath = os.path.abspath(labelPath) + + if flag == "det": + labelFilePath = os.path.join(labelAbsPath, args.detLabelFileName) + elif flag == "rec": + labelFilePath = os.path.join(labelAbsPath, args.recLabelFileName) + + labelFileRead = open(labelFilePath, "r", encoding="UTF-8") + labelFileContent = labelFileRead.readlines() + random.shuffle(labelFileContent) + labelRecordLen = len(labelFileContent) + + for index, labelRecordInfo in enumerate(labelFileContent): + imageRelativePath = labelRecordInfo.split('\t')[0] + imageLabel = labelRecordInfo.split('\t')[1] + imageName = os.path.basename(imageRelativePath) + + if flag == "det": + imagePath = os.path.join(labelAbsPath, imageName) + elif flag == "rec": + imagePath = os.path.join(labelAbsPath, "{}\\{}".format(args.recImageDirName, imageName)) + + # 按预设的比例划分训练集、验证集、测试集 + trainValTestRatio = args.trainValTestRatio.split(":") + trainRatio = eval(trainValTestRatio[0]) / 10 + valRatio = trainRatio + eval(trainValTestRatio[1]) / 10 + curRatio = index / labelRecordLen + + if curRatio < trainRatio: + imageCopyPath = os.path.join(absTrainRootPath, imageName) + shutil.copy(imagePath, imageCopyPath) + trainTxt.write("{}\t{}".format(imageCopyPath, imageLabel)) + elif curRatio >= trainRatio and curRatio < valRatio: + imageCopyPath = os.path.join(absValRootPath, imageName) + shutil.copy(imagePath, imageCopyPath) + valTxt.write("{}\t{}".format(imageCopyPath, imageLabel)) + else: + imageCopyPath = os.path.join(absTestRootPath, imageName) + shutil.copy(imagePath, imageCopyPath) + testTxt.write("{}\t{}".format(imageCopyPath, imageLabel)) + + +# 删掉存在的文件 +def removeFile(path): + if os.path.exists(path): + os.remove(path) + + +def genDetRecTrainVal(args): + detAbsTrainRootPath = isCreateOrDeleteFolder(args.detRootPath, "train") + detAbsValRootPath = isCreateOrDeleteFolder(args.detRootPath, "val") + detAbsTestRootPath = isCreateOrDeleteFolder(args.detRootPath, "test") + recAbsTrainRootPath = isCreateOrDeleteFolder(args.recRootPath, "train") + recAbsValRootPath = isCreateOrDeleteFolder(args.recRootPath, "val") + recAbsTestRootPath = isCreateOrDeleteFolder(args.recRootPath, "test") + + removeFile(os.path.join(args.detRootPath, "train.txt")) + removeFile(os.path.join(args.detRootPath, "val.txt")) + removeFile(os.path.join(args.detRootPath, "test.txt")) + removeFile(os.path.join(args.recRootPath, "train.txt")) + removeFile(os.path.join(args.recRootPath, "val.txt")) + removeFile(os.path.join(args.recRootPath, "test.txt")) + + detTrainTxt = open(os.path.join(args.detRootPath, "train.txt"), "a", encoding="UTF-8") + detValTxt = open(os.path.join(args.detRootPath, "val.txt"), "a", encoding="UTF-8") + detTestTxt = 
open(os.path.join(args.detRootPath, "test.txt"), "a", encoding="UTF-8") + recTrainTxt = open(os.path.join(args.recRootPath, "train.txt"), "a", encoding="UTF-8") + recValTxt = open(os.path.join(args.recRootPath, "val.txt"), "a", encoding="UTF-8") + recTestTxt = open(os.path.join(args.recRootPath, "test.txt"), "a", encoding="UTF-8") + + for root, dirs, files in os.walk(args.labelRootPath): + for dir in dirs: + splitTrainVal(root, dir, detAbsTrainRootPath, detAbsValRootPath, detAbsTestRootPath, detTrainTxt, detValTxt, + detTestTxt, "det") + splitTrainVal(root, dir, recAbsTrainRootPath, recAbsValRootPath, recAbsTestRootPath, recTrainTxt, recValTxt, + recTestTxt, "rec") + break + + +if __name__ == "__main__": + # 功能描述:分别划分检测和识别的训练集、验证集、测试集 + # 说明:可以根据自己的路径和需求调整参数,图像数据往往多人合作分批标注,每一批图像数据放在一个文件夹内用PPOCRLabel进行标注, + # 如此会有多个标注好的图像文件夹汇总并划分训练集、验证集、测试集的需求 + parser = argparse.ArgumentParser() + parser.add_argument( + "--trainValTestRatio", + type=str, + default="6:2:2", + help="ratio of trainset:valset:testset") + parser.add_argument( + "--labelRootPath", + type=str, + default="../train_data/label", + help="path to the dataset marked by ppocrlabel, E.g, dataset folder named 1,2,3..." + ) + parser.add_argument( + "--detRootPath", + type=str, + default="../train_data/det", + help="the path where the divided detection dataset is placed") + parser.add_argument( + "--recRootPath", + type=str, + default="../train_data/rec", + help="the path where the divided recognition dataset is placed" + ) + parser.add_argument( + "--detLabelFileName", + type=str, + default="Label.txt", + help="the name of the detection annotation file") + parser.add_argument( + "--recLabelFileName", + type=str, + default="rec_gt.txt", + help="the name of the recognition annotation file" + ) + parser.add_argument( + "--recImageDirName", + type=str, + default="crop_img", + help="the name of the folder where the cropped recognition dataset is located" + ) + args = parser.parse_args() + genDetRecTrainVal(args) diff --git a/configs/det/det_r50_vd_sast_icdar15.yml b/configs/det/det_r50_vd_sast_icdar15.yml index dbfcefca964e73d42298fbbbc1e654b3bd809c77..e1bf6fad0c4f526c2b635494560446e779bdb572 100755 --- a/configs/det/det_r50_vd_sast_icdar15.yml +++ b/configs/det/det_r50_vd_sast_icdar15.yml @@ -8,7 +8,7 @@ Global: # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] cal_metric_during_train: False - pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained checkpoints: save_inference_dir: use_visualdl: False @@ -106,4 +106,4 @@ Eval: shuffle: False drop_last: False batch_size_per_card: 1 # must be 1 - num_workers: 2 \ No newline at end of file + num_workers: 2 diff --git a/configs/det/det_r50_vd_sast_totaltext.yml b/configs/det/det_r50_vd_sast_totaltext.yml index 88dd31f3c21b184d956ad718dae808bb6054532e..44a0766b1bf2790a9633602bc637932529046e34 100755 --- a/configs/det/det_r50_vd_sast_totaltext.yml +++ b/configs/det/det_r50_vd_sast_totaltext.yml @@ -8,7 +8,7 @@ Global: # evaluation is run every 5000 iterations after the 4000th iteration eval_batch_step: [4000, 5000] cal_metric_during_train: False - pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained/ + pretrained_model: ./pretrain_models/ResNet50_vd_ssld_pretrained checkpoints: save_inference_dir: use_visualdl: False diff --git a/configs/table/table_mv3.yml b/configs/table/table_mv3.yml index 
a74e18d318699685400cc48430c04db3fef70b60..1a91ea95afb4ff91d3fd68fe0df6afaac9304661 100755 --- a/configs/table/table_mv3.yml +++ b/configs/table/table_mv3.yml @@ -1,29 +1,28 @@ Global: use_gpu: true - epoch_num: 50 + epoch_num: 400 log_smooth_window: 20 print_batch_step: 5 save_model_dir: ./output/table_mv3/ - save_epoch_step: 5 + save_epoch_step: 3 # evaluation is run every 400 iterations after the 0th iteration eval_batch_step: [0, 400] cal_metric_during_train: True - pretrained_model: + pretrained_model: checkpoints: save_inference_dir: use_visualdl: False - infer_img: doc/imgs_words/ch/word_1.jpg + infer_img: doc/table/table.jpg # for data or label process character_dict_path: ppocr/utils/dict/table_structure_dict.txt character_type: en max_text_length: 100 - max_elem_length: 500 + max_elem_length: 800 max_cell_num: 500 infer_mode: False process_total_num: 0 process_cut_num: 0 - Optimizer: name: Adam beta1: 0.9 @@ -41,13 +40,15 @@ Architecture: Backbone: name: MobileNetV3 scale: 1.0 - model_name: small - disable_se: True + model_name: large Head: name: TableAttentionHead hidden_size: 256 l2_decay: 0.00001 loc_type: 2 + max_text_length: 100 + max_elem_length: 800 + max_cell_num: 500 Loss: name: TableAttentionLoss diff --git a/deploy/lite/ocr_db_crnn.cc b/deploy/lite/ocr_db_crnn.cc index 9a7d6548654bdd21110f0fe343efd92a13dcb4c0..011d4adbeb65732f12c263ddfec94afb84bf5969 100644 --- a/deploy/lite/ocr_db_crnn.cc +++ b/deploy/lite/ocr_db_crnn.cc @@ -307,21 +307,10 @@ RunDetModel(std::shared_ptr predictor, cv::Mat img, return filter_boxes; } -std::shared_ptr loadModel(std::string model_file, std::string power_mode, int num_threads) { +std::shared_ptr loadModel(std::string model_file, int num_threads) { MobileConfig config; config.set_model_from_file(model_file); - if (power_mode == "LITE_POWER_HIGH"){ - config.set_power_mode(LITE_POWER_HIGH); - } else { - if (power_mode == "LITE_POWER_LOW") { - config.set_power_mode(LITE_POWER_HIGH); - } else { - std::cerr << "Only support LITE_POWER_HIGH or LITE_POWER_HIGH." 
<< std::endl; - exit(1); - } - } - config.set_threads(num_threads); std::shared_ptr predictor = @@ -391,7 +380,7 @@ void check_params(int argc, char **argv) { if (strcmp(argv[1], "det") == 0) { if (argc < 9){ std::cerr << "[ERROR] usage:" << argv[0] - << " det det_model num_threads batchsize power_mode img_dir det_config lite_benchmark_value" << std::endl; + << " det det_model runtime_device num_threads batchsize img_dir det_config lite_benchmark_value" << std::endl; exit(1); } } @@ -399,7 +388,7 @@ void check_params(int argc, char **argv) { if (strcmp(argv[1], "rec") == 0) { if (argc < 9){ std::cerr << "[ERROR] usage:" << argv[0] - << " rec rec_model num_threads batchsize power_mode img_dir key_txt lite_benchmark_value" << std::endl; + << " rec rec_model runtime_device num_threads batchsize img_dir key_txt lite_benchmark_value" << std::endl; exit(1); } } @@ -407,7 +396,7 @@ void check_params(int argc, char **argv) { if (strcmp(argv[1], "system") == 0) { if (argc < 12){ std::cerr << "[ERROR] usage:" << argv[0] - << " system det_model rec_model clas_model num_threads batchsize power_mode img_dir det_config key_txt lite_benchmark_value" << std::endl; + << " system det_model rec_model clas_model runtime_device num_threads batchsize img_dir det_config key_txt lite_benchmark_value" << std::endl; exit(1); } } @@ -417,15 +406,15 @@ void system(char **argv){ std::string det_model_file = argv[2]; std::string rec_model_file = argv[3]; std::string cls_model_file = argv[4]; - std::string precision = argv[5]; - std::string num_threads = argv[6]; - std::string batchsize = argv[7]; - std::string power_mode = argv[8]; + std::string runtime_device = argv[5]; + std::string precision = argv[6]; + std::string num_threads = argv[7]; + std::string batchsize = argv[8]; std::string img_dir = argv[9]; std::string det_config_path = argv[10]; std::string dict_path = argv[11]; - if (strcmp(argv[5], "FP32") != 0 && strcmp(argv[5], "INT8") != 0) { + if (strcmp(argv[6], "FP32") != 0 && strcmp(argv[6], "INT8") != 0) { std::cerr << "Only support FP32 or INT8." << std::endl; exit(1); } @@ -441,9 +430,9 @@ void system(char **argv){ charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc charactor_dict.push_back(" "); - auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads)); - auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads)); - auto cls_predictor = loadModel(cls_model_file, power_mode, std::stoi(num_threads)); + auto det_predictor = loadModel(det_model_file, std::stoi(num_threads)); + auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads)); + auto cls_predictor = loadModel(cls_model_file, std::stoi(num_threads)); for (int i = 0; i < cv_all_img_names.size(); ++i) { std::cout << "The predict img: " << cv_all_img_names[i] << std::endl; @@ -477,14 +466,14 @@ void system(char **argv){ void det(int argc, char **argv) { std::string det_model_file = argv[2]; - std::string precision = argv[3]; - std::string num_threads = argv[4]; - std::string batchsize = argv[5]; - std::string power_mode = argv[6]; + std::string runtime_device = argv[3]; + std::string precision = argv[4]; + std::string num_threads = argv[5]; + std::string batchsize = argv[6]; std::string img_dir = argv[7]; std::string det_config_path = argv[8]; - if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) { + if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) { std::cerr << "Only support FP32 or INT8." 
<< std::endl; exit(1); } @@ -495,7 +484,7 @@ void det(int argc, char **argv) { //// load config from txt file auto Config = LoadConfigTxt(det_config_path); - auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads)); + auto det_predictor = loadModel(det_model_file, std::stoi(num_threads)); std::vector time_info = {0, 0, 0}; for (int i = 0; i < cv_all_img_names.size(); ++i) { @@ -530,14 +519,11 @@ void det(int argc, char **argv) { if (strcmp(argv[9], "True") == 0) { AutoLogger autolog(det_model_file, - 0, - 0, - 0, + runtime_device, std::stoi(num_threads), std::stoi(batchsize), "dynamic", precision, - power_mode, time_info, cv_all_img_names.size()); autolog.report(); @@ -546,14 +532,14 @@ void det(int argc, char **argv) { void rec(int argc, char **argv) { std::string rec_model_file = argv[2]; - std::string precision = argv[3]; - std::string num_threads = argv[4]; - std::string batchsize = argv[5]; - std::string power_mode = argv[6]; + std::string runtime_device = argv[3]; + std::string precision = argv[4]; + std::string num_threads = argv[5]; + std::string batchsize = argv[6]; std::string img_dir = argv[7]; std::string dict_path = argv[8]; - if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) { + if (strcmp(argv[4], "FP32") != 0 && strcmp(argv[4], "INT8") != 0) { std::cerr << "Only support FP32 or INT8." << std::endl; exit(1); } @@ -565,7 +551,7 @@ void rec(int argc, char **argv) { charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc charactor_dict.push_back(" "); - auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads)); + auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads)); std::shared_ptr cls_predictor; @@ -603,14 +589,11 @@ void rec(int argc, char **argv) { // TODO: support autolog if (strcmp(argv[9], "True") == 0) { AutoLogger autolog(rec_model_file, - 0, - 0, - 0, + runtime_device, std::stoi(num_threads), std::stoi(batchsize), "dynamic", precision, - power_mode, time_info, cv_all_img_names.size()); autolog.report(); diff --git a/deploy/pdserving/pipeline_rpc_client.py b/deploy/pdserving/pipeline_rpc_client.py index 4dcb1ad5f533729e344809e99951b59fb2908537..3d2a90f443f76ba142bbae05e00ea76b083335ba 100644 --- a/deploy/pdserving/pipeline_rpc_client.py +++ b/deploy/pdserving/pipeline_rpc_client.py @@ -41,6 +41,6 @@ for img_file in os.listdir(test_img_dir): image_data = file.read() image = cv2_to_base64(image_data) -for i in range(1): - ret = client.predict(feed_dict={"image": image}, fetch=["res"]) - print(ret) + for i in range(1): + ret = client.predict(feed_dict={"image": image}, fetch=["res"]) + print(ret) diff --git a/deploy/slim/prune/export_prune_model.py b/deploy/slim/prune/export_prune_model.py index 29f7d211df7b2ad02bf2229f0be81c3cbe005503..2c9d0a1831c3c0de321668dfdde55aecb825ab06 100644 --- a/deploy/slim/prune/export_prune_model.py +++ b/deploy/slim/prune/export_prune_model.py @@ -30,7 +30,7 @@ from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model import tools.program as program @@ -89,7 +89,7 @@ def main(config, device, logger, vdl_writer): logger.info(f"FLOPs after pruning: {flops}") # load pretrain model - pre_best_model_dict = init_model(config, model, logger, None) + load_model(config, model) metric = program.eval(model, valid_dataloader, post_process_class, eval_class) 
logger.info(f"metric['hmean']: {metric['hmean']}") diff --git a/deploy/slim/prune/sensitivity_anal.py b/deploy/slim/prune/sensitivity_anal.py index 0f0492af2f57eea9b9c1d13ec5ee1dad9fc2f1bc..c5d008779eaafef36f4264b45295ec7bc78e3d27 100644 --- a/deploy/slim/prune/sensitivity_anal.py +++ b/deploy/slim/prune/sensitivity_anal.py @@ -32,7 +32,7 @@ from ppocr.losses import build_loss from ppocr.optimizer import build_optimizer from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model import tools.program as program dist.get_world_size() @@ -94,7 +94,7 @@ def main(config, device, logger, vdl_writer): # build metric eval_class = build_metric(config['Metric']) # load pretrain model - pre_best_model_dict = init_model(config, model, logger, optimizer) + pre_best_model_dict = load_model(config, model, optimizer) logger.info('train dataloader has {} iters, valid dataloader has {} iters'. format(len(train_dataloader), len(valid_dataloader))) diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py index d94e53034a2bf67b364e6d91f83acfb9e5445b8a..dddae923de223178665e3bfb55a2e7a8c0d5ba17 100755 --- a/deploy/slim/quantization/export_model.py +++ b/deploy/slim/quantization/export_model.py @@ -28,7 +28,7 @@ from paddle.jit import to_static from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model from ppocr.utils.logging import get_logger from tools.program import load_config, merge_config, ArgsParser from ppocr.metrics import build_metric @@ -101,7 +101,7 @@ def main(): quanter = QAT(config=quant_config) quanter.quantize(model) - init_model(config, model) + load_model(config, model) model.eval() # build metric diff --git a/deploy/slim/quantization/quant.py b/deploy/slim/quantization/quant.py index 37aab68a0e88afce54e10fb6248c73684b58d808..941cfb36b291dcd1dbedbf51de5edd2cf0017167 100755 --- a/deploy/slim/quantization/quant.py +++ b/deploy/slim/quantization/quant.py @@ -37,7 +37,7 @@ from ppocr.losses import build_loss from ppocr.optimizer import build_optimizer from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model import tools.program as program from paddleslim.dygraph.quant import QAT @@ -137,7 +137,7 @@ def main(config, device, logger, vdl_writer): # build metric eval_class = build_metric(config['Metric']) # load pretrain model - pre_best_model_dict = init_model(config, model, logger, optimizer) + pre_best_model_dict = load_model(config, model, optimizer) logger.info('train dataloader has {} iters, valid dataloader has {} iters'. 
format(len(train_dataloader), len(valid_dataloader))) diff --git a/deploy/slim/quantization/quant_kl.py b/deploy/slim/quantization/quant_kl.py index d866784ae6a3c087215320ec95bd39fdd1e89418..cc3a455b971937fbb2e401b87112475341bd41f3 100755 --- a/deploy/slim/quantization/quant_kl.py +++ b/deploy/slim/quantization/quant_kl.py @@ -37,7 +37,7 @@ from ppocr.losses import build_loss from ppocr.optimizer import build_optimizer from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model import tools.program as program import paddleslim from paddleslim.dygraph.quant import QAT diff --git a/doc/doc_ch/detection.md b/doc/doc_ch/detection.md index 208647ecdeef95658dc4261870fe823bac9853d3..cfc9d52bf280400982a9fcd9941ddc4cce3f5e5c 100644 --- a/doc/doc_ch/detection.md +++ b/doc/doc_ch/detection.md @@ -101,7 +101,7 @@ python3 tools/train.py -c configs/det/det_mv3_db.yml \ # 单机多卡训练,通过 --gpus 参数设置使用的GPU ID python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \ -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained - + # 多机多卡训练,通过 --ips 参数设置使用的机器IP地址,通过 --gpus 参数设置使用的GPU ID python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \ -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained @@ -109,14 +109,14 @@ python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1 上述指令中,通过-c 选择训练使用configs/det/det_db_mv3.yml配置文件。 有关配置文件的详细解释,请参考[链接](./config.md)。 - + 您也可以通过-o参数在不需要修改yml文件的情况下,改变训练的参数,比如,调整训练的学习率为0.0001 ```shell python3 tools/train.py -c configs/det/det_mv3_db.yml -o Optimizer.base_lr=0.0001 ``` - -**注意:** 采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通。查看机器ip地址的命令为`ifconfig`。 - + +**注意:** 采用多机多卡训练时,需要替换上面命令中的ips值为您机器的地址,机器之间需要能够相互ping通。另外,训练时需要在多个机器上分别启动命令。查看机器ip地址的命令为`ifconfig`。 + 如果您想进一步加快训练速度,可以使用[自动混合精度训练](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/basic_concept/amp_cn.html), 以单机单卡为例,命令如下: ```shell python3 tools/train.py -c configs/det/det_mv3_db.yml \ diff --git a/doc/doc_ch/whl.md b/doc/doc_ch/whl.md index ba5bbae6255382d0c7fa5be319946d6242b1a544..3b88709a2328409a266d0d482baa072dd7aa3824 100644 --- a/doc/doc_ch/whl.md +++ b/doc/doc_ch/whl.md @@ -420,3 +420,5 @@ im_show.save('result.jpg') | cls | 前向时是否启动分类 (命令行模式下使用use_angle_cls控制前向是否启动分类) | FALSE | | show_log | 是否打印det和rec等信息 | FALSE | | type | 执行ocr或者表格结构化, 值可选['ocr','structure'] | ocr | +| ocr_version | OCR模型版本,可选PP-OCRv2, PP-OCR。PP-OCRv2 目前仅支持中文的检测和识别模型,PP-OCR支持中文的检测,识别,多语种识别,方向分类器等模型 | PP-OCRv2 | +| structure_version | 表格结构化模型版本,可选 STRUCTURE。STRUCTURE支持表格结构化模型 | STRUCTURE | diff --git a/doc/doc_en/detection_en.md b/doc/doc_en/detection_en.md index dfee0814f5df6490d6fc4d67ad9f5686581e7d4f..a634dd4903483a819caee88cf6dd1781253e6f85 100644 --- a/doc/doc_en/detection_en.md +++ b/doc/doc_en/detection_en.md @@ -98,14 +98,14 @@ python3 tools/train.py -c configs/det/det_mv3_db.yml -o \ # multi-GPU training # Set the GPU ID used by the '--gpus' parameter. python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained - + # multi-Node, multi-GPU training # Set the IPs of your nodes used by the '--ips' parameter. Set the GPU ID used by the '--gpus' parameter. 
python3 -m paddle.distributed.launch --ips="xx.xx.xx.xx,xx.xx.xx.xx" --gpus '0,1,2,3' tools/train.py -c configs/det/det_mv3_db.yml \ -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained ``` -**Note:** For multi-Node multi-GPU training, you need to replace the `ips` value in the preceding command with the address of your machine, and the machines must be able to ping each other. The command for viewing the IP address of the machine is `ifconfig`. - +**Note:** For multi-Node multi-GPU training, you need to replace the `ips` value in the preceding command with the address of your machine, and the machines must be able to ping each other. In addition, it requires activating commands separately on multiple machines when we start the training. The command for viewing the IP address of the machine is `ifconfig`. + If you want to further speed up the training, you can use [automatic mixed precision training](https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/01_paddle2.0_introduction/basic_concept/amp_en.html). for single card training, the command is as follows: ``` python3 tools/train.py -c configs/det/det_mv3_db.yml \ diff --git a/doc/doc_en/whl_en.md b/doc/doc_en/whl_en.md index c2577e1e151e4675abab5139da099db9ad20fb4b..62aa452dcd36906c6480031375e6ca94f8a36de3 100644 --- a/doc/doc_en/whl_en.md +++ b/doc/doc_en/whl_en.md @@ -366,4 +366,6 @@ im_show.save('result.jpg') | rec | Enable recognition when `ppocr.ocr` func exec | TRUE | | cls | Enable classification when `ppocr.ocr` func exec((Use use_angle_cls in command line mode to control whether to start classification in the forward direction) | FALSE | | show_log | Whether to print log in det and rec | FALSE | -| type | Perform ocr or table structuring, the value is selected in ['ocr','structure'] | ocr | \ No newline at end of file +| type | Perform ocr or table structuring, the value is selected in ['ocr','structure'] | ocr | +| ocr_version | OCR Model version number, the current model support list is as follows: PP-OCRv2 support Chinese detection and recognition model, PP-OCR support Chinese detection, recognition and direction classifier, multilingual recognition model | PP-OCRv2 | +| structure_version | table structure Model version number, the current model support list is as follows: STRUCTURE support english table structure model | STRUCTURE | diff --git a/paddleocr.py b/paddleocr.py index a98efd34088701d5eb5602743cf75b7d5e80157f..028cfcc1faae3d9cca7d756b55213c030c7496de 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -16,6 +16,9 @@ import os import sys __dir__ = os.path.dirname(__file__) + +import paddle + sys.path.append(os.path.join(__dir__, '')) import cv2 @@ -29,7 +32,7 @@ from ppocr.utils.logging import get_logger logger = get_logger() from ppocr.utils.utility import check_and_read_gif, get_image_file_list from ppocr.utils.network import maybe_download, download_with_progressbar, is_link, confirm_model_dir_url -from tools.infer.utility import draw_ocr, str2bool +from tools.infer.utility import draw_ocr, str2bool, check_gpu from ppstructure.utility import init_args, draw_structure_result from ppstructure.predict_system import OCRSystem, save_structure_res @@ -39,130 +42,137 @@ __all__ = [ ] SUPPORT_DET_MODEL = ['DB'] -VERSION = '2.2.1' +VERSION = '2.3.0.1' SUPPORT_REC_MODEL = ['CRNN'] BASE_DIR = os.path.expanduser("~/.paddleocr/") -DEFAULT_MODEL_VERSION = '2.0' +DEFAULT_OCR_MODEL_VERSION = 'PP-OCR' +DEFAULT_STRUCTURE_MODEL_VERSION = 'STRUCTURE' MODEL_URLS = { - '2.1': { - 'det': { - 'ch': { - 
'url': - 'https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar', - }, - }, - 'rec': { - 'ch': { - 'url': - 'https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar', - 'dict_path': './ppocr/utils/ppocr_keys_v1.txt' - } - } - }, - '2.0': { - 'det': { - 'ch': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar', - }, - 'en': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_ppocr_mobile_v2.0_det_infer.tar', + 'OCR': { + 'PP-OCRv2': { + 'det': { + 'ch': { + 'url': + 'https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar', + }, }, - 'structure': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar' + 'rec': { + 'ch': { + 'url': + 'https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar', + 'dict_path': './ppocr/utils/ppocr_keys_v1.txt' + } } }, - 'rec': { - 'ch': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/ppocr_keys_v1.txt' - }, - 'en': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/en_dict.txt' - }, - 'french': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/french_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/french_dict.txt' - }, - 'german': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/german_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/german_dict.txt' - }, - 'korean': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/korean_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/korean_dict.txt' - }, - 'japan': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/japan_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/japan_dict.txt' - }, - 'chinese_cht': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/chinese_cht_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/chinese_cht_dict.txt' - }, - 'ta': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/ta_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/ta_dict.txt' + DEFAULT_OCR_MODEL_VERSION: { + 'det': { + 'ch': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar', + }, + 'en': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_ppocr_mobile_v2.0_det_infer.tar', + }, + 'structure': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar' + } }, - 'te': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/te_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/te_dict.txt' + 'rec': { + 'ch': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/ppocr_keys_v1.txt' + }, + 'en': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_number_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/en_dict.txt' + }, + 'french': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/french_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/french_dict.txt' + }, + 'german': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/german_mobile_v2.0_rec_infer.tar', + 
'dict_path': './ppocr/utils/dict/german_dict.txt' + }, + 'korean': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/korean_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/korean_dict.txt' + }, + 'japan': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/japan_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/japan_dict.txt' + }, + 'chinese_cht': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/chinese_cht_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/chinese_cht_dict.txt' + }, + 'ta': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/ta_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/ta_dict.txt' + }, + 'te': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/te_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/te_dict.txt' + }, + 'ka': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/ka_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/ka_dict.txt' + }, + 'latin': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/latin_ppocr_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/latin_dict.txt' + }, + 'arabic': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/arabic_ppocr_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/arabic_dict.txt' + }, + 'cyrillic': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/cyrillic_ppocr_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/cyrillic_dict.txt' + }, + 'devanagari': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/devanagari_ppocr_mobile_v2.0_rec_infer.tar', + 'dict_path': './ppocr/utils/dict/devanagari_dict.txt' + }, + 'structure': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar', + 'dict_path': 'ppocr/utils/dict/table_dict.txt' + } }, - 'ka': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/ka_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/ka_dict.txt' + 'cls': { + 'ch': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar', + } }, - 'latin': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/latin_ppocr_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/latin_dict.txt' - }, - 'arabic': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/arabic_ppocr_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/arabic_dict.txt' - }, - 'cyrillic': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/cyrillic_ppocr_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/cyrillic_dict.txt' - }, - 'devanagari': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/devanagari_ppocr_mobile_v2.0_rec_infer.tar', - 'dict_path': './ppocr/utils/dict/devanagari_dict.txt' - }, - 'structure': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar', - 'dict_path': 'ppocr/utils/dict/table_dict.txt' - } - }, - 'cls': { - 'ch': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar', - } - }, - 'table': { - 'en': { - 'url': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar', - 'dict_path': 'ppocr/utils/dict/table_structure_dict.txt' + } + }, + 
'STRUCTURE': { + DEFAULT_STRUCTURE_MODEL_VERSION: { + 'table': { + 'en': { + 'url': + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar', + 'dict_path': 'ppocr/utils/dict/table_structure_dict.txt' + } } } } @@ -177,7 +187,20 @@ def parse_args(mMain=True): parser.add_argument("--det", type=str2bool, default=True) parser.add_argument("--rec", type=str2bool, default=True) parser.add_argument("--type", type=str, default='ocr') - parser.add_argument("--version", type=str, default='2.1') + parser.add_argument( + "--ocr_version", + type=str, + default='PP-OCRv2', + help='OCR Model version, the current model support list is as follows: ' + '1. PP-OCRv2 Support Chinese detection and recognition model. ' + '2. PP-OCR support Chinese detection, recognition and direction classifier and multilingual recognition model.' + ) + parser.add_argument( + "--structure_version", + type=str, + default='STRUCTURE', + help='Model version, the current model support list is as follows:' + ' 1. STRUCTURE Support en table structure model.') for action in parser._actions: if action.dest in ['rec_char_dict_path', 'table_char_dict_path']: @@ -215,9 +238,9 @@ def parse_lang(lang): lang = "cyrillic" elif lang in devanagari_lang: lang = "devanagari" - assert lang in MODEL_URLS[DEFAULT_MODEL_VERSION][ + assert lang in MODEL_URLS['OCR'][DEFAULT_OCR_MODEL_VERSION][ 'rec'], 'param lang must in {}, but got {}'.format( - MODEL_URLS[DEFAULT_MODEL_VERSION]['rec'].keys(), lang) + MODEL_URLS['OCR'][DEFAULT_OCR_MODEL_VERSION]['rec'].keys(), lang) if lang == "ch": det_lang = "ch" elif lang == 'structure': @@ -227,33 +250,41 @@ def parse_lang(lang): return lang, det_lang -def get_model_config(version, model_type, lang): - if version not in MODEL_URLS: - logger.warning('version {} not in {}, use version {} instead'.format( - version, MODEL_URLS.keys(), DEFAULT_MODEL_VERSION)) +def get_model_config(type, version, model_type, lang): + if type == 'OCR': + DEFAULT_MODEL_VERSION = DEFAULT_OCR_MODEL_VERSION + elif type == 'STRUCTURE': + DEFAULT_MODEL_VERSION = DEFAULT_STRUCTURE_MODEL_VERSION + else: + raise NotImplementedError + model_urls = MODEL_URLS[type] + if version not in model_urls: + logger.warning('version {} not in {}, auto switch to version {}'.format( + version, model_urls.keys(), DEFAULT_MODEL_VERSION)) version = DEFAULT_MODEL_VERSION - if model_type not in MODEL_URLS[version]: - if model_type in MODEL_URLS[DEFAULT_MODEL_VERSION]: + if model_type not in model_urls[version]: + if model_type in model_urls[DEFAULT_MODEL_VERSION]: logger.warning( - 'version {} not support {} models, use version {} instead'. + 'version {} not support {} models, auto switch to version {}'. format(version, model_type, DEFAULT_MODEL_VERSION)) version = DEFAULT_MODEL_VERSION else: logger.error('{} models is not support, we only support {}'.format( - model_type, MODEL_URLS[DEFAULT_MODEL_VERSION].keys())) + model_type, model_urls[DEFAULT_MODEL_VERSION].keys())) sys.exit(-1) - if lang not in MODEL_URLS[version][model_type]: - if lang in MODEL_URLS[DEFAULT_MODEL_VERSION][model_type]: - logger.warning('lang {} is not support in {}, use {} instead'. - format(lang, version, DEFAULT_MODEL_VERSION)) + if lang not in model_urls[version][model_type]: + if lang in model_urls[DEFAULT_MODEL_VERSION][model_type]: + logger.warning( + 'lang {} is not support in {}, auto switch to version {}'. 
+ format(lang, version, DEFAULT_MODEL_VERSION)) version = DEFAULT_MODEL_VERSION else: logger.error( 'lang {} is not support, we only support {} for {} models'. - format(lang, MODEL_URLS[DEFAULT_MODEL_VERSION][model_type].keys( + format(lang, model_urls[DEFAULT_MODEL_VERSION][model_type].keys( ), model_type)) sys.exit(-1) - return MODEL_URLS[version][model_type][lang] + return model_urls[version][model_type][lang] class PaddleOCR(predict_system.TextSystem): @@ -265,23 +296,28 @@ class PaddleOCR(predict_system.TextSystem): """ params = parse_args(mMain=False) params.__dict__.update(**kwargs) + params.use_gpu = check_gpu(params.use_gpu) + if not params.show_log: logger.setLevel(logging.INFO) self.use_angle_cls = params.use_angle_cls lang, det_lang = parse_lang(params.lang) # init model dir - det_model_config = get_model_config(params.version, 'det', det_lang) + det_model_config = get_model_config('OCR', params.ocr_version, 'det', + det_lang) params.det_model_dir, det_url = confirm_model_dir_url( params.det_model_dir, os.path.join(BASE_DIR, VERSION, 'ocr', 'det', det_lang), det_model_config['url']) - rec_model_config = get_model_config(params.version, 'rec', lang) + rec_model_config = get_model_config('OCR', params.ocr_version, 'rec', + lang) params.rec_model_dir, rec_url = confirm_model_dir_url( params.rec_model_dir, os.path.join(BASE_DIR, VERSION, 'ocr', 'rec', lang), rec_model_config['url']) - cls_model_config = get_model_config(params.version, 'cls', 'ch') + cls_model_config = get_model_config('OCR', params.ocr_version, 'cls', + 'ch') params.cls_model_dir, cls_url = confirm_model_dir_url( params.cls_model_dir, os.path.join(BASE_DIR, VERSION, 'ocr', 'cls'), @@ -362,22 +398,27 @@ class PPStructure(OCRSystem): def __init__(self, **kwargs): params = parse_args(mMain=False) params.__dict__.update(**kwargs) + params.use_gpu = check_gpu(params.use_gpu) + if not params.show_log: logger.setLevel(logging.INFO) lang, det_lang = parse_lang(params.lang) # init model dir - det_model_config = get_model_config(params.version, 'det', det_lang) + det_model_config = get_model_config('OCR', params.ocr_version, 'det', + det_lang) params.det_model_dir, det_url = confirm_model_dir_url( params.det_model_dir, os.path.join(BASE_DIR, VERSION, 'ocr', 'det', det_lang), det_model_config['url']) - rec_model_config = get_model_config(params.version, 'rec', lang) + rec_model_config = get_model_config('OCR', params.ocr_version, 'rec', + lang) params.rec_model_dir, rec_url = confirm_model_dir_url( params.rec_model_dir, os.path.join(BASE_DIR, VERSION, 'ocr', 'rec', lang), rec_model_config['url']) - table_model_config = get_model_config(params.version, 'table', 'en') + table_model_config = get_model_config( + 'STRUCTURE', params.structure_version, 'table', 'en') params.table_model_dir, table_url = confirm_model_dir_url( params.table_model_dir, os.path.join(BASE_DIR, VERSION, 'ocr', 'table'), diff --git a/ppocr/modeling/architectures/distillation_model.py b/ppocr/modeling/architectures/distillation_model.py index 1e95fe574433eaca6f322ff47c8547cc1a29a248..5e867940e796841111fc668a0b3eb12547807d76 100644 --- a/ppocr/modeling/architectures/distillation_model.py +++ b/ppocr/modeling/architectures/distillation_model.py @@ -21,7 +21,7 @@ from ppocr.modeling.backbones import build_backbone from ppocr.modeling.necks import build_neck from ppocr.modeling.heads import build_head from .base_model import BaseModel -from ppocr.utils.save_load import init_model, load_pretrained_params +from ppocr.utils.save_load import load_pretrained_params 
__all__ = ['DistillationModel'] diff --git a/ppocr/modeling/heads/table_att_head.py b/ppocr/modeling/heads/table_att_head.py index 155f036d15673135eae9e5ee493648603609535d..e354f40d6518c1f7ca22e93694b1c6668fc003d2 100644 --- a/ppocr/modeling/heads/table_att_head.py +++ b/ppocr/modeling/heads/table_att_head.py @@ -23,32 +23,40 @@ import numpy as np class TableAttentionHead(nn.Layer): - def __init__(self, in_channels, hidden_size, loc_type, in_max_len=488, **kwargs): + def __init__(self, + in_channels, + hidden_size, + loc_type, + in_max_len=488, + max_text_length=100, + max_elem_length=800, + max_cell_num=500, + **kwargs): super(TableAttentionHead, self).__init__() self.input_size = in_channels[-1] self.hidden_size = hidden_size self.elem_num = 30 - self.max_text_length = 100 - self.max_elem_length = 500 - self.max_cell_num = 500 + self.max_text_length = max_text_length + self.max_elem_length = max_elem_length + self.max_cell_num = max_cell_num self.structure_attention_cell = AttentionGRUCell( self.input_size, hidden_size, self.elem_num, use_gru=False) self.structure_generator = nn.Linear(hidden_size, self.elem_num) self.loc_type = loc_type self.in_max_len = in_max_len - + if self.loc_type == 1: self.loc_generator = nn.Linear(hidden_size, 4) else: if self.in_max_len == 640: - self.loc_fea_trans = nn.Linear(400, self.max_elem_length+1) + self.loc_fea_trans = nn.Linear(400, self.max_elem_length + 1) elif self.in_max_len == 800: - self.loc_fea_trans = nn.Linear(625, self.max_elem_length+1) + self.loc_fea_trans = nn.Linear(625, self.max_elem_length + 1) else: - self.loc_fea_trans = nn.Linear(256, self.max_elem_length+1) + self.loc_fea_trans = nn.Linear(256, self.max_elem_length + 1) self.loc_generator = nn.Linear(self.input_size + hidden_size, 4) - + def _char_to_onehot(self, input_char, onehot_dim): input_ont_hot = F.one_hot(input_char, onehot_dim) return input_ont_hot @@ -60,16 +68,16 @@ class TableAttentionHead(nn.Layer): if len(fea.shape) == 3: pass else: - last_shape = int(np.prod(fea.shape[2:])) # gry added + last_shape = int(np.prod(fea.shape[2:])) # gry added fea = paddle.reshape(fea, [fea.shape[0], fea.shape[1], last_shape]) fea = fea.transpose([0, 2, 1]) # (NTC)(batch, width, channels) batch_size = fea.shape[0] - + hidden = paddle.zeros((batch_size, self.hidden_size)) output_hiddens = [] if self.training and targets is not None: structure = targets[0] - for i in range(self.max_elem_length+1): + for i in range(self.max_elem_length + 1): elem_onehots = self._char_to_onehot( structure[:, i], onehot_dim=self.elem_num) (outputs, hidden), alpha = self.structure_attention_cell( @@ -96,7 +104,7 @@ class TableAttentionHead(nn.Layer): alpha = None max_elem_length = paddle.to_tensor(self.max_elem_length) i = 0 - while i < max_elem_length+1: + while i < max_elem_length + 1: elem_onehots = self._char_to_onehot( temp_elem, onehot_dim=self.elem_num) (outputs, hidden), alpha = self.structure_attention_cell( @@ -105,7 +113,7 @@ class TableAttentionHead(nn.Layer): structure_probs_step = self.structure_generator(outputs) temp_elem = structure_probs_step.argmax(axis=1, dtype="int32") i += 1 - + output = paddle.concat(output_hiddens, axis=1) structure_probs = self.structure_generator(output) structure_probs = F.softmax(structure_probs) @@ -119,9 +127,9 @@ class TableAttentionHead(nn.Layer): loc_concat = paddle.concat([output, loc_fea], axis=2) loc_preds = self.loc_generator(loc_concat) loc_preds = F.sigmoid(loc_preds) - return {'structure_probs':structure_probs, 'loc_preds':loc_preds} + return 
{'structure_probs': structure_probs, 'loc_preds': loc_preds} + - class AttentionGRUCell(nn.Layer): def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False): super(AttentionGRUCell, self).__init__() diff --git a/ppocr/utils/network.py b/ppocr/utils/network.py index 453abb693d4c0ed370c1031b677d5bf51661add9..118d1be364925d9416134cffe21d636fcac753e9 100644 --- a/ppocr/utils/network.py +++ b/ppocr/utils/network.py @@ -24,15 +24,17 @@ from ppocr.utils.logging import get_logger def download_with_progressbar(url, save_path): logger = get_logger() response = requests.get(url, stream=True) - total_size_in_bytes = int(response.headers.get('content-length', 0)) - block_size = 1024 # 1 Kibibyte - progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) - with open(save_path, 'wb') as file: - for data in response.iter_content(block_size): - progress_bar.update(len(data)) - file.write(data) - progress_bar.close() - if total_size_in_bytes == 0 or progress_bar.n != total_size_in_bytes: + if response.status_code == 200: + total_size_in_bytes = int(response.headers.get('content-length', 1)) + block_size = 1024 # 1 Kibibyte + progress_bar = tqdm( + total=total_size_in_bytes, unit='iB', unit_scale=True) + with open(save_path, 'wb') as file: + for data in response.iter_content(block_size): + progress_bar.update(len(data)) + file.write(data) + progress_bar.close() + else: logger.error("Something went wrong while downloading models") sys.exit(0) @@ -45,7 +47,7 @@ def maybe_download(model_storage_directory, url): if not os.path.exists( os.path.join(model_storage_directory, 'inference.pdiparams') ) or not os.path.exists( - os.path.join(model_storage_directory, 'inference.pdmodel')): + os.path.join(model_storage_directory, 'inference.pdmodel')): assert url.endswith('.tar'), 'Only supports tar compressed package' tmp_path = os.path.join(model_storage_directory, url.split('/')[-1]) print('download {} to {}'.format(url, tmp_path)) diff --git a/ppocr/utils/save_load.py b/ppocr/utils/save_load.py index a7d24dd71a6e35ca619c2a3f90df3a202b8ad94b..702f3e9770d0572e9128357bfd6b39199566a959 100644 --- a/ppocr/utils/save_load.py +++ b/ppocr/utils/save_load.py @@ -25,7 +25,7 @@ import paddle from ppocr.utils.logging import get_logger -__all__ = ['init_model', 'save_model', 'load_dygraph_params'] +__all__ = ['load_model'] def _mkdir_if_not_exist(path, logger): @@ -44,7 +44,7 @@ def _mkdir_if_not_exist(path, logger): raise OSError('Failed to mkdir {}'.format(path)) -def init_model(config, model, optimizer=None, lr_scheduler=None): +def load_model(config, model, optimizer=None): """ load model from checkpoint or pretrained_model """ @@ -54,15 +54,14 @@ def init_model(config, model, optimizer=None, lr_scheduler=None): pretrained_model = global_config.get('pretrained_model') best_model_dict = {} if checkpoints: - assert os.path.exists(checkpoints + ".pdparams"), \ - "Given dir {}.pdparams not exist.".format(checkpoints) + if checkpoints.endswith('pdparams'): + checkpoints = checkpoints.replace('.pdparams', '') assert os.path.exists(checkpoints + ".pdopt"), \ - "Given dir {}.pdopt not exist.".format(checkpoints) - para_dict = paddle.load(checkpoints + '.pdparams') - opti_dict = paddle.load(checkpoints + '.pdopt') - model.set_state_dict(para_dict) + f"The {checkpoints}.pdopt does not exists!" 
+ load_pretrained_params(model, checkpoints) + optim_dict = paddle.load(checkpoints + '.pdopt') if optimizer is not None: - optimizer.set_state_dict(opti_dict) + optimizer.set_state_dict(optim_dict) if os.path.exists(checkpoints + '.states'): with open(checkpoints + '.states', 'rb') as f: @@ -73,70 +72,31 @@ def init_model(config, model, optimizer=None, lr_scheduler=None): best_model_dict['start_epoch'] = states_dict['epoch'] + 1 logger.info("resume from {}".format(checkpoints)) elif pretrained_model: - if not isinstance(pretrained_model, list): - pretrained_model = [pretrained_model] - for pretrained in pretrained_model: - if not (os.path.isdir(pretrained) or - os.path.exists(pretrained + '.pdparams')): - raise ValueError("Model pretrain path {} does not " - "exists.".format(pretrained)) - param_state_dict = paddle.load(pretrained + '.pdparams') - model.set_state_dict(param_state_dict) - logger.info("load pretrained model from {}".format( - pretrained_model)) + load_pretrained_params(model, pretrained_model) else: logger.info('train from scratch') return best_model_dict -def load_dygraph_params(config, model, logger, optimizer): - ckp = config['Global']['checkpoints'] - if ckp and os.path.exists(ckp + ".pdparams"): - pre_best_model_dict = init_model(config, model, optimizer) - return pre_best_model_dict - else: - pm = config['Global']['pretrained_model'] - if pm is None: - return {} - if not os.path.exists(pm) and not os.path.exists(pm + ".pdparams"): - logger.info(f"The pretrained_model {pm} does not exists!") - return {} - pm = pm if pm.endswith('.pdparams') else pm + '.pdparams' - params = paddle.load(pm) - state_dict = model.state_dict() - new_state_dict = {} - for k1, k2 in zip(state_dict.keys(), params.keys()): - if list(state_dict[k1].shape) == list(params[k2].shape): - new_state_dict[k1] = params[k2] - else: - logger.info( - f"The shape of model params {k1} {state_dict[k1].shape} not matched with loaded params {k2} {params[k2].shape} !" - ) - model.set_state_dict(new_state_dict) - logger.info(f"loaded pretrained_model successful from {pm}") - return {} - - def load_pretrained_params(model, path): - if path is None: - return False - if not os.path.exists(path) and not os.path.exists(path + ".pdparams"): - print(f"The pretrained_model {path} does not exists!") - return False - - path = path if path.endswith('.pdparams') else path + '.pdparams' - params = paddle.load(path) + logger = get_logger() + if path.endswith('pdparams'): + path = path.replace('.pdparams', '') + assert os.path.exists(path + ".pdparams"), \ + f"The {path}.pdparams does not exists!" + + params = paddle.load(path + '.pdparams') state_dict = model.state_dict() new_state_dict = {} for k1, k2 in zip(state_dict.keys(), params.keys()): if list(state_dict[k1].shape) == list(params[k2].shape): new_state_dict[k1] = params[k2] else: - print( + logger.info( f"The shape of model params {k1} {state_dict[k1].shape} not matched with loaded params {k2} {params[k2].shape} !" 
) model.set_state_dict(new_state_dict) - print(f"load pretrain successful from {path}") + logger.info(f"load pretrain successful from {path}") return model diff --git a/test_tipc/configs/amp_ppocr_det_mobile_params.txt b/test_tipc/configs/amp_ppocr_det_mobile_params.txt deleted file mode 100644 index 1c9978753e663c7b466a55d70657f515c12df18b..0000000000000000000000000000000000000000 --- a/test_tipc/configs/amp_ppocr_det_mobile_params.txt +++ /dev/null @@ -1,110 +0,0 @@ -===========================train_params=========================== -model_name:ocr_det -python:python3.7 -gpu_list:0|0,1 -Global.use_gpu:True|True -Global.auto_cast:amp -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o -distill_export:null -export1:null -export2:null -inference_dir:null -train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr det ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_det -python:python3.7 -trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 -op.det.local_service_conf.devices:null|0 -op.det.local_service_conf.use_mkldnn:True|False 
-op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs -===========================kl_quant_params=========================== -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:True -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -null:null -===========================lite_params=========================== -inference:./ocr_db_crnn det -infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb ---cpu_threads:1|4 ---batch_size:1 ---power_mode:LITE_POWER_HIGH|LITE_POWER_LOW ---image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ---config_dir:./config.txt ---rec_dict_dir:./ppocr_keys_v1.txt ---benchmark:True diff --git a/test_tipc/configs/fleet_ppocr_det_mobile_params.txt b/test_tipc/configs/fleet_ppocr_det_mobile_params.txt deleted file mode 100644 index 99278845e43f1a56239b508e49c1670f5bc77922..0000000000000000000000000000000000000000 --- a/test_tipc/configs/fleet_ppocr_det_mobile_params.txt +++ /dev/null @@ -1,110 +0,0 @@ -===========================train_params=========================== -model_name:ocr_det -python:python3.7 -gpu_list:xx.xx.xx.xx,xx.xx.xx.xx;0,1 -Global.use_gpu:True|True -Global.auto_cast:null|amp -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o -distill_export:null -export1:null -export2:null -inference_dir:null -train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: 
---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr det ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_det -python:python3.7 -trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 -op.det.local_service_conf.devices:null|0 -op.det.local_service_conf.use_mkldnn:True|False -op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py --image_dir=../../doc/imgs -===========================kl_quant_params=========================== -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:True -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -null:null -===========================lite_params=========================== -inference:./ocr_db_crnn det -infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb ---cpu_threads:1|4 ---batch_size:1 ---power_mode:LITE_POWER_HIGH|LITE_POWER_LOW ---image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ---config_dir:./config.txt ---rec_dict_dir:./ppocr_keys_v1.txt ---benchmark:True diff --git a/test_tipc/configs/jeston_ppocr_det_mobile_params.txt b/test_tipc/configs/jeston_ppocr_det_mobile_params.txt deleted file mode 100644 index 7e2bba2462bc4517f13da35a6a3bb527275bb85f..0000000000000000000000000000000000000000 --- a/test_tipc/configs/jeston_ppocr_det_mobile_params.txt +++ /dev/null @@ -1,51 +0,0 @@ -===========================train_params=========================== -model_name:ocr_det -python:python -gpu_list:null -Global.use_gpu:null -Global.auto_cast:null -Global.epoch_num:null -Global.save_model_dir:null -Train.loader.batch_size_per_card:null -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:null -null:null -## -trainer:null -norm_train:null -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model:null -norm_export:null -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -inference_dir:null 
-train_model:./inference/ch_ppocr_mobile_v2.0_det_infer -infer_export:null -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp16|fp32 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null diff --git a/test_tipc/configs/det_mv3_db.yml b/test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml similarity index 100% rename from test_tipc/configs/det_mv3_db.yml rename to test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b4139876976f0f1c34619a8fb4e0b3b6c35c9e0 --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt @@ -0,0 +1,16 @@ +===========================cpp_infer_params=========================== +model_name:ocr_det +use_opencv:True +infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ +infer_quant:False +inference:./deploy/cpp_infer/build/ppocr det +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True \ No newline at end of file diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d3f60bd42aad18c045aeee70fc60d2c17a2af13 --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_python_jetson.txt @@ -0,0 +1,18 @@ +===========================infer_params=========================== +model_name:ocr_det +python:python +infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer +infer_export:null +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp16|fp32 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..af71ce3b870e27f9dc046f00a53f266950f6f112 --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt @@ -0,0 +1,12 @@ +===========================lite_params=========================== +inference:./ocr_db_crnn det +infer_model:ch_PP-OCRv2_det_infer|ch_PP-OCRv2_det_slim_quant_infer +runtime_device:ARM_CPU +--cpu_threads:1|4 +--det_batch_size:1 +--rec_batch_size:1 +--system_batch_size:1 +--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/ +--config_dir:./config.txt +--rec_dict_dir:./ppocr_keys_v1.txt +--benchmark:True diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 
0000000000000000000000000000000000000000..160bcdbd88661c3d795eb2faf6b93965598c3e22 --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,14 @@ +===========================paddle2onnx_params=========================== +model_name:ocr_det_mobile +python:python3.7 +2onnx: paddle2onnx +--model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./inference/det_mobile_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_det.py +--use_gpu:True|False +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ \ No newline at end of file diff --git a/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..a19c8ee3355b010b55d1dbf16aa0e21940ba546c --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================serving_params=========================== +model_name:ocr_det_mobile +python:python3.7|cpp +trans_model:-m paddle_serving_client.convert +--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ +--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ +serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 +op.det.local_service_conf.devices:null|0 +op.det.local_service_conf.use_mkldnn:True|False +op.det.local_service_conf.thread_num:1|6 +op.det.local_service_conf.use_trt:False|True +op.det.local_service_conf.precision:fp32|fp16|int8 +pipline:pipeline_rpc_client.py|pipeline_http_client.py +--image_dir:../../doc/imgs \ No newline at end of file diff --git a/test_tipc/configs/ppocr_det_mobile/train_infer_python.txt b/test_tipc/configs/ppocr_det_mobile/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..17cc924a5bb4f06b8585547ddb9dcc2e5614e854 --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/train_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ocr_det +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train|pact_train|fpgm_train +norm_train:tools/train.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## 
+===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_mobile/det_mv3_db.yml -o +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null \ No newline at end of file diff --git a/test_tipc/configs/mac_ppocr_det_mobile_params.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_cpu_normal_normal_infer_python_mac.txt similarity index 100% rename from test_tipc/configs/mac_ppocr_det_mobile_params.txt rename to test_tipc/configs/ppocr_det_mobile/train_linux_cpu_normal_normal_infer_python_mac.txt diff --git a/test_tipc/configs/ppocr_det_mobile/train_linux_dcu_normal_normal_infer_python_dcu.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_dcu_normal_normal_infer_python_dcu.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..6f5aa58b47154fa4d9f555e906c0e16086612628 --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ocr_det +python:python3.7 +gpu_list:xx.xx.xx.xx,yy.yy.yy.yy;0,1 +Global.use_gpu:True +Global.auto_cast:fp32|amp +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train|pact_train|fpgm_train +norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o +fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py -c 
test_tipc/configs/det_mv3_db.yml -o +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null diff --git a/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..8171a5aea94d29da88de2606afe8dc27b0f6512a --- /dev/null +++ b/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ocr_det +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:amp +Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train|pact_train|fpgm_train +norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o +fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +null:null +--benchmark:True +null:null diff --git a/test_tipc/configs/win_ppocr_det_mobile_params.txt b/test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_normal_infer_python_windows.txt similarity index 100% rename from test_tipc/configs/win_ppocr_det_mobile_params.txt rename to test_tipc/configs/ppocr_det_mobile/train_linux_gpu_normal_normal_infer_python_windows.txt diff --git a/test_tipc/configs/ppocr_det_mobile_params.txt b/test_tipc/configs/ppocr_det_mobile_params.txt deleted file mode 100644 index 
0ccf77d1f91bde4e0ef191d11debb5818dcd49e0..0000000000000000000000000000000000000000 --- a/test_tipc/configs/ppocr_det_mobile_params.txt +++ /dev/null @@ -1,123 +0,0 @@ -===========================train_params=========================== -model_name:ocr_det -python:python3.7 -gpu_list:0|0,1 -Global.use_gpu:True|True -Global.auto_cast:null -Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_lite_infer=2|whole_train_whole_infer=4 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ -null:null -## -trainer:norm_train|pact_train|fpgm_train -norm_train:tools/train.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -pact_train:deploy/slim/quantization/quant.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_train:deploy/slim/prune/sensitivity_anal.py -c test_tipc/configs/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/det_mv3_db.yml -o -distill_export:null -export1:null -export2:null -inference_dir:null -train_model:./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr det ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_det -python:python3.7 -trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 -op.det.local_service_conf.devices:null|0 -op.det.local_service_conf.use_mkldnn:True|False -op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_rpc_client.py|pipeline_http_client.py ---image_dir:../../doc/imgs -===========================kl_quant_params=========================== 
-infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o -infer_quant:True -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -null:null -null:null -===========================lite_params=========================== -inference:./ocr_db_crnn det -infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb ---cpu_threads:1|4 ---batch_size:1 ---power_mode:LITE_POWER_HIGH|LITE_POWER_LOW ---image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ---config_dir:./config.txt ---rec_dict_dir:./ppocr_keys_v1.txt ---benchmark:True -===========================paddle2onnx_params=========================== -2onnx: paddle2onnx ---model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---save_file:./inference/det_mobile_onnx/model.onnx ---opset_version:10 ---enable_onnx_checker:True -inference:tools/infer/predict_det.py ---use_gpu:False ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ \ No newline at end of file diff --git a/test_tipc/configs/det_r50_vd_db.yml b/test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml similarity index 100% rename from test_tipc/configs/det_r50_vd_db.yml rename to test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml diff --git a/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..40fdc11241f3ac966ff01d4c51173f990cc594c5 --- /dev/null +++ b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,14 @@ +===========================paddle2onnx_params=========================== +model_name:ocr_det_server +python:python3.7 +2onnx: paddle2onnx +--model_dir:./inference/ch_ppocr_server_v2.0_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./inference/det_server_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_det.py +--use_gpu:True|False +--det_model_dir: +--image_dir:./inference/det_inference \ No newline at end of file diff --git a/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..09b7ab750408a54fa292f1168d8de01bd962ca43 --- /dev/null +++ b/test_tipc/configs/ppocr_det_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================serving_params=========================== +model_name:ocr_det_server +python:python3.7|cpp +trans_model:-m paddle_serving_client.convert +--dirname:./inference/ch_ppocr_server_v2.0_det_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/pdserving/ppocr_det_server_2.0_serving/ +--serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/ 
+serving_dir:./deploy/pdserving +web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 +op.det.local_service_conf.devices:null|0 +op.det.local_service_conf.use_mkldnn:True|False +op.det.local_service_conf.thread_num:1|6 +op.det.local_service_conf.use_trt:False|True +op.det.local_service_conf.precision:fp32|fp16|int8 +pipline:pipeline_rpc_client.py|pipeline_http_client.py +--image_dir:../../doc/imgs_words_en \ No newline at end of file diff --git a/test_tipc/configs/ppocr_det_server/train_infer_python.txt b/test_tipc/configs/ppocr_det_server/train_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..35aa1886808cb0a9edf6f7db230954080bdb53eb --- /dev/null +++ b/test_tipc/configs/ppocr_det_server/train_infer_python.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ocr_server_det +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_infer=2|whole_train_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train|pact_train|fpgm_export +norm_train:tools/train.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c test_tipc/configs/ppocr_det_server/det_r50_vd_db.yml -o +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +train_model:./inference/ch_ppocr_server_v2.0_det_train/best_accuracy +infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--save_log_path:null +--benchmark:True +null:null \ No newline at end of file diff --git a/test_tipc/configs/ppocr_det_server_params.txt b/test_tipc/configs/ppocr_det_server_params.txt deleted file mode 100644 index f688fffac8824b0608ea6b6cec0683c70feb659e..0000000000000000000000000000000000000000 --- a/test_tipc/configs/ppocr_det_server_params.txt +++ /dev/null @@ -1,84 +0,0 @@ -===========================train_params=========================== -model_name:ocr_server_det -python:python3.7 -gpu_list:0|0,1 -Global.use_gpu:True|True -Global.auto_cast:null -Global.epoch_num:lite_train_infer=2|whole_train_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ -null:null -## -trainer:norm_train|pact_train|fpgm_export -norm_train:tools/train.py -c 
tests/configs/det_r50_vd_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c tests/configs/det_r50_vd_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -c tests/configs/det_r50_vd_db.yml -o -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c tests/configs/det_r50_vd_db.yml -o -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c tests/configs/det_r50_vd_db.yml -o -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -## -train_model:./inference/ch_ppocr_server_v2.0_det_train/best_accuracy -infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_res18_db_v2.0.yml -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---save_log_path:null ---benchmark:True -null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr det ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_det_server -python:python3.7 -trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_server_v2.0_det_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1 -op.det.local_service_conf.devices:null|0 -op.det.local_service_conf.use_mkldnn:True|False -op.det.local_service_conf.thread_num:1|6 -op.det.local_service_conf.use_trt:False|True -op.det.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py|pipeline_rpc_client.py ---image_dir=../../doc/imgs diff --git a/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..f29b303879f555eaa9a392633aed6e0095f05cfb --- /dev/null +++ b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,14 @@ +===========================paddle2onnx_params=========================== +model_name:ocr_rec_mobile +python:python3.7 +2onnx: paddle2onnx +--model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./inference/rec_mobile_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_rec.py +--use_gpu:True|False +--rec_model_dir: +--image_dir:./inference/rec_inference \ No newline at end of file diff --git 
a/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..7351e5bd6d5d8ffc5d49b313ad662b1e2fd55bd2 --- /dev/null +++ b/test_tipc/configs/ppocr_rec_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================serving_params=========================== +model_name:ocr_rec_mobile +python:python3.7|cpp +trans_model:-m paddle_serving_client.convert +--dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/ +--serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/ +serving_dir:./deploy/pdserving +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 +op.rec.local_service_conf.devices:null|0 +op.rec.local_service_conf.use_mkldnn:True|False +op.rec.local_service_conf.thread_num:1|6 +op.rec.local_service_conf.use_trt:False|True +op.rec.local_service_conf.precision:fp32|fp16|int8 +pipline:pipeline_rpc_client.py|pipeline_http_client.py +--image_dir:../../doc/imgs_words_en \ No newline at end of file diff --git a/test_tipc/configs/ppocr_rec_mobile_params.txt b/test_tipc/configs/ppocr_rec_mobile_params.txt deleted file mode 100644 index 3177d19cf6cf7759e13e5597492f3bd7fcea78ff..0000000000000000000000000000000000000000 --- a/test_tipc/configs/ppocr_rec_mobile_params.txt +++ /dev/null @@ -1,84 +0,0 @@ -===========================train_params=========================== -model_name:ocr_rec -python:python3.7 -gpu_list:0|0,1 -Global.use_gpu:True|True -Global.auto_cast:null -Global.epoch_num:lite_train_infer=2|whole_train_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./inference/rec_inference -null:null -## -trainer:norm_train|pact_train -norm_train:tools/train.py -c configs/rec/rec_icdar15_train.yml -o -pact_train:deploy/slim/quantization/quant.py -c configs/rec/rec_icdar15_train.yml -o -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c configs/rec/rec_icdar15_train.yml -o -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o -quant_export:deploy/slim/quantization/export_model.py -c configs/rec/rec_icdar15_train.yml -o -fpgm_export:null -distill_export:null -export1:null -export2:null -## -infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/ -infer_export:null -infer_quant:False -inference:tools/infer/predict_rec.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|fp16|int8 ---rec_model_dir: ---image_dir:./inference/rec_inference ---save_log_path:./test/output/ ---benchmark:True -null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr rec ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 
---use_tensorrt:False|True ---precision:fp32|fp16 ---rec_model_dir: ---image_dir:./inference/rec_inference/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_rec -python:python3.7 -trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_mobile_v2.0_rec_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 -op.rec.local_service_conf.devices:null|0 -op.rec.local_service_conf.use_mkldnn:True|False -op.rec.local_service_conf.thread_num:1|6 -op.rec.local_service_conf.use_trt:False|True -op.rec.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py|pipeline_rpc_client.py ---image_dir=../../doc/imgs_words_en diff --git a/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..05542332e94eab38d8a433a727e04bf0be15f423 --- /dev/null +++ b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt @@ -0,0 +1,14 @@ +===========================paddle2onnx_params=========================== +model_name:ocr_rec_server +python:python3.7 +2onnx: paddle2onnx +--model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--save_file:./inference/rec_server_onnx/model.onnx +--opset_version:10 +--enable_onnx_checker:True +inference:tools/infer/predict_rec.py +--use_gpu:True|False +--rec_model_dir: +--image_dir:./inference/rec_inference \ No newline at end of file diff --git a/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt new file mode 100644 index 0000000000000000000000000000000000000000..24e7a8f3e0364f2a0a14c74a27da7372508cd414 --- /dev/null +++ b/test_tipc/configs/ppocr_rec_server/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt @@ -0,0 +1,18 @@ +===========================serving_params=========================== +model_name:ocr_rec_server +python:python3.7 +trans_model:-m paddle_serving_client.convert +--dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ +--model_filename:inference.pdmodel +--params_filename:inference.pdiparams +--serving_server:./deploy/pdserving/ppocr_rec_server_2.0_serving/ +--serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/ +serving_dir:./deploy/pdserving +web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 +op.rec.local_service_conf.devices:null|0 +op.rec.local_service_conf.use_mkldnn:True|False +op.rec.local_service_conf.thread_num:1|6 +op.rec.local_service_conf.use_trt:False|True +op.rec.local_service_conf.precision:fp32|fp16|int8 +pipline:pipeline_rpc_client.py|pipeline_http_client.py +--image_dir:../../doc/imgs_words_en \ No newline at end of file diff --git a/test_tipc/configs/rec_icdar15_r34_train.yml b/test_tipc/configs/ppocr_rec_server/rec_icdar15_r34_train.yml similarity index 100% rename from test_tipc/configs/rec_icdar15_r34_train.yml rename to 
test_tipc/configs/ppocr_rec_server/rec_icdar15_r34_train.yml diff --git a/test_tipc/configs/ppocr_rec_server_params.txt b/test_tipc/configs/ppocr_rec_server_params.txt deleted file mode 100644 index 3bc1dcce2c7103f2180c19551e8f5379e5524476..0000000000000000000000000000000000000000 --- a/test_tipc/configs/ppocr_rec_server_params.txt +++ /dev/null @@ -1,84 +0,0 @@ -===========================train_params=========================== -model_name:ocr_server_rec -python:python3.7 -gpu_list:0|0,1 -Global.use_gpu:True|True -Global.auto_cast:null -Global.epoch_num:lite_train_infer=2|whole_train_infer=300 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128 -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:./inference/rec_inference -null:null -## -trainer:norm_train|pact_train -norm_train:tools/train.py -c tests/configs/rec_icdar15_r34_train.yml -o -pact_train:deploy/slim/quantization/quant.py -c tests/configs/rec_icdar15_r34_train.yml -o -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:tools/eval.py -c tests/configs/rec_icdar15_r34_train.yml -o -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o -quant_export:deploy/slim/quantization/export_model.py -c tests/configs/rec_icdar15_r34_train.yml -o -fpgm_export:null -distill_export:null -export1:null -export2:null -## -infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/ -infer_export:null -infer_quant:False -inference:tools/infer/predict_rec.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1|6 ---use_tensorrt:True|False ---precision:fp32|fp16|int8 ---rec_model_dir: ---image_dir:./inference/rec_inference ---save_log_path:./test/output/ ---benchmark:True -null:null -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_server_v2.0_rec_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr rec ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---rec_model_dir: ---image_dir:./inference/rec_inference/ -null:null ---benchmark:True -===========================serving_params=========================== -model_name:ocr_server_rec -python:python3.7 -trans_model:-m paddle_serving_client.convert ---dirname:./inference/ch_ppocr_server_v2.0_rec_infer/ ---model_filename:inference.pdmodel ---params_filename:inference.pdiparams ---serving_server:./deploy/pdserving/ppocr_rec_mobile_2.0_serving/ ---serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/ -serving_dir:./deploy/pdserving -web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1 -op.rec.local_service_conf.devices:null|0 -op.rec.local_service_conf.use_mkldnn:True|False -op.rec.local_service_conf.thread_num:1|6 -op.rec.local_service_conf.use_trt:False|True -op.rec.local_service_conf.precision:fp32|fp16|int8 -pipline:pipeline_http_client.py|pipeline_rpc_client.py ---image_dir=../../doc/imgs_words_en diff --git a/test_tipc/configs/ppocr_sys_mobile_params.txt b/test_tipc/configs/ppocr_sys_mobile_params.txt deleted file mode 100644 index bb6e0960a77c946e7d452c1026368682be0c4579..0000000000000000000000000000000000000000 --- 
a/test_tipc/configs/ppocr_sys_mobile_params.txt +++ /dev/null @@ -1,67 +0,0 @@ -===========================train_params=========================== -model_name:ocr_system_mobile -python:python3.7 -gpu_list:null -Global.use_gpu:null -Global.auto_cast:null -Global.epoch_num:null -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:null -Global.pretrained_model:null -train_model_name:null -train_infer_img_dir:null -null:null -## -trainer: -norm_train:null -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:null -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -## -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_export:null -infer_quant:False -inference:tools/infer/predict_system.py ---use_gpu:True ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---save_log_path:null ---benchmark:True ---rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ -infer_quant:False -inference:./deploy/cpp_infer/build/ppocr system ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ ---benchmark:True - diff --git a/test_tipc/configs/ppocr_sys_server_params.txt b/test_tipc/configs/ppocr_sys_server_params.txt deleted file mode 100644 index 9c49f7ddf43dbca2562bb206d92e5aeb84e703aa..0000000000000000000000000000000000000000 --- a/test_tipc/configs/ppocr_sys_server_params.txt +++ /dev/null @@ -1,66 +0,0 @@ -===========================train_params=========================== -model_name:ocr_system_server -python:python3.7 -gpu_list:null -Global.use_gpu:null -Global.auto_cast:null -Global.epoch_num:null -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card:null -Global.pretrained_model:null -train_model_name:null -train_infer_img_dir:null -null:null -## -trainer: -norm_train:null -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:null -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -## -infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ -infer_export:null -infer_quant:False -inference:tools/infer/predict_system.py ---use_gpu:True ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16|int8 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---save_log_path:null ---benchmark:True ---rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ -===========================cpp_infer_params=========================== -use_opencv:True -infer_model:./inference/ch_ppocr_server_v2.0_det_infer/ -infer_quant:False 
-inference:./deploy/cpp_infer/build/ppocr system ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:False|True ---precision:fp32|fp16 ---det_model_dir: ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---rec_model_dir:./inference/ch_ppocr_server_v2.0_rec_infer/ ---benchmark:True \ No newline at end of file diff --git a/test_tipc/configs/ppocrv2_det_mobile_params.txt b/test_tipc/configs/ppocrv2_det_mobile/train_infer_python.txt similarity index 100% rename from test_tipc/configs/ppocrv2_det_mobile_params.txt rename to test_tipc/configs/ppocrv2_det_mobile/train_infer_python.txt diff --git a/test_tipc/docs/install.md b/test_tipc/docs/install.md index f17c264f3987c8cc2a756e045ebacb8fba5c277a..c1cd163e061b146fe0f25f9e3ef7f409992e2f91 100644 --- a/test_tipc/docs/install.md +++ b/test_tipc/docs/install.md @@ -1,6 +1,6 @@ ## 1. 环境准备 -本教程适用于PTDN目录下基础功能测试的运行环境搭建。 +本教程适用于test_tipc目录下基础功能测试的运行环境搭建。 推荐环境: - CUDA 10.1/10.2 diff --git a/test_tipc/docs/lite_auto_log.png b/test_tipc/docs/lite_auto_log.png index cd9256db40232d689ea67a1bbef2b768c5f98753..d5b6b450d445d0047fb3836bfd9c726adebb7b9f 100644 Binary files a/test_tipc/docs/lite_auto_log.png and b/test_tipc/docs/lite_auto_log.png differ diff --git a/test_tipc/docs/lite_log.png b/test_tipc/docs/lite_log.png index 24ae5abc7167049ac879428e5e105a6e67d3c36d..2b3e40b3fa2700a4c715269229cdfe582d29f90a 100644 Binary files a/test_tipc/docs/lite_log.png and b/test_tipc/docs/lite_log.png differ diff --git a/test_tipc/docs/test_inference_cpp.md b/test_tipc/docs/test_inference_cpp.md index cd757a895bb957e498fda61cf52d2132d660ca8f..3c9599a32cd473a31670d3ee028a36bcf621df87 100644 --- a/test_tipc/docs/test_inference_cpp.md +++ b/test_tipc/docs/test_inference_cpp.md @@ -20,12 +20,12 @@ C++预测功能测试的主程序为`test_inference_cpp.sh`,可以测试基于 先运行`prepare.sh`准备数据和模型,然后运行`test_inference_cpp.sh`进行测试,最终在```test_tipc/output```目录下生成`cpp_infer_*.log`后缀的日志文件。 ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "cpp_infer" +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt "cpp_infer" # 用法1: -bash test_tipc/test_inference_cpp.sh ./test_tipc/configs/ppocr_det_mobile_params.txt +bash test_tipc/test_inference_cpp.sh test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt # 用法2: 指定GPU卡预测,第三个传入参数为GPU卡号 -bash test_tipc/test_inference_cpp.sh ./test_tipc/configs/ppocr_det_mobile_params.txt '1' +bash test_tipc/test_inference_cpp.sh test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt '1' ``` 运行预测指令后,在`test_tipc/output`文件夹下自动会保存运行日志,包括以下文件: diff --git a/test_tipc/docs/test_lite.md b/test_tipc/docs/test_lite.md deleted file mode 100644 index 01ae0cb4b471f1219f88ffa9e2c11d50765233d3..0000000000000000000000000000000000000000 --- a/test_tipc/docs/test_lite.md +++ /dev/null @@ -1,72 +0,0 @@ -# Lite预测功能测试 - -Lite预测功能测试的主程序为`test_lite.sh`,可以测试基于Lite预测库的模型推理功能。 - -## 1. 测试结论汇总 - -目前Lite端的样本间支持以方式的组合: - -**字段说明:** -- 输入设置:包括C++预测、python预测、java预测 -- 模型类型:包括正常模型(FP32)和量化模型(FP16) -- batch-size:包括1和4 -- predictor数量:包括多predictor预测和单predictor预测 -- 功耗模式:包括高性能模式(LITE_POWER_HIGH)和省电模式(LITE_POWER_LOW) -- 预测库来源:包括下载方式和编译方式,其中编译方式分为以下目标硬件:(1)ARM CPU;(2)Linux XPU;(3)OpenCL GPU;(4)Metal GPU - -| 模型类型 | batch-size | predictor数量 | 功耗模式 | 预测库来源 | 支持语言 | -| :----: | :----: | :----: | :----: | :----: | :----: | -| 正常模型/量化模型 | 1 | 1 | 高性能模式/省电模式 | 下载方式 | C++预测 | - - -## 2. 
测试流程 -运行环境配置请参考[文档](./install.md)的内容配置TIPC的运行环境。 - -### 2.1 功能测试 - -先运行`prepare.sh`准备数据和模型,模型和数据会打包到test_lite.tar中,将test_lite.tar上传到手机上,解压后进`入test_lite`目录中,然后运行`test_lite.sh`进行测试,最终在`test_lite/output`目录下生成`lite_*.log`后缀的日志文件。 - -```shell - -# 数据和模型准备 -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "lite_infer" - -# 手机端测试: -bash test_lite.sh ppocr_det_mobile_params.txt - -``` - -**注意**:由于运行该项目需要bash等命令,传统的adb方式不能很好的安装。所以此处推荐通在手机上开启虚拟终端的方式连接电脑,连接方式可以参考[安卓手机termux连接电脑](./termux_for_android.md)。 - -#### 运行结果 - -各测试的运行情况会打印在 `./output/` 中: -运行成功时会输出: - -``` -Run successfully with command - ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True > ./output/lite_ch_ppocr_mobile_v2.0_det_slim_opt.nb_precision_INT8_batchsize_1_threads_4_powermode_LITE_POWER_LOW_singleimg_True.log 2>&1! -Run successfully with command xxx -... -``` - -运行失败时会输出: - -``` -Run failed with command - ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb INT8 4 1 LITE_POWER_LOW ./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg ./config.txt True > ./output/lite_ch_ppocr_mobile_v2.0_det_slim_opt.nb_precision_INT8_batchsize_1_threads_4_powermode_LITE_POWER_LOW_singleimg_True.log 2>&1! -Run failed with command xxx -... -``` - -在./output/文件夹下,会存在如下日志,每一个日志都是不同配置下的log结果: - - - -在每一个log中,都会调用autolog打印如下信息: - - - - - -## 3. 更多教程 - -本文档为功能测试用,更详细的Lite端预测使用教程请参考:[Lite端部署](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md)。 diff --git a/test_tipc/docs/test_lite_arm_cpu_cpp.md b/test_tipc/docs/test_lite_arm_cpu_cpp.md new file mode 100644 index 0000000000000000000000000000000000000000..6f58026a315dabb8810e56b6d694733c1c72019c --- /dev/null +++ b/test_tipc/docs/test_lite_arm_cpu_cpp.md @@ -0,0 +1,71 @@ +# Lite\_arm\_cpu\_cpp预测功能测试 + +Lite\_arm\_cpu\_cpp预测功能测试的主程序为`test_lite_arm_cpu_cpp.sh`,可以在ARM CPU上基于Lite预测库测试模型的C++推理功能。 + +## 1. 测试结论汇总 + +目前Lite端的样本间支持以方式的组合: + +**字段说明:** +- 模型类型:包括正常模型(FP32)和量化模型(INT8) +- batch-size:包括1和4 +- threads:包括1和4 +- predictor数量:包括多predictor预测和单predictor预测 +- 预测库来源:包括下载方式和编译方式 + +| 模型类型 | batch-size | threads | predictor数量 | 预测库来源 | +| :----: | :----: | :----: | :----: | :----: | +| 正常模型/量化模型 | 1 | 1/4 | 1 | 下载方式 | + + +## 2. 测试流程 +运行环境配置请参考[文档](./install.md)的内容配置TIPC的运行环境。 + +### 2.1 功能测试 + +先运行`prepare_lite.sh`,运行后会在当前路径下生成`test_lite.tar`,其中包含了测试数据、测试模型和用于预测的可执行文件。将`test_lite.tar`上传到被测试的手机上,在手机的终端解压该文件,进入`test_lite`目录中,然后运行`test_lite_arm_cpu_cpp.sh`进行测试,最终在`test_lite/output`目录下生成`lite_*.log`后缀的日志文件。 + +```shell + +# 数据和模型准备 +bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt + +# 手机端测试: +bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt + +``` + +**注意**:由于运行该项目需要bash等命令,传统的adb方式不能很好的安装。所以此处推荐通在手机上开启虚拟终端的方式连接电脑,连接方式可以参考[安卓手机termux连接电脑](./termux_for_android.md)。 + +#### 运行结果 + +各测试的运行情况会打印在 `./output/` 中: +运行成功时会输出: + +``` +Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1! +Run successfully with command xxx +... 
+``` + +运行失败时会输出: + +``` +Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1! +Run failed with command xxx +... +``` + +在./output/文件夹下,会存在如下日志,每一个日志都是不同配置下的log结果: + + + +在每一个log中,都会调用autolog打印如下信息: + + + + + +## 3. 更多教程 + +本文档为功能测试用,更详细的Lite端预测使用教程请参考:[Lite端部署](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md)。 diff --git a/test_tipc/docs/test_paddle2onnx.md b/test_tipc/docs/test_paddle2onnx.md index 5d784c5e93c3a93d00c256004de582dcbf357c45..df2734771e9252a40811c42ead03abbff1b7a1a3 100644 --- a/test_tipc/docs/test_paddle2onnx.md +++ b/test_tipc/docs/test_paddle2onnx.md @@ -18,10 +18,10 @@ PaddleServing预测功能测试的主程序为`test_paddle2onnx.sh`,可以测 先运行`prepare.sh`准备数据和模型,然后运行`test_paddle2onnx.sh`进行测试,最终在```test_tipc/output```目录下生成`paddle2onnx_infer_*.log`后缀的日志文件。 ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "paddle2onnx_infer" +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt "paddle2onnx_infer" # 用法: -bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ppocr_det_mobile_params.txt +bash test_tipc/test_paddle2onnx.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt ``` #### 运行结果 diff --git a/test_tipc/docs/test_serving.md b/test_tipc/docs/test_serving.md index f63d6c7107ce92807c53d81a22a582b09178a712..1eded6f5821a5ebd9180cc4d89a1fecac61ad63d 100644 --- a/test_tipc/docs/test_serving.md +++ b/test_tipc/docs/test_serving.md @@ -20,10 +20,10 @@ PaddleServing预测功能测试的主程序为`test_serving.sh`,可以测试 先运行`prepare.sh`准备数据和模型,然后运行`test_serving.sh`进行测试,最终在```test_tipc/output```目录下生成`serving_infer_*.log`后缀的日志文件。 ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt "serving_infer" +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt "serving_infer" # 用法: -bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile_params.txt +bash test_tipc/test_serving.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt ``` #### 运行结果 diff --git a/test_tipc/docs/test_train_inference_python.md b/test_tipc/docs/test_train_inference_python.md index 9028e67d093112d23cc7c5d9da10d185f1db9b5b..13caa2542a3b89c28106b22b5ef42e84e6ed0632 100644 --- a/test_tipc/docs/test_train_inference_python.md +++ b/test_tipc/docs/test_train_inference_python.md @@ -64,35 +64,35 @@ Linux端基础训练预测功能测试的主程序为`test_train_inference_pytho - 模式1:lite_train_lite_infer,使用少量数据训练,用于快速验证训练到预测的走通流程,不验证精度和速度; ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_lite_infer' -bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_lite_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'lite_train_lite_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'lite_train_lite_infer' ``` - 模式2:lite_train_whole_infer,使用少量数据训练,一定量数据预测,用于验证训练后的模型执行预测,预测速度是否合理; ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_whole_infer' -bash test_tipc/test_train_inference_python.sh 
./test_tipc/configs/ppocr_det_mobile_params.txt 'lite_train_whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'lite_train_whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'lite_train_whole_infer' ``` - 模式3:whole_infer,不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度; ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'whole_infer' # 用法1: -bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'whole_infer' # 用法2: 指定GPU卡预测,第三个传入参数为GPU卡号 -bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_infer' '1' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'whole_infer' '1' ``` - 模式4:whole_train_whole_infer,CE: 全量数据训练,全量数据预测,验证模型训练精度,预测精度,预测速度; ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_train_whole_infer' -bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'whole_train_whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'whole_train_whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'whole_train_whole_infer' ``` - 模式5:klquant_whole_infer,测试离线量化; ```shell -bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile_params.txt 'klquant_whole_infer' -bash test_tipc/test_train_inference_python.sh test_tipc/configs/ppocr_det_mobile_params.txt 'klquant_whole_infer' +bash test_tipc/prepare.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'klquant_whole_infer' +bash test_tipc/test_train_inference_python.sh ./test_tipc/configs/ppocr_det_mobile/train_infer_python.txt 'klquant_whole_infer' ``` 运行相应指令后,在`test_tipc/output`文件夹下自动会保存运行日志。如'lite_train_lite_infer'模式下,会运行训练+inference的链条,因此,在`test_tipc/output`文件夹有以下文件: diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh index f3ad242538a9471af237c804eae343da06e2b9dd..6e07bf8a1ac174b93f9abe141a00f32da169d9d5 100644 --- a/test_tipc/prepare.sh +++ b/test_tipc/prepare.sh @@ -1,9 +1,11 @@ #!/bin/bash +source test_tipc/common_func.sh + FILENAME=$1 # MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', # 'whole_infer', 'klquant_whole_infer', -# 'cpp_infer', 'serving_infer', 'lite_infer'] +# 'cpp_infer', 'serving_infer'] MODE=$2 @@ -12,30 +14,12 @@ dataline=$(cat ${FILENAME}) # parser params IFS=$'\n' lines=(${dataline}) -function func_parser_key(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[0]} - echo ${tmp} -} -function func_parser_value(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[1]} - echo ${tmp} -} -IFS=$'\n' + # The training params model_name=$(func_parser_value "${lines[1]}") trainer_list=$(func_parser_value "${lines[14]}") -# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', # 'whole_infer', 'klquant_whole_infer', -# 'cpp_infer', 'serving_infer', 'lite_infer'] -MODE=$2 if [ ${MODE} = "lite_train_lite_infer" ];then # pretrain lite train data @@ -87,7 +71,8 @@ elif [ ${MODE} = "whole_infer" ];then rm -rf ./train_data/icdar2015
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar --no-check-certificate - cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar --no-check-certificate + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && cd ../ elif [ ${model_name} = "ocr_server_det" ]; then wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_train.tar --no-check-certificate wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar --no-check-certificate @@ -168,40 +153,6 @@ if [ ${MODE} = "serving_infer" ];then cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../ fi - -if [ ${MODE} = "lite_infer" ];then - # prepare lite nb model and test data - current_dir=${PWD} - wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_opt.nb - wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_slim_opt.nb - wget -nc -P ./test_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar - cd ./test_data && tar -xf icdar2015_lite.tar && rm icdar2015_lite.tar && cd ../ - # prepare lite env - export http_proxy=http://172.19.57.45:3128 - export https_proxy=http://172.19.57.45:3128 - paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz - paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}') - paddlelite_file=${paddlelite_zipfile:0:66} - wget ${paddlelite_url} - tar -xf ${paddlelite_zipfile} - mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite - mv models test_data ${paddlelite_file}/demo/cxx/ocr/test_lite - cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite - cp ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/ - cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite - cp test_tipc/configs/ppocr_det_mobile_params.txt test_tipc/test_lite.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite - cd ${paddlelite_file}/demo/cxx/ocr/ - git clone https://github.com/LDOUBLEV/AutoLog.git - unset http_proxy - unset https_proxy - make -j - sleep 1 - make -j - cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so - tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir} -fi - - if [ ${MODE} = "paddle2onnx_infer" ];then # prepare serving env python_name=$(func_parser_value "${lines[2]}") diff --git a/test_tipc/prepare_lite.sh b/test_tipc/prepare_lite.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a08d96298592c829547df9fa30ef4149ddc5b00 --- /dev/null +++ b/test_tipc/prepare_lite.sh @@ -0,0 +1,55 @@ +#!/bin/bash +source ./test_tipc/common_func.sh +FILENAME=$1 +dataline=$(cat ${FILENAME}) +# parser params +IFS=$'\n' +lines=(${dataline}) +IFS=$'\n' +lite_model_list=$(func_parser_value "${lines[2]}") + +# prepare lite .nb model 
+pip install paddlelite==2.9 +current_dir=${PWD} +IFS="|" +model_path=./inference_models +for model in ${lite_model_list[*]}; do + inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar + inference_model=${inference_model_url##*/} + wget -nc -P ${model_path} ${inference_model_url} + cd ${model_path} && tar -xf ${inference_model} && cd ../ + model_dir=${model_path}/${inference_model%.*} + model_file=${model_dir}/inference.pdmodel + param_file=${model_dir}/inference.pdiparams + paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt +done + +# prepare test data +data_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar +model_path=./inference_models +inference_model=${inference_model_url##*/} +data_file=${data_url##*/} +wget -nc -P ./inference_models ${inference_model_url} +wget -nc -P ./test_data ${data_url} +cd ./inference_models && tar -xf ${inference_model} && cd ../ +cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../ + +# prepare lite env +paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz +paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}') +paddlelite_file=${paddlelite_zipfile:0:66} +wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile} +mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite +cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite +cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite +cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/ +cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite +cp ${FILENAME} test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite +cd ${paddlelite_file}/demo/cxx/ocr/ +git clone https://github.com/cuicheng01/AutoLog.git +make -j +sleep 1 +make -j +cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so +tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir} +rm -rf ${paddlelite_file}* && rm -rf ${model_path} diff --git a/test_tipc/readme.md b/test_tipc/readme.md index 1d8df7da6cf6d1319cedd329e4202fa674e8538b..9ab6f333d9dd74052dfd6eb71b60a8f257abd72d 100644 --- a/test_tipc/readme.md +++ b/test_tipc/readme.md @@ -60,16 +60,20 @@ ```shell test_tipc/ ├── configs/ # 配置文件目录 - ├── det_mv3_db.yml # 测试mobile版ppocr检测模型训练的yml文件 - ├── det_r50_vd_db.yml # 测试server版ppocr检测模型训练的yml文件 - ├── rec_icdar15_r34_train.yml # 测试server版ppocr识别模型训练的yml文件 - ├── ppocr_sys_mobile_params.txt # 测试mobile版ppocr检测+识别模型串联的参数配置文件 - ├── ppocr_det_mobile_params.txt # 测试mobile版ppocr检测模型的参数配置文件 - ├── ppocr_rec_mobile_params.txt # 测试mobile版ppocr识别模型的参数配置文件 - ├── ppocr_sys_server_params.txt # 测试server版ppocr检测+识别模型串联的参数配置文件 - ├── ppocr_det_server_params.txt # 测试server版ppocr检测模型的参数配置文件 - ├── ppocr_rec_server_params.txt # 测试server版ppocr识别模型的参数配置文件 - ├── ... 
+      ├── ppocr_det_mobile               # ppocr_det_mobile模型的测试配置文件目录
+          ├── det_mv3_db.yml                 # 测试mobile版ppocr检测模型训练的yml文件
+          ├── train_infer_python.txt         # 测试Linux上python训练预测(基础训练预测)的配置文件
+          ├── model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt   # 测试Linux上c++预测的配置文件
+          ├── model_linux_gpu_normal_normal_infer_python_jetson.txt       # 测试Jetson上python预测的配置文件
+          ├── train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt    # 测试Linux上多机多卡、混合精度训练和python预测的配置文件
+          ├── ...
+      ├── ppocr_det_server               # ppocr_det_server模型的测试配置文件目录
+          ├── ...
+      ├── ppocr_rec_mobile               # ppocr_rec_mobile模型的测试配置文件目录
+          ├── ...
+      ├── ppocr_rec_server               # ppocr_rec_server模型的测试配置文件目录
+          ├── ...
+      ├── ...
    ├── results/   # 预先保存的预测结果,用于和实际预测结果进行精读比对
        ├── python_ppocr_det_mobile_results_fp32.txt   # 预存的mobile版ppocr检测模型python预测fp32精度的结果
        ├── python_ppocr_det_mobile_results_fp16.txt   # 预存的mobile版ppocr检测模型python预测fp16精度的结果
@@ -80,11 +84,22 @@ test_tipc/
 ├── test_train_inference_python.sh     # 测试python训练预测的主程序
 ├── test_inference_cpp.sh              # 测试c++预测的主程序
 ├── test_serving.sh                    # 测试serving部署预测的主程序
-├── test_lite.sh                       # 测试lite部署预测的主程序
+├── test_lite_arm_cpu_cpp.sh           # 测试lite在arm_cpu上部署的C++预测的主程序
 ├── compare_results.py                 # 用于对比log中的预测结果与results中的预存结果精度误差是否在限定范围内
 └── readme.md                          # 使用文档
 ```

+### 配置文件命名规范
+在`configs`目录下,按模型名称划分为子目录,子目录中存放所有该模型测试需要用到的配置文件,配置文件的命名遵循如下规范:
+
+1. 基础训练预测配置简单命名为:`train_infer_python.txt`,表示**Linux环境下单机、不使用混合精度训练+python预测**,其完整命名对应`train_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt`,由于本配置文件使用频率较高,这里进行了名称简化。
+
+2. 其他带训练配置命名格式为:`train_训练硬件环境(linux_gpu/linux_dcu/…)_是否多机(fleet/normal)_是否混合精度(amp/normal)_预测模式(infer/lite/serving/js)_语言(cpp/python/java)_预测硬件环境(linux_gpu/mac/jetson/opencl_arm_gpu/...).txt`。如,linux gpu下多机多卡、混合精度链条测试对应配置 `train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt`,linux dcu下基础训练预测对应配置 `train_linux_dcu_normal_normal_infer_python_dcu.txt`。
+
+3. 仅预测的配置(如serving、lite等)命名格式:`model_训练硬件环境(linux_gpu/linux_dcu/…)_是否多机(fleet/normal)_是否混合精度(amp/normal)_(infer/lite/serving/js)_语言(cpp/python/java)_预测硬件环境(linux_gpu/mac/jetson/opencl_arm_gpu/...).txt`,即,与2相比,仅第一个字段从train换为model,测试时模型直接下载获取,这里的“训练硬件环境”表示所测试的模型是在哪种环境下训练得到的。
+
+根据上述命名规范,可以直接从配置文件名看出对应的测试场景和功能,文件名各字段的示意性解析见本节之后的补充示例。
+
 ### 测试流程
使用本工具,可以测试不同功能的支持情况,以及预测结果是否对齐,测试流程如下:
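在进入测试流程之前,先补充一个与上文配置文件命名规范对应的示意性解析脚本:下面的 Python 片段仅用于说明文件名中各字段的含义,并不是仓库中实际存在的工具;字段取值按上文列出的选项假设,此外将本次新增的 paddle2onnx 配置也假定为一种预测模式,简化命名 `train_infer_python.txt` 则按其完整命名展开。

```python
# 示意代码(非仓库实际工具):按照 test_tipc 配置文件命名规范拆解文件名。
# 字段取值为上文列出的选项;把 paddle2onnx 计入预测模式是针对本次新增配置的假设。
import re

_NAME_PATTERN = re.compile(
    r'^(?P<prefix>train|model)_'                       # train:带训练链条;model:仅预测,模型直接下载
    r'(?P<train_env>.+?)_'                             # 训练硬件环境,如 linux_gpu / linux_dcu
    r'(?P<multi_machine>fleet|normal)_'                # 是否多机
    r'(?P<precision>amp|normal)_'                      # 是否混合精度
    r'(?P<mode>infer|lite|serving|js|paddle2onnx)_'    # 预测模式
    r'(?P<language>cpp|python|java)_'                  # 语言
    r'(?P<infer_env>.+)\.txt$'                         # 预测硬件环境,如 linux_gpu_cpu / jetson / mac
)


def parse_tipc_config_name(filename):
    """把配置文件名解析成字段字典;不符合规范时抛出 ValueError。"""
    if filename == 'train_infer_python.txt':
        # 简化命名,等价于 Linux GPU 单机、不使用混合精度的基础训练预测配置
        filename = 'train_linux_gpu_normal_normal_infer_python_linux_gpu_cpu.txt'
    match = _NAME_PATTERN.match(filename)
    if match is None:
        raise ValueError('文件名不符合命名规范: {}'.format(filename))
    return match.groupdict()


if __name__ == '__main__':
    print(parse_tipc_config_name('train_linux_gpu_fleet_amp_infer_python_linux_gpu_cpu.txt'))
    print(parse_tipc_config_name('model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt'))
```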
@@ -99,7 +114,8 @@ test_tipc/ - `test_train_inference_python.sh`:测试基于Python的模型训练、评估、推理等基本功能,包括裁剪、量化、蒸馏。 - `test_inference_cpp.sh`:测试基于C++的模型推理。 - `test_serving.sh`:测试基于Paddle Serving的服务化部署功能。 -- `test_lite.sh`:测试基于Paddle-Lite的端侧预测部署功能。 +- `test_lite_arm_cpu_cpp.sh`:测试基于Paddle-Lite的ARM CPU端c++预测部署功能。 +- `test_paddle2onnx.sh`:测试Paddle2ONNX的模型转化功能,并验证正确性。 #### 更多教程 @@ -107,4 +123,5 @@ test_tipc/ [test_train_inference_python 使用](docs/test_train_inference_python.md) [test_inference_cpp 使用](docs/test_inference_cpp.md) [test_serving 使用](docs/test_serving.md) -[test_lite 使用](docs/test_lite.md) +[test_lite_arm_cpu_cpp 使用](docs/test_lite_arm_cpu_cpp.md) +[test_paddle2onnx 使用](docs/test_paddle2onnx.md) diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh index 3f8b54b189349aa9c011a56f6f12752b771ce43e..d26954353ef1e81ae49364b7f9d20357768cff85 100644 --- a/test_tipc/test_inference_cpp.sh +++ b/test_tipc/test_inference_cpp.sh @@ -2,38 +2,38 @@ source test_tipc/common_func.sh FILENAME=$1 -dataline=$(awk 'NR==52, NR==66{print}' $FILENAME) +dataline=$(awk 'NR==1, NR==16{print}' $FILENAME) # parser params IFS=$'\n' lines=(${dataline}) # parser cpp inference model -use_opencv=$(func_parser_value "${lines[1]}") -cpp_infer_model_dir_list=$(func_parser_value "${lines[2]}") -cpp_infer_is_quant=$(func_parser_value "${lines[3]}") +model_name=$(func_parser_value "${lines[1]}") +use_opencv=$(func_parser_value "${lines[2]}") +cpp_infer_model_dir_list=$(func_parser_value "${lines[3]}") +cpp_infer_is_quant=$(func_parser_value "${lines[4]}") # parser cpp inference -inference_cmd=$(func_parser_value "${lines[4]}") -cpp_use_gpu_key=$(func_parser_key "${lines[5]}") -cpp_use_gpu_list=$(func_parser_value "${lines[5]}") -cpp_use_mkldnn_key=$(func_parser_key "${lines[6]}") -cpp_use_mkldnn_list=$(func_parser_value "${lines[6]}") -cpp_cpu_threads_key=$(func_parser_key "${lines[7]}") -cpp_cpu_threads_list=$(func_parser_value "${lines[7]}") -cpp_batch_size_key=$(func_parser_key "${lines[8]}") -cpp_batch_size_list=$(func_parser_value "${lines[8]}") -cpp_use_trt_key=$(func_parser_key "${lines[9]}") -cpp_use_trt_list=$(func_parser_value "${lines[9]}") -cpp_precision_key=$(func_parser_key "${lines[10]}") -cpp_precision_list=$(func_parser_value "${lines[10]}") -cpp_infer_model_key=$(func_parser_key "${lines[11]}") -cpp_image_dir_key=$(func_parser_key "${lines[12]}") -cpp_infer_img_dir=$(func_parser_value "${lines[12]}") -cpp_infer_key1=$(func_parser_key "${lines[13]}") -cpp_infer_value1=$(func_parser_value "${lines[13]}") -cpp_benchmark_key=$(func_parser_key "${lines[14]}") -cpp_benchmark_value=$(func_parser_value "${lines[14]}") - +inference_cmd=$(func_parser_value "${lines[5]}") +cpp_use_gpu_key=$(func_parser_key "${lines[6]}") +cpp_use_gpu_list=$(func_parser_value "${lines[6]}") +cpp_use_mkldnn_key=$(func_parser_key "${lines[7]}") +cpp_use_mkldnn_list=$(func_parser_value "${lines[7]}") +cpp_cpu_threads_key=$(func_parser_key "${lines[8]}") +cpp_cpu_threads_list=$(func_parser_value "${lines[8]}") +cpp_batch_size_key=$(func_parser_key "${lines[9]}") +cpp_batch_size_list=$(func_parser_value "${lines[9]}") +cpp_use_trt_key=$(func_parser_key "${lines[10]}") +cpp_use_trt_list=$(func_parser_value "${lines[10]}") +cpp_precision_key=$(func_parser_key "${lines[11]}") +cpp_precision_list=$(func_parser_value "${lines[11]}") +cpp_infer_model_key=$(func_parser_key "${lines[12]}") +cpp_image_dir_key=$(func_parser_key "${lines[13]}") +cpp_infer_img_dir=$(func_parser_value "${lines[13]}") +cpp_infer_key1=$(func_parser_key 
"${lines[14]}") +cpp_infer_value1=$(func_parser_value "${lines[14]}") +cpp_benchmark_key=$(func_parser_key "${lines[15]}") +cpp_benchmark_value=$(func_parser_value "${lines[15]}") LOG_PATH="./test_tipc/output" mkdir -p ${LOG_PATH} diff --git a/test_tipc/test_inference_jeston.sh b/test_tipc/test_inference_jeston.sh new file mode 100644 index 0000000000000000000000000000000000000000..2fd76e1e9e7e8c7b52d0b6838cd15840a59fe5c4 --- /dev/null +++ b/test_tipc/test_inference_jeston.sh @@ -0,0 +1,87 @@ +#!/bin/bash +source test_tipc/common_func.sh +source test_tipc/test_train_inference_python.sh + +FILENAME=$1 +# MODE be one of ['whole_infer'] +MODE=$2 + +dataline=$(awk 'NR==1, NR==17{print}' $FILENAME) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") + +infer_model_dir_list=$(func_parser_value "${lines[3]}") +infer_export_list=$(func_parser_value "${lines[4]}") +infer_is_quant=$(func_parser_value "${lines[5]}") +# parser inference +inference_py=$(func_parser_value "${lines[6]}") +use_gpu_key=$(func_parser_key "${lines[7]}") +use_gpu_list=$(func_parser_value "${lines[7]}") +use_mkldnn_key=$(func_parser_key "${lines[8]}") +use_mkldnn_list=$(func_parser_value "${lines[8]}") +cpu_threads_key=$(func_parser_key "${lines[9]}") +cpu_threads_list=$(func_parser_value "${lines[9]}") +batch_size_key=$(func_parser_key "${lines[10]}") +batch_size_list=$(func_parser_value "${lines[10]}") +use_trt_key=$(func_parser_key "${lines[11]}") +use_trt_list=$(func_parser_value "${lines[11]}") +precision_key=$(func_parser_key "${lines[12]}") +precision_list=$(func_parser_value "${lines[12]}") +infer_model_key=$(func_parser_key "${lines[13]}") +image_dir_key=$(func_parser_key "${lines[14]}") +infer_img_dir=$(func_parser_value "${lines[14]}") +save_log_key=$(func_parser_key "${lines[15]}") +benchmark_key=$(func_parser_key "${lines[16]}") +benchmark_value=$(func_parser_value "${lines[16]}") +infer_key1=$(func_parser_key "${lines[17]}") +infer_value1=$(func_parser_value "${lines[17]}") + + +LOG_PATH="./test_tipc/output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results_python.log" + + +if [ ${MODE} = "whole_infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir=$(dirname $infer_model) + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" + echo ${infer_run_exports[Count]} + echo $export_cmd + eval $export_cmd + status_export=$? 
+ status_check $status_export "${export_cmd}" "${status_log}" + else + save_infer_dir=${infer_model} + fi + #run inference + is_quant=${infer_quant_flag[Count]} + if [ ${MODE} = "klquant_infer" ]; then + is_quant="True" + fi + func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} + Count=$(($Count + 1)) + done +fi + diff --git a/test_tipc/test_lite.sh b/test_tipc/test_lite.sh deleted file mode 100644 index 1fd9d3c7186207922c436e7981622c707a56596f..0000000000000000000000000000000000000000 --- a/test_tipc/test_lite.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -source ./common_func.sh -export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH - -FILENAME=$1 -dataline=$(awk 'NR==102, NR==111{print}' $FILENAME) -echo $dataline -# parser params -IFS=$'\n' -lines=(${dataline}) - -# parser lite inference -lite_inference_cmd=$(func_parser_value "${lines[1]}") -lite_model_dir_list=$(func_parser_value "${lines[2]}") -lite_cpu_threads_list=$(func_parser_value "${lines[3]}") -lite_batch_size_list=$(func_parser_value "${lines[4]}") -lite_power_mode_list=$(func_parser_value "${lines[5]}") -lite_infer_img_dir_list=$(func_parser_value "${lines[6]}") -lite_config_dir=$(func_parser_value "${lines[7]}") -lite_rec_dict_dir=$(func_parser_value "${lines[8]}") -lite_benchmark_value=$(func_parser_value "${lines[9]}") - -LOG_PATH="./output" -mkdir -p ${LOG_PATH} -status_log="${LOG_PATH}/results.log" - - -function func_lite(){ - IFS='|' - _script=$1 - _lite_model=$2 - _log_path=$3 - _img_dir=$4 - _config=$5 - if [[ $lite_model =~ "slim" ]]; then - precision="INT8" - else - precision="FP32" - fi - is_single_img=$(echo $_img_dir | grep -E ".jpg|.jpeg|.png|.JPEG|.JPG") - if [[ "$is_single_img" != "" ]]; then - single_img="True" - else - single_img="False" - fi - - # lite inference - for num_threads in ${lite_cpu_threads_list[*]}; do - for power_mode in ${lite_power_mode_list[*]}; do - for batchsize in ${lite_batch_size_list[*]}; do - model_name=$(echo $lite_model | awk -F "/" '{print $NF}') - _save_log_path="${_log_path}/lite_${model_name}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}_powermode_${power_mode}_singleimg_${single_img}.log" - command="${_script} ${lite_model} ${precision} ${num_threads} ${batchsize} ${power_mode} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1" - eval ${command} - status_check $? 
"${command}" "${status_log}" - done - done - done -} - - -echo "################### run test ###################" -IFS="|" -for lite_model in ${lite_model_dir_list[*]}; do - #run lite inference - for img_dir in ${lite_infer_img_dir_list[*]}; do - func_lite "${lite_inference_cmd}" "${lite_model}" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}" - done -done diff --git a/test_tipc/test_lite_arm_cpu_cpp.sh b/test_tipc/test_lite_arm_cpu_cpp.sh new file mode 100644 index 0000000000000000000000000000000000000000..04eebbd28a334f7ac7819f8ff55d7b3192f4b490 --- /dev/null +++ b/test_tipc/test_lite_arm_cpu_cpp.sh @@ -0,0 +1,60 @@ +#!/bin/bash +source ./common_func.sh +export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH + +FILENAME=$1 +dataline=$(cat $FILENAME) +# parser params +IFS=$'\n' +lines=(${dataline}) + +# parser lite inference +lite_inference_cmd=$(func_parser_value "${lines[1]}") +lite_model_dir_list=$(func_parser_value "${lines[2]}") +runtime_device=$(func_parser_value "${lines[3]}") +lite_cpu_threads_list=$(func_parser_value "${lines[4]}") +lite_batch_size_list=$(func_parser_value "${lines[5]}") +lite_infer_img_dir_list=$(func_parser_value "${lines[8]}") +lite_config_dir=$(func_parser_value "${lines[9]}") +lite_rec_dict_dir=$(func_parser_value "${lines[10]}") +lite_benchmark_value=$(func_parser_value "${lines[11]}") + + +LOG_PATH="./output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results.log" + + +function func_lite(){ + IFS='|' + _script=$1 + _lite_model=$2 + _log_path=$3 + _img_dir=$4 + _config=$5 + if [[ $lite_model =~ "slim" ]]; then + precision="INT8" + else + precision="FP32" + fi + + # lite inference + for num_threads in ${lite_cpu_threads_list[*]}; do + for batchsize in ${lite_batch_size_list[*]}; do + _save_log_path="${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log" + command="${_script} ${_lite_model} ${runtime_device} ${precision} ${num_threads} ${batchsize} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1" + eval ${command} + status_check $? 
"${command}" "${status_log}" + done + done +} + + +echo "################### run test ###################" +IFS="|" +for lite_model in ${lite_model_dir_list[*]}; do + #run lite inference + for img_dir in ${lite_infer_img_dir_list[*]}; do + func_lite "${lite_inference_cmd}" "${lite_model}_opt.nb" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}" + done +done diff --git a/test_tipc/test_paddle2onnx.sh b/test_tipc/test_paddle2onnx.sh index 5dc6e65ec81e6b8674877fc686c8b3650ce93a59..300c61770d2519fad0502147e2cee4a3e4f50ac9 100644 --- a/test_tipc/test_paddle2onnx.sh +++ b/test_tipc/test_paddle2onnx.sh @@ -11,31 +11,33 @@ python=$(func_parser_value "${lines[2]}") # parser params -dataline=$(awk 'NR==111, NR==123{print}' $FILENAME) +dataline=$(awk 'NR==1, NR==12{print}' $FILENAME) IFS=$'\n' lines=(${dataline}) # parser paddle2onnx -padlle2onnx_cmd=$(func_parser_value "${lines[1]}") -infer_model_dir_key=$(func_parser_key "${lines[2]}") -infer_model_dir_value=$(func_parser_value "${lines[2]}") -model_filename_key=$(func_parser_key "${lines[3]}") -model_filename_value=$(func_parser_value "${lines[3]}") -params_filename_key=$(func_parser_key "${lines[4]}") -params_filename_value=$(func_parser_value "${lines[4]}") -save_file_key=$(func_parser_key "${lines[5]}") -save_file_value=$(func_parser_value "${lines[5]}") -opset_version_key=$(func_parser_key "${lines[6]}") -opset_version_value=$(func_parser_value "${lines[6]}") -enable_onnx_checker_key=$(func_parser_key "${lines[7]}") -enable_onnx_checker_value=$(func_parser_value "${lines[7]}") +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +padlle2onnx_cmd=$(func_parser_value "${lines[3]}") +infer_model_dir_key=$(func_parser_key "${lines[4]}") +infer_model_dir_value=$(func_parser_value "${lines[4]}") +model_filename_key=$(func_parser_key "${lines[5]}") +model_filename_value=$(func_parser_value "${lines[5]}") +params_filename_key=$(func_parser_key "${lines[6]}") +params_filename_value=$(func_parser_value "${lines[6]}") +save_file_key=$(func_parser_key "${lines[7]}") +save_file_value=$(func_parser_value "${lines[7]}") +opset_version_key=$(func_parser_key "${lines[8]}") +opset_version_value=$(func_parser_value "${lines[8]}") +enable_onnx_checker_key=$(func_parser_key "${lines[9]}") +enable_onnx_checker_value=$(func_parser_value "${lines[9]}") # parser onnx inference -inference_py=$(func_parser_value "${lines[8]}") -use_gpu_key=$(func_parser_key "${lines[9]}") -use_gpu_value=$(func_parser_value "${lines[9]}") -det_model_key=$(func_parser_key "${lines[10]}") -image_dir_key=$(func_parser_key "${lines[11]}") -image_dir_value=$(func_parser_value "${lines[11]}") +inference_py=$(func_parser_value "${lines[10]}") +use_gpu_key=$(func_parser_key "${lines[11]}") +use_gpu_value=$(func_parser_value "${lines[11]}") +det_model_key=$(func_parser_key "${lines[12]}") +image_dir_key=$(func_parser_key "${lines[13]}") +image_dir_value=$(func_parser_value "${lines[13]}") LOG_PATH="./test_tipc/output" diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh index 9b1e90ed6116f32e232657e30277a747a70904c7..c36935a60fecacea672fd932773a8fb0bdcd619b 100644 --- a/test_tipc/test_serving.sh +++ b/test_tipc/test_serving.sh @@ -2,7 +2,7 @@ source test_tipc/common_func.sh FILENAME=$1 -dataline=$(awk 'NR==67, NR==84{print}' $FILENAME) +dataline=$(awk 'NR==1, NR==18{print}' $FILENAME) # parser params IFS=$'\n' diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index 
0d4e182b2832f65cec08beffe99055603b90982b..c62b6274f8dcbc84d08900c5d228d78fd3c0de1a 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -244,7 +244,7 @@ else export Count=0 USE_GPU_KEY=(${train_use_gpu_value}) for gpu in ${gpu_list[*]}; do - use_gpu=${USE_GPU_KEY[Count]} + train_use_gpu=${USE_GPU_KEY[Count]} Count=$(($Count + 1)) ips="" if [ ${gpu} = "-1" ];then @@ -302,11 +302,20 @@ else set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}") set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}") set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}") - set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}") - save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" - + set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}") + if [ ${#ips} -le 26 ];then + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" + nodes=1 + else + IFS="," + ips_array=(${ips}) + IFS="|" + nodes=${#ips_array[@]} + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}" + fi + # load pretrain from norm training if current trainer is pact or fpgm trainer - if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then + if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then set_pretrain="${load_norm_train_model}" fi @@ -325,7 +334,7 @@ else set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") # save norm trained models to set pretrain for pact training and fpgm training - if [ ${trainer} = ${trainer_norm} ]; then + if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1]; then load_norm_train_model=${set_eval_pretrain} fi # run eval diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d56c9dbaa1304b160521da03c05db2352e341bf2 --- /dev/null +++ b/tools/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
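The tools/*.py changes that follow (eval.py, export_center.py, export_model.py, infer_cls.py, infer_det.py, infer_e2e.py, infer_rec.py, infer_table.py, train.py) all replace the older `init_model` / `load_dygraph_params` helpers with a single `load_model` entry point from `ppocr/utils/save_load.py`. That helper itself is not shown in this patch, so the snippet below is only a rough sketch of what such a consolidated loader could look like; the function body, file suffixes (`.pdparams`, `.pdopt`, `.states`) and parameter-matching rules are assumptions, not the repository's actual implementation.

```python
# Illustrative sketch only: the real ppocr/utils/save_load.py is not part of this diff.
# Assumed contract: load_model(config, model, optimizer=None) restores either a full
# training checkpoint (Global.checkpoints) or pretrained weights (Global.pretrained_model)
# and returns the best-metric dict recorded alongside the checkpoint, if any.
import os
import pickle

import paddle


def load_model(config, model, optimizer=None):
    global_config = config['Global']
    checkpoints = global_config.get('checkpoints')
    pretrained_model = global_config.get('pretrained_model')
    best_model_dict = {}

    if checkpoints:
        # Resume training: restore weights, optimizer state and best-metric info.
        model.set_state_dict(paddle.load(checkpoints + '.pdparams'))
        if optimizer is not None and os.path.exists(checkpoints + '.pdopt'):
            optimizer.set_state_dict(paddle.load(checkpoints + '.pdopt'))
        states_path = checkpoints + '.states'
        if os.path.exists(states_path):
            with open(states_path, 'rb') as f:
                best_model_dict = pickle.load(f)
    elif pretrained_model:
        # Fine-tuning / evaluation / export: keep only parameters whose name
        # and shape match the freshly built model.
        params = paddle.load(pretrained_model + '.pdparams')
        model_state = model.state_dict()
        matched = {
            k: v for k, v in params.items()
            if k in model_state and list(v.shape) == list(model_state[k].shape)
        }
        model.set_state_dict(matched)

    return best_model_dict
```

The call sites in the diffs below follow this shape: evaluation, export and the infer_*.py scripts call `load_model(config, model)`, while train.py additionally passes the optimizer as `load_model(config, model, optimizer)`.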
diff --git a/tools/eval.py b/tools/eval.py index 28247bc57450aaf067fcb405674098eacb990166..c85490a316772e9dfdfe3267087ea3946a2a3b72 100755 --- a/tools/eval.py +++ b/tools/eval.py @@ -27,7 +27,7 @@ from ppocr.data import build_dataloader from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric -from ppocr.utils.save_load import init_model, load_dygraph_params +from ppocr.utils.save_load import load_model from ppocr.utils.utility import print_dict import tools.program as program @@ -60,7 +60,7 @@ def main(): else: model_type = None - best_model_dict = load_dygraph_params(config, model, logger, None) + best_model_dict = load_model(config, model) if len(best_model_dict): logger.info('metric in ckpt ***************') for k, v in best_model_dict.items(): diff --git a/tools/export_center.py b/tools/export_center.py index c46e8b9d58997b9b66c6ce81b2558ecd4cad0e81..30b9c33499b8d0c8044682c6a078e00f683c1d7c 100644 --- a/tools/export_center.py +++ b/tools/export_center.py @@ -27,7 +27,7 @@ sys.path.append(os.path.abspath(os.path.join(__dir__, '..'))) from ppocr.data import build_dataloader from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model, load_dygraph_params +from ppocr.utils.save_load import load_model from ppocr.utils.utility import print_dict import tools.program as program @@ -57,7 +57,7 @@ def main(): model = build_model(config['Architecture']) - best_model_dict = load_dygraph_params(config, model, logger, None) + best_model_dict = load_model(config, model) if len(best_model_dict): logger.info('metric in ckpt ***************') for k, v in best_model_dict.items(): diff --git a/tools/export_model.py b/tools/export_model.py index 64a0d4036303716a632eb93c53f2478f32b42848..9ed8e1b6ace89ded030c946870551c8e078d7340 100755 --- a/tools/export_model.py +++ b/tools/export_model.py @@ -26,7 +26,7 @@ from paddle.jit import to_static from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model from ppocr.utils.logging import get_logger from tools.program import load_config, merge_config, ArgsParser @@ -107,7 +107,7 @@ def main(): else: # base rec model config["Architecture"]["Head"]["out_channels"] = char_num model = build_model(config["Architecture"]) - init_model(config, model) + load_model(config, model) model.eval() save_path = config["Global"]["save_inference_dir"] diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 58170e393cdc9d8441408a89c84aa6f88d683db3..cab918419ab5efbc4a8a11d1669ca6b93e45e789 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -17,7 +17,7 @@ import os import sys import cv2 import numpy as np -import json +import paddle from PIL import Image, ImageDraw, ImageFont import math from paddle import inference @@ -205,7 +205,7 @@ def create_predictor(args, mode, logger): "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2] } max_input_shape = { - "x": [1, 3, 2000, 2000], + "x": [1, 3, 1280, 1280], "conv2d_92.tmp_0": [1, 120, 400, 400], "conv2d_91.tmp_0": [1, 24, 200, 200], "conv2d_59.tmp_0": [1, 96, 400, 400], @@ -255,16 +255,16 @@ def create_predictor(args, mode, logger): opt_input_shape.update(opt_pact_shape) elif mode == "rec": min_input_shape = {"x": [1, 3, 32, 10]} - max_input_shape = {"x": [args.rec_batch_num, 3, 32, 2000]} + max_input_shape = {"x": 
[args.rec_batch_num, 3, 32, 1024]} opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]} elif mode == "cls": min_input_shape = {"x": [1, 3, 48, 10]} - max_input_shape = {"x": [args.rec_batch_num, 3, 48, 2000]} + max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]} opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]} else: min_input_shape = {"x": [1, 3, 10, 10]} - max_input_shape = {"x": [1, 3, 1000, 1000]} - opt_input_shape = {"x": [1, 3, 500, 500]} + max_input_shape = {"x": [1, 3, 512, 512]} + opt_input_shape = {"x": [1, 3, 256, 256]} config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape, opt_input_shape) @@ -601,5 +601,12 @@ def get_rotate_crop_image(img, points): return dst_img +def check_gpu(use_gpu): + if use_gpu and not paddle.is_compiled_with_cuda(): + + use_gpu = False + return use_gpu + + if __name__ == '__main__': pass diff --git a/tools/infer_cls.py b/tools/infer_cls.py index a588cab433442695e3bd395da63e35a2052de501..7522e43907b50b84cc52930ff4eeb8e537cb2c73 100755 --- a/tools/infer_cls.py +++ b/tools/infer_cls.py @@ -32,7 +32,7 @@ import paddle from ppocr.data import create_operators, transform from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model from ppocr.utils.utility import get_image_file_list import tools.program as program @@ -47,7 +47,7 @@ def main(): # build model model = build_model(config['Architecture']) - init_model(config, model) + load_model(config, model) # create data ops transforms = [] diff --git a/tools/infer_det.py b/tools/infer_det.py index ce16da8dc5fffb3f5fdc633aeb00a386a2d60d4f..bb2cca7362e81494018aa3471664d60bef1b852c 100755 --- a/tools/infer_det.py +++ b/tools/infer_det.py @@ -34,7 +34,7 @@ import paddle from ppocr.data import create_operators, transform from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model, load_dygraph_params +from ppocr.utils.save_load import load_model from ppocr.utils.utility import get_image_file_list import tools.program as program @@ -59,7 +59,7 @@ def main(): # build model model = build_model(config['Architecture']) - _ = load_dygraph_params(config, model, logger, None) + load_model(config, model) # build post process post_process_class = build_post_process(config['PostProcess']) diff --git a/tools/infer_e2e.py b/tools/infer_e2e.py index 1cd468b8e552237af31d985b8b68ddbeecba9c96..96dbac8e83cb8651ca19c05d5a680a4efebc6ff6 100755 --- a/tools/infer_e2e.py +++ b/tools/infer_e2e.py @@ -34,7 +34,7 @@ import paddle from ppocr.data import create_operators, transform from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model from ppocr.utils.utility import get_image_file_list import tools.program as program @@ -68,7 +68,7 @@ def main(): # build model model = build_model(config['Architecture']) - init_model(config, model) + load_model(config, model) # build post process post_process_class = build_post_process(config['PostProcess'], diff --git a/tools/infer_rec.py b/tools/infer_rec.py index 29d4b530dfcfb8a3201e12b38c9b9f186f34b627..adc3c1c3c49dcaad5ec8657f5d32b2eca8e10a40 100755 --- a/tools/infer_rec.py +++ b/tools/infer_rec.py @@ -33,7 +33,7 @@ import paddle from ppocr.data import create_operators, transform from ppocr.modeling.architectures import 
build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model from ppocr.utils.utility import get_image_file_list import tools.program as program @@ -58,7 +58,7 @@ def main(): model = build_model(config['Architecture']) - init_model(config, model) + load_model(config, model) # create data ops transforms = [] @@ -75,9 +75,7 @@ def main(): 'gsrm_slf_attn_bias1', 'gsrm_slf_attn_bias2' ] elif config['Architecture']['algorithm'] == "SAR": - op[op_name]['keep_keys'] = [ - 'image', 'valid_ratio' - ] + op[op_name]['keep_keys'] = ['image', 'valid_ratio'] else: op[op_name]['keep_keys'] = ['image'] transforms.append(op) diff --git a/tools/infer_table.py b/tools/infer_table.py index f743d87540f7fd64157a808db156c9f62a042d9c..c73e384046d1fadbbec4bf43a63e13aa8d54fc6c 100644 --- a/tools/infer_table.py +++ b/tools/infer_table.py @@ -34,11 +34,12 @@ from paddle.jit import to_static from ppocr.data import create_operators, transform from ppocr.modeling.architectures import build_model from ppocr.postprocess import build_post_process -from ppocr.utils.save_load import init_model +from ppocr.utils.save_load import load_model from ppocr.utils.utility import get_image_file_list import tools.program as program import cv2 + def main(config, device, logger, vdl_writer): global_config = config['Global'] @@ -53,7 +54,7 @@ def main(config, device, logger, vdl_writer): model = build_model(config['Architecture']) - init_model(config, model, logger) + load_model(config, model) # create data ops transforms = [] @@ -104,4 +105,3 @@ def main(config, device, logger, vdl_writer): if __name__ == '__main__': config, device, logger, vdl_writer = program.preprocess() main(config, device, logger, vdl_writer) - diff --git a/tools/train.py b/tools/train.py index d182af2988cb29511be40a079d2b3e06605ebe28..f3852469eb198ebfec13713fc4d8f139b2c10f2b 100755 --- a/tools/train.py +++ b/tools/train.py @@ -35,7 +35,7 @@ from ppocr.losses import build_loss from ppocr.optimizer import build_optimizer from ppocr.postprocess import build_post_process from ppocr.metrics import build_metric -from ppocr.utils.save_load import init_model, load_dygraph_params +from ppocr.utils.save_load import load_model import tools.program as program dist.get_world_size() @@ -97,7 +97,7 @@ def main(config, device, logger, vdl_writer): # build metric eval_class = build_metric(config['Metric']) # load pretrain model - pre_best_model_dict = load_dygraph_params(config, model, logger, optimizer) + pre_best_model_dict = load_model(config, model, optimizer) logger.info('train dataloader has {} iters'.format(len(train_dataloader))) if valid_dataloader is not None: logger.info('valid dataloader has {} iters'.format(