From e1704f4613e7a6b914c32c873b780e8386858700 Mon Sep 17 00:00:00 2001 From: yunyaoXYY <109218879+yunyaoXYY@users.noreply.github.com> Date: Tue, 21 Mar 2023 11:13:43 +0800 Subject: [PATCH] [FastDeploy] Improve readme and code format. (#9478) * Fix padding value in rec model, and box sort in det model * Add FastDeploy support to deploy PaddleOCR models. * Improve readme * improve readme * improve readme * improve code * improve readme * improve readme and format * improve readme and format * improve readme and format * improve readme * improve readme --- deploy/fastdeploy/ascend/cpp/README.md | 8 +++++++- deploy/fastdeploy/ascend/python/README.md | 8 +++++++- deploy/fastdeploy/cpu-gpu/c/README.md | 6 ++++++ deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt | 2 +- deploy/fastdeploy/cpu-gpu/cpp/README.md | 7 ++++++- deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc | 1 - deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc | 1 - deploy/fastdeploy/cpu-gpu/csharp/README.md | 6 ++++++ deploy/fastdeploy/cpu-gpu/python/README.md | 6 ++++++ deploy/fastdeploy/cpu-gpu/python/infer.py | 12 ++++++------ deploy/fastdeploy/{kunlun => kunlunxin}/README.md | 0 .../{kunlun => kunlunxin}/cpp/CMakeLists.txt | 1 - .../fastdeploy/{kunlun => kunlunxin}/cpp/README.md | 8 +++++++- deploy/fastdeploy/{kunlun => kunlunxin}/cpp/infer.cc | 0 .../{kunlun => kunlunxin}/python/README.md | 8 +++++++- .../fastdeploy/{kunlun => kunlunxin}/python/infer.py | 4 ++-- deploy/fastdeploy/rockchip/cpp/README.md | 8 +++++++- deploy/fastdeploy/rockchip/python/README.md | 9 ++++++++- .../fastdeploy/serving/fastdeploy_serving/README.md | 7 +++++++ deploy/fastdeploy/serving/simple_serving/README.md | 6 ++++++ deploy/fastdeploy/sophgo/cpp/README.md | 6 ++++++ deploy/fastdeploy/sophgo/python/README.md | 6 ++++++ 22 files changed, 101 insertions(+), 19 deletions(-) rename deploy/fastdeploy/{kunlun => kunlunxin}/README.md (100%) rename deploy/fastdeploy/{kunlun => kunlunxin}/cpp/CMakeLists.txt (99%) rename deploy/fastdeploy/{kunlun => kunlunxin}/cpp/README.md (87%) rename deploy/fastdeploy/{kunlun => kunlunxin}/cpp/infer.cc (100%) rename deploy/fastdeploy/{kunlun => kunlunxin}/python/README.md (87%) rename deploy/fastdeploy/{kunlun => kunlunxin}/python/infer.py (98%) diff --git a/deploy/fastdeploy/ascend/cpp/README.md b/deploy/fastdeploy/ascend/cpp/README.md index 03cef163..ed8d63a3 100644 --- a/deploy/fastdeploy/ascend/cpp/README.md +++ b/deploy/fastdeploy/ascend/cpp/README.md @@ -15,8 +15,14 @@ ## 3.运行部署示例 ``` # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/ascend/cpp + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git -cd PaddleOCR/deploy/fastdeploy/PP-OCRv3/ascend/cpp +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph +cd PaddleOCR/deploy/fastdeploy/ascend/cpp mkdir build cd build diff --git a/deploy/fastdeploy/ascend/python/README.md b/deploy/fastdeploy/ascend/python/README.md index 74815703..13a0fb64 100644 --- a/deploy/fastdeploy/ascend/python/README.md +++ b/deploy/fastdeploy/ascend/python/README.md @@ -12,8 +12,14 @@ ## 3.运行部署示例 ``` # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/ascend/python + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git -cd PaddleOCR/deploy/fastdeploy/PP-OCRv3/ascend/python +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph +cd PaddleOCR/deploy/fastdeploy/ascend/python # 下载PP-OCRv3文字检测模型 wget 
https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar diff --git a/deploy/fastdeploy/cpu-gpu/c/README.md b/deploy/fastdeploy/cpu-gpu/c/README.md index efaea2ec..7c586377 100755 --- a/deploy/fastdeploy/cpu-gpu/c/README.md +++ b/deploy/fastdeploy/cpu-gpu/c/README.md @@ -16,7 +16,13 @@ PaddleOCR支持利用FastDeploy在NVIDIA GPU、X86 CPU、飞腾CPU、ARM CPU、I ## 4.运行部署示例 ```bash # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/cpu-gpu/c + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/cpu-gpu/c mkdir build diff --git a/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt b/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt index 9844c3b1..fe4e03f2 100644 --- a/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt +++ b/deploy/fastdeploy/cpu-gpu/cpp/CMakeLists.txt @@ -27,4 +27,4 @@ target_link_libraries(infer_cls ${FASTDEPLOY_LIBS}) # Only Rec add_executable(infer_rec ${PROJECT_SOURCE_DIR}/infer_rec.cc) # 添加FastDeploy库依赖 -target_link_libraries(infer_rec ${FASTDEPLOY_LIBS}) \ No newline at end of file +target_link_libraries(infer_rec ${FASTDEPLOY_LIBS}) diff --git a/deploy/fastdeploy/cpu-gpu/cpp/README.md b/deploy/fastdeploy/cpu-gpu/cpp/README.md index 5ab4edfd..4481f49b 100644 --- a/deploy/fastdeploy/cpu-gpu/cpp/README.md +++ b/deploy/fastdeploy/cpu-gpu/cpp/README.md @@ -2,7 +2,6 @@ # PaddleOCR CPU-GPU C++部署示例 本目录下提供`infer.cc`快速完成PP-OCRv3在CPU/GPU,以及GPU上通过Paddle-TensorRT加速部署的示例. - ## 1. 说明 PaddleOCR支持利用FastDeploy在NVIDIA GPU、X86 CPU、飞腾CPU、ARM CPU、Intel GPU(独立显卡/集成显卡)硬件上快速部署OCR模型. @@ -17,7 +16,13 @@ PaddleOCR支持利用FastDeploy在NVIDIA GPU、X86 CPU、飞腾CPU、ARM CPU、I ```bash # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/cpu-gpu/cpp + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/cpu-gpu/cpp # 下载FastDeploy预编译库,用户可在上文提到的`FastDeploy预编译库`中自行选择合适的版本使用 diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc index 953d9683..789c2a9f 100644 --- a/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc +++ b/deploy/fastdeploy/cpu-gpu/cpp/infer_cls.cc @@ -22,7 +22,6 @@ const char sep = '/'; void InitAndInfer(const std::string &cls_model_dir, const std::string &image_file, const fastdeploy::RuntimeOption &option) { - auto cls_model_file = cls_model_dir + sep + "inference.pdmodel"; auto cls_params_file = cls_model_dir + sep + "inference.pdiparams"; auto cls_option = option; diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc index b1ab9d49..e07e2a0c 100644 --- a/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc +++ b/deploy/fastdeploy/cpu-gpu/cpp/infer_rec.cc @@ -23,7 +23,6 @@ void InitAndInfer(const std::string &rec_model_dir, const std::string &rec_label_file, const std::string &image_file, const fastdeploy::RuntimeOption &option) { - auto rec_model_file = rec_model_dir + sep + "inference.pdmodel"; auto rec_params_file = rec_model_dir + sep + "inference.pdiparams"; auto rec_option = option; diff --git a/deploy/fastdeploy/cpu-gpu/csharp/README.md b/deploy/fastdeploy/cpu-gpu/csharp/README.md index 551db8e6..3a87730e 100755 --- a/deploy/fastdeploy/cpu-gpu/csharp/README.md +++ b/deploy/fastdeploy/cpu-gpu/csharp/README.md @@ -35,7 +35,13 @@ 
PaddleOCR支持利用FastDeploy在NVIDIA GPU、X86 CPU、飞腾CPU、ARM CPU、I https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-x.x.x.tgz # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd D:\FastDeploy\examples\vision\ocr\PP-OCR\cpu-gpu\csharp + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd D:\PaddleOCR\deploy\fastdeploy\cpu-gpu\csharp mkdir build && cd build diff --git a/deploy/fastdeploy/cpu-gpu/python/README.md b/deploy/fastdeploy/cpu-gpu/python/README.md index 9ddb940f..d8143e02 100644 --- a/deploy/fastdeploy/cpu-gpu/python/README.md +++ b/deploy/fastdeploy/cpu-gpu/python/README.md @@ -18,7 +18,13 @@ pip install fastdeploy-gpu-python -f https://www.paddlepaddle.org.cn/whl/fastdep conda config --add channels conda-forge && conda install cudatoolkit=11.2 cudnn=8.2 # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/cpu-gpu/python + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/cpu-gpu/python # 下载PP-OCRv3文字检测模型 diff --git a/deploy/fastdeploy/cpu-gpu/python/infer.py b/deploy/fastdeploy/cpu-gpu/python/infer.py index 23f940c8..8eac8459 100755 --- a/deploy/fastdeploy/cpu-gpu/python/infer.py +++ b/deploy/fastdeploy/cpu-gpu/python/infer.py @@ -87,7 +87,7 @@ def build_option(args): # If use TRT backend, the dynamic shape will be set as follow. # We recommend that users set the length and height of the detection model to a multiple of 32. - # We also recommend that users set the Trt input shape as follow. + # We also recommend that users set the Trt input shape as follow. det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640], [1, 3, 960, 960]) cls_option.set_trt_input_shape("x", [1, 3, 48, 10], @@ -97,7 +97,7 @@ def build_option(args): [args.rec_bs, 3, 48, 320], [args.rec_bs, 3, 48, 2304]) - # Users could save TRT cache file to disk as follow. + # Users could save TRT cache file to disk as follow. det_option.set_trt_cache_file(args.det_model + "/det_trt_cache.trt") cls_option.set_trt_cache_file(args.cls_model + "/cls_trt_cache.trt") rec_option.set_trt_cache_file(args.rec_model + "/rec_trt_cache.trt") @@ -119,7 +119,7 @@ def build_option(args): # If use TRT backend, the dynamic shape will be set as follow. # We recommend that users set the length and height of the detection model to a multiple of 32. - # We also recommend that users set the Trt input shape as follow. + # We also recommend that users set the Trt input shape as follow. det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640], [1, 3, 960, 960]) cls_option.set_trt_input_shape("x", [1, 3, 48, 10], @@ -129,7 +129,7 @@ def build_option(args): [args.rec_bs, 3, 48, 320], [args.rec_bs, 3, 48, 2304]) - # Users could save TRT cache file to disk as follow. + # Users could save TRT cache file to disk as follow. det_option.set_trt_cache_file(args.det_model) cls_option.set_trt_cache_file(args.cls_model) rec_option.set_trt_cache_file(args.rec_model) @@ -199,8 +199,8 @@ ppocr_v3 = fd.vision.ocr.PPOCRv3( det_model=det_model, cls_model=cls_model, rec_model=rec_model) # Set inference batch size for cls model and rec model, the value could be -1 and 1 to positive infinity. 
-# When inference batch size is set to -1, it means that the inference batch size -# of the cls and rec models will be the same as the number of boxes detected by the det model. +# When inference batch size is set to -1, it means that the inference batch size +# of the cls and rec models will be the same as the number of boxes detected by the det model. ppocr_v3.cls_batch_size = args.cls_bs ppocr_v3.rec_batch_size = args.rec_bs diff --git a/deploy/fastdeploy/kunlun/README.md b/deploy/fastdeploy/kunlunxin/README.md similarity index 100% rename from deploy/fastdeploy/kunlun/README.md rename to deploy/fastdeploy/kunlunxin/README.md diff --git a/deploy/fastdeploy/kunlun/cpp/CMakeLists.txt b/deploy/fastdeploy/kunlunxin/cpp/CMakeLists.txt similarity index 99% rename from deploy/fastdeploy/kunlun/cpp/CMakeLists.txt rename to deploy/fastdeploy/kunlunxin/cpp/CMakeLists.txt index e9b33c39..93540a7e 100644 --- a/deploy/fastdeploy/kunlun/cpp/CMakeLists.txt +++ b/deploy/fastdeploy/kunlunxin/cpp/CMakeLists.txt @@ -12,4 +12,3 @@ include_directories(${FASTDEPLOY_INCS}) add_executable(infer_demo ${PROJECT_SOURCE_DIR}/infer.cc) # 添加FastDeploy库依赖 target_link_libraries(infer_demo ${FASTDEPLOY_LIBS}) - diff --git a/deploy/fastdeploy/kunlun/cpp/README.md b/deploy/fastdeploy/kunlunxin/cpp/README.md similarity index 87% rename from deploy/fastdeploy/kunlun/cpp/README.md rename to deploy/fastdeploy/kunlunxin/cpp/README.md index c7a74aa6..3725a807 100644 --- a/deploy/fastdeploy/kunlun/cpp/README.md +++ b/deploy/fastdeploy/kunlunxin/cpp/README.md @@ -12,8 +12,14 @@ ## 3.运行部署示例 ``` # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/kunlunxin/cpp + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git -cd PaddleOCR/deploy/fastdeploy/PP-OCRv3/kunlunxin/cpp +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph +cd PaddleOCR/deploy/fastdeploy/kunlunxin/cpp mkdir build cd build diff --git a/deploy/fastdeploy/kunlun/cpp/infer.cc b/deploy/fastdeploy/kunlunxin/cpp/infer.cc similarity index 100% rename from deploy/fastdeploy/kunlun/cpp/infer.cc rename to deploy/fastdeploy/kunlunxin/cpp/infer.cc diff --git a/deploy/fastdeploy/kunlun/python/README.md b/deploy/fastdeploy/kunlunxin/python/README.md similarity index 87% rename from deploy/fastdeploy/kunlun/python/README.md rename to deploy/fastdeploy/kunlunxin/python/README.md index 4bc86f0e..724fad27 100644 --- a/deploy/fastdeploy/kunlun/python/README.md +++ b/deploy/fastdeploy/kunlunxin/python/README.md @@ -12,8 +12,14 @@ ## 3.运行部署示例 ``` # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/kunlunxin/python + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git -cd PaddleOCR/deploy/fastdeploy/PP-OCRv3/kunlunxin/python +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph +cd PaddleOCR/deploy/fastdeploy/kunlunxin/python # 下载PP-OCRv3文字检测模型 wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar diff --git a/deploy/fastdeploy/kunlun/python/infer.py b/deploy/fastdeploy/kunlunxin/python/infer.py similarity index 98% rename from deploy/fastdeploy/kunlun/python/infer.py rename to deploy/fastdeploy/kunlunxin/python/infer.py index b4d32f66..4780df83 100755 --- a/deploy/fastdeploy/kunlun/python/infer.py +++ b/deploy/fastdeploy/kunlunxin/python/infer.py @@ -92,8 +92,8 @@ ppocr_v3 = fd.vision.ocr.PPOCRv3( det_model=det_model, cls_model=cls_model, 
rec_model=rec_model) # Set inference batch size for cls model and rec model, the value could be -1 and 1 to positive infinity. -# When inference batch size is set to -1, it means that the inference batch size -# of the cls and rec models will be the same as the number of boxes detected by the det model. +# When inference batch size is set to -1, it means that the inference batch size +# of the cls and rec models will be the same as the number of boxes detected by the det model. ppocr_v3.cls_batch_size = args.cls_bs ppocr_v3.rec_batch_size = args.rec_bs diff --git a/deploy/fastdeploy/rockchip/cpp/README.md b/deploy/fastdeploy/rockchip/cpp/README.md index de2d6355..f5fb212d 100755 --- a/deploy/fastdeploy/rockchip/cpp/README.md +++ b/deploy/fastdeploy/rockchip/cpp/README.md @@ -67,8 +67,14 @@ python rockchip/rknpu2_tools/export.py --config_path tools/rknpu2/config/ppocrv3 ``` # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/rockchip/cpp + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git -cd PaddleOCR/deploy/fastdeploy/PP-OCRv3/rockchip/cpp +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph +cd PaddleOCR/deploy/fastdeploy/rockchip/cpp mkdir build cd build diff --git a/deploy/fastdeploy/rockchip/python/README.md b/deploy/fastdeploy/rockchip/python/README.md index cc039633..00d97dd9 100755 --- a/deploy/fastdeploy/rockchip/python/README.md +++ b/deploy/fastdeploy/rockchip/python/README.md @@ -71,8 +71,15 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt # 下载部署示例代码 +# 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/rockchip/python + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git -cd PaddleOCR/deploy/fastdeploy/PP-OCRv3/rockchip/python +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph +cd PaddleOCR/deploy/fastdeploy/rockchip/python # CPU推理 diff --git a/deploy/fastdeploy/serving/fastdeploy_serving/README.md b/deploy/fastdeploy/serving/fastdeploy_serving/README.md index 5dbfff12..7da6ce6f 100755 --- a/deploy/fastdeploy/serving/fastdeploy_serving/README.md +++ b/deploy/fastdeploy/serving/fastdeploy_serving/README.md @@ -30,7 +30,14 @@ PP-OCRv3由det(检测)、cls(分类)和rec(识别)三个模型组成. ### 3.1 下载模型并使用服务化Docker ```bash # 下载仓库代码 +# 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/serving/fastdeploy_serving + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/serving/fastdeploy_serving # 下载模型,图片和字典文件 diff --git a/deploy/fastdeploy/serving/simple_serving/README.md b/deploy/fastdeploy/serving/simple_serving/README.md index 82f85667..913475c7 100644 --- a/deploy/fastdeploy/serving/simple_serving/README.md +++ b/deploy/fastdeploy/serving/simple_serving/README.md @@ -14,7 +14,13 @@ PaddleOCR Python轻量服务化部署是FastDeploy基于Flask框架搭建的可 ## 2. 
启动服务 ```bash # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/serving/simple_serving + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/serving/simple_serving # 下载模型和字典文件 diff --git a/deploy/fastdeploy/sophgo/cpp/README.md b/deploy/fastdeploy/sophgo/cpp/README.md index 450b207c..0b17f7df 100644 --- a/deploy/fastdeploy/sophgo/cpp/README.md +++ b/deploy/fastdeploy/sophgo/cpp/README.md @@ -22,7 +22,13 @@ ### 3.1 下载部署示例代码 ```bash # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/sophgo/cpp + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/sophgo/cpp ``` diff --git a/deploy/fastdeploy/sophgo/python/README.md b/deploy/fastdeploy/sophgo/python/README.md index 0926d889..27dbe269 100644 --- a/deploy/fastdeploy/sophgo/python/README.md +++ b/deploy/fastdeploy/sophgo/python/README.md @@ -15,7 +15,13 @@ ### 2.2 开始部署 ```bash # 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/ocr/PP-OCR/sophgo/python + +# 如果您希望从PaddleOCR下载示例代码,请运行 git clone https://github.com/PaddlePaddle/PaddleOCR.git +# 注意:如果当前分支找不到下面的fastdeploy测试代码,请切换到dygraph分支 +git checkout dygraph cd PaddleOCR/deploy/fastdeploy/sophgo/python # 下载图片 -- GitLab
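For quick reference, the deployment flow that the updated READMEs and `infer.py` examples above walk through can be condensed into the short Python sketch below. It is an illustrative sketch under stated assumptions, not part of the patch itself: the local model directory names, the `12.jpg` test image, GPU device id 0, and the classifier's maximum TensorRT shape of `[cls_bs, 3, 48, 1024]` are placeholders standing in for the files downloaded and the command-line arguments passed in the examples.

```python
# Minimal sketch of the PP-OCRv3 pipeline assembled by the FastDeploy examples above.
# Assumptions: fastdeploy-gpu-python is installed, the ch_PP-OCRv3_det_infer /
# ch_ppocr_mobile_v2.0_cls_infer / ch_PP-OCRv3_rec_infer model directories and
# ppocr_keys_v1.txt have been downloaded as in the READMEs, and 12.jpg is a local
# test image. All paths below are placeholders.
import cv2
import fastdeploy as fd

det_dir = "ch_PP-OCRv3_det_infer"
cls_dir = "ch_ppocr_mobile_v2.0_cls_infer"
rec_dir = "ch_PP-OCRv3_rec_infer"
rec_label_file = "ppocr_keys_v1.txt"
cls_bs, rec_bs = 1, 6

# One RuntimeOption per model. The Paddle-TensorRT path needs dynamic-shape ranges;
# detection input height/width is recommended to be a multiple of 32.
det_option = fd.RuntimeOption()
cls_option = fd.RuntimeOption()
rec_option = fd.RuntimeOption()
for opt in (det_option, cls_option, rec_option):
    opt.use_gpu(0)          # assumed device id
    opt.use_trt_backend()
det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
                               [1, 3, 960, 960])
cls_option.set_trt_input_shape("x", [1, 3, 48, 10], [cls_bs, 3, 48, 320],
                               [cls_bs, 3, 48, 1024])   # max shape is an assumption
rec_option.set_trt_input_shape("x", [1, 3, 48, 10], [rec_bs, 3, 48, 320],
                               [rec_bs, 3, 48, 2304])
# Caching the built TRT engines avoids rebuilding them on every run.
det_option.set_trt_cache_file(det_dir + "/det_trt_cache.trt")
cls_option.set_trt_cache_file(cls_dir + "/cls_trt_cache.trt")
rec_option.set_trt_cache_file(rec_dir + "/rec_trt_cache.trt")

det_model = fd.vision.ocr.DBDetector(
    det_dir + "/inference.pdmodel", det_dir + "/inference.pdiparams",
    runtime_option=det_option)
cls_model = fd.vision.ocr.Classifier(
    cls_dir + "/inference.pdmodel", cls_dir + "/inference.pdiparams",
    runtime_option=cls_option)
rec_model = fd.vision.ocr.Recognizer(
    rec_dir + "/inference.pdmodel", rec_dir + "/inference.pdiparams",
    rec_label_file, runtime_option=rec_option)

# Chain the three models into a PP-OCRv3 pipeline. A batch size of -1 makes the
# cls/rec batch equal to the number of boxes returned by the det model.
ppocr_v3 = fd.vision.ocr.PPOCRv3(
    det_model=det_model, cls_model=cls_model, rec_model=rec_model)
ppocr_v3.cls_batch_size = cls_bs
ppocr_v3.rec_batch_size = rec_bs

im = cv2.imread("12.jpg")
result = ppocr_v3.predict(im)
print(result)
cv2.imwrite("visualized_result.jpg", fd.vision.vis_ppocr(im, result))
```

Running this end to end requires the FastDeploy GPU wheel plus CUDA/cuDNN as described in the cpu-gpu Python README; on CPU-only machines the TensorRT-specific calls can be dropped and `opt.use_cpu()` used instead.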