diff --git a/deploy/fastdeploy/cpu-gpu/cpp/infer.cc b/deploy/fastdeploy/cpu-gpu/cpp/infer.cc
index 6906a01717fff23029de352b375c5e7a68052074..1464edacdf6a74c21b9025a73ece8470595f9538 100644
--- a/deploy/fastdeploy/cpu-gpu/cpp/infer.cc
+++ b/deploy/fastdeploy/cpu-gpu/cpp/infer.cc
@@ -152,9 +152,9 @@ int main(int argc, char *argv[]) {
     option.UsePaddleBackend(); // Paddle Inference
   } else if (flag == 5) {
     option.UseGpu();
-    option.UseTrtBackend();
-    option.EnablePaddleTrtCollectShape();
-    option.EnablePaddleToTrt(); // Paddle-TensorRT
+    option.UsePaddleInferBackend();
+    option.paddle_infer_option.collect_trt_shape = true;
+    option.paddle_infer_option.enable_trt = true; // Paddle-TensorRT
   } else if (flag == 6) {
     option.UseGpu();
     option.UseOrtBackend(); // ONNX Runtime
diff --git a/deploy/fastdeploy/cpu-gpu/csharp/README.md b/deploy/fastdeploy/cpu-gpu/csharp/README.md
index 335c774aed01cacd26ebdd82af924b8f62eebeb2..551db8e62ed1938ce71e530454326152ecfff268 100755
--- a/deploy/fastdeploy/cpu-gpu/csharp/README.md
+++ b/deploy/fastdeploy/cpu-gpu/csharp/README.md
@@ -70,7 +70,7 @@ infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v3.0_cls_infer ./ch_PP-OCRv
 
 ## 5. Introduction to the PP-OCRv3 C# API
 A brief introduction to the PP-OCRv3 C# API is given below.
-- To switch the deployment backend or perform other customizations, see the [C# Runtime API](https://github.com/PaddlePaddle/FastDeploy/blob/develop/csharp/fastdeploy/runtime_option.cs).
+- To switch the deployment backend or perform other customizations, see the [C# Runtime API](https://baidu-paddle.github.io/fastdeploy-api/csharp/html/classfastdeploy_1_1RuntimeOption.html).
 - For more PP-OCR C# APIs, see the [C# PP-OCR API](https://github.com/PaddlePaddle/FastDeploy/blob/develop/csharp/fastdeploy/vision/ocr/model.cs)
 
 ### Model
diff --git a/deploy/fastdeploy/cpu-gpu/python/infer.py b/deploy/fastdeploy/cpu-gpu/python/infer.py
index cb5d21c4b944a683158905be217b8740ebcf97e7..23f940c89aef9b4bae4a45e13ee0d1712b884a9b 100755
--- a/deploy/fastdeploy/cpu-gpu/python/infer.py
+++ b/deploy/fastdeploy/cpu-gpu/python/infer.py
@@ -105,17 +105,17 @@ def build_option(args):
     elif args.backend.lower() == "pptrt":
         assert args.device.lower(
         ) == "gpu", "Paddle-TensorRT backend require inference on device GPU."
-        det_option.use_trt_backend()
-        det_option.enable_paddle_trt_collect_shape()
-        det_option.enable_paddle_to_trt()
+        det_option.use_paddle_infer_backend()
+        det_option.paddle_infer_option.collect_trt_shape = True
+        det_option.paddle_infer_option.enable_trt = True
 
-        cls_option.use_trt_backend()
-        cls_option.enable_paddle_trt_collect_shape()
-        cls_option.enable_paddle_to_trt()
+        cls_option.use_paddle_infer_backend()
+        cls_option.paddle_infer_option.collect_trt_shape = True
+        cls_option.paddle_infer_option.enable_trt = True
 
-        rec_option.use_trt_backend()
-        rec_option.enable_paddle_trt_collect_shape()
-        rec_option.enable_paddle_to_trt()
+        rec_option.use_paddle_infer_backend()
+        rec_option.paddle_infer_option.collect_trt_shape = True
+        rec_option.paddle_infer_option.enable_trt = True
 
         # If use TRT backend, the dynamic shape will be set as follow.
         # We recommend that users set the length and height of the detection model to a multiple of 32.
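
Note on the migration above: the patch replaces the deprecated RuntimeOption helpers (`use_trt_backend()` + `enable_paddle_trt_collect_shape()` + `enable_paddle_to_trt()`, or `UseTrtBackend()` / `EnablePaddleTrtCollectShape()` / `EnablePaddleToTrt()` in C++) with an explicit selection of the Paddle Inference backend whose TensorRT subgraph engine is toggled through `paddle_infer_option`. A minimal standalone Python sketch of the new pattern follows; the model paths, the input tensor name `"x"`, and the shape ranges are illustrative (taken from the surrounding detection-model examples), not part of this patch.

```python
# Sketch: Paddle Inference backend with the Paddle-TensorRT subgraph
# engine, using the option style introduced by this patch.
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu(0)                   # run on GPU 0
option.use_paddle_infer_backend()   # replaces use_trt_backend()
option.paddle_infer_option.collect_trt_shape = True  # was enable_paddle_trt_collect_shape()
option.paddle_infer_option.enable_trt = True         # was enable_paddle_to_trt()

# Dynamic shape range for the detection model input; per the comment in
# the diff, heights and widths should be multiples of 32. The tensor
# name "x" and the ranges here are illustrative.
option.set_trt_input_shape("x",
                           min_shape=[1, 3, 64, 64],
                           opt_shape=[1, 3, 640, 640],
                           max_shape=[1, 3, 960, 960])

det_model = fd.vision.ocr.DBDetector(
    "ch_PP-OCRv3_det_infer/inference.pdmodel",
    "ch_PP-OCRv3_det_infer/inference.pdiparams",
    runtime_option=option)
```

The same split applies in the C++ hunk: `UsePaddleInferBackend()` selects the backend, and the `paddle_infer_option.collect_trt_shape` / `paddle_infer_option.enable_trt` members enable the TensorRT path.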