From 9a703419158c7288b5ecb0243cc4519a4ed329d2 Mon Sep 17 00:00:00 2001
From: FlyingQianMM <245467267@qq.com>
Date: Mon, 7 Sep 2020 09:55:51 +0000
Subject: [PATCH] change fluid_inference version

---
 deploy/cpp/src/paddlex.cpp        |  6 +++---
 docs/change_log.md                | 23 +++++++++++++++++++++--
 docs/deploy/index.rst             |  2 ++
 docs/deploy/raspberry/index.rst   |  2 +-
 docs/deploy/server/cpp/linux.md   | 19 +++++++++----------
 docs/deploy/server/cpp/windows.md | 14 +++++++-------
 6 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/deploy/cpp/src/paddlex.cpp b/deploy/cpp/src/paddlex.cpp
index 57d35b8..1bc96d8 100644
--- a/deploy/cpp/src/paddlex.cpp
+++ b/deploy/cpp/src/paddlex.cpp
@@ -85,7 +85,7 @@ void Model::create_predictor(const std::string& model_dir,
 #endif
   // enable Memory Optim
   config.EnableMemoryOptim();
-  if (use_trt) {
+  if (use_trt && use_gpu) {
     config.EnableTensorRtEngine(
         1 << 20 /* workspace_size*/,
         32 /* max_batch_size*/,
@@ -283,7 +283,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
   im_tensor->Reshape({1, 3, h, w});
   im_tensor->copy_from_cpu(inputs_.im_data_.data());
 
-  if (name == "YOLOv3") {
+  if (name == "YOLOv3" || name == "PPYOLO") {
     auto im_size_tensor = predictor_->GetInputTensor("im_size");
     im_size_tensor->Reshape({1, 2});
     im_size_tensor->copy_from_cpu(inputs_.ori_im_size_.data());
@@ -442,7 +442,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
                inputs_data.begin() + i * 3 * h * w);
   }
   im_tensor->copy_from_cpu(inputs_data.data());
-  if (name == "YOLOv3") {
+  if (name == "YOLOv3" || name == "PPYOLO") {
     auto im_size_tensor = predictor_->GetInputTensor("im_size");
     im_size_tensor->Reshape({batch_size, 2});
     std::vector<int> inputs_data_size(batch_size * 2);
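For context, here is a minimal sketch of the predictor setup these hunks touch, assuming the Paddle 1.8 `AnalysisConfig` API that deploy/cpp already uses; the wrapper name `MakePredictor` and the two TensorRT arguments shown are illustrative, not part of the patch:

```cpp
#include <paddle_inference_api.h>  // paddle::AnalysisConfig, paddle::CreatePaddlePredictor

#include <memory>
#include <string>

// EnableTensorRtEngine() configures a GPU-only subgraph engine, so it must be
// guarded by both flags; requesting TensorRT on a CPU-only config is exactly
// what the `use_trt && use_gpu` fix above rules out.
std::unique_ptr<paddle::PaddlePredictor> MakePredictor(
    const std::string& model_dir, bool use_gpu, bool use_trt) {
  paddle::AnalysisConfig config;
  config.SetModel(model_dir);
  if (use_gpu) {
    config.EnableUseGpu(100 /* initial GPU memory pool, MB */, 0 /* device id */);
  } else {
    config.DisableGpu();
  }
  config.SwitchUseFeedFetchOps(false);  // required for GetInputTensor()
  config.EnableMemoryOptim();
  if (use_trt && use_gpu) {  // the guard introduced by this commit
    config.EnableTensorRtEngine(1 << 20 /* workspace_size */,
                                32 /* max_batch_size */);
  }
  return paddle::CreatePaddlePredictor(config);
}
```

The `PPYOLO` additions in the two predict paths follow from the two detectors sharing an input signature: like YOLOv3, PPYOLO takes an extra `im_size` tensor carrying the original image height and width, so it can reuse the existing branch unchanged.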
diff --git a/docs/change_log.md b/docs/change_log.md
index 08e95f7..59a7b78 100644
--- a/docs/change_log.md
+++ b/docs/change_log.md
@@ -1,5 +1,24 @@
 # Changelog
 
+**v1.2.0** 2020.09.07
+- Model updates
+> - Added the object detection model PPYOLO [details](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-ppyolo)
+> - Added built-in COCO-pretrained weights for FasterRCNN, MaskRCNN, YOLOv3, DeepLabv3p, and other models
+> - Added the HRNet_W18 backbone for the FasterRCNN and MaskRCNN detection models [details](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn)
+> - Added the MobileNetV3_large_ssld backbone for the DeepLabv3p semantic segmentation model [details](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p)
+
+- Deployment updates
+> - Added an OpenVINO deployment solution [details](https://paddlex.readthedocs.io/zh_CN/develop/deploy/openvino/index.html)
+> - Added a Raspberry Pi deployment solution [details](https://paddlex.readthedocs.io/zh_CN/develop/deploy/raspberry/index.html)
+> - Optimized pre- and post-processing performance in the PaddleLite Android deployment code
+> - Improved the server-side C++ deployment code, adding parameters such as use_mkl; MKL-DNN significantly speeds up model inference on CPU
+
+- Industry case updates
+> - Added an RGB remote-sensing image segmentation case [details](https://paddlex.readthedocs.io/zh_CN/develop/examples/remote_sensing.html)
+> - Added a multi-channel remote-sensing image segmentation case [details](https://paddlex.readthedocs.io/zh_CN/develop/examples/multi-channel_remote_sensing/README.html)
+
+- Other
+> - Added dataset splitting; ImageNet, PascalVOC, MSCOCO, and semantic segmentation datasets can now be split from the command line [details](https://paddlex.readthedocs.io/zh_CN/develop/data/format/classification.html#id2)
 
 
 **v1.1.0** 2020.07.12
@@ -13,8 +32,8 @@
 > - Added Jetson and Paddle Lite model deployment and inference solutions
 > - Added batch prediction to the C++ deployment code, with OpenMP-parallelized preprocessing
 - Added 2 PaddleX industry cases
-> - [Portrait segmentation case]()
-> - [Industrial meter reading case]()
+> - [Portrait segmentation case](https://paddlex.readthedocs.io/zh_CN/develop/examples/human_segmentation.html)
+> - [Industrial meter reading case](https://paddlex.readthedocs.io/zh_CN/develop/examples/meter_reader.html)
 - Added data format conversion: data annotated with LabelMe, Colabeler (精灵标注助手), and the EasyData platform can be converted into formats PaddleX loads
 - Updated the PaddleX documentation and improved its structure
 
diff --git a/docs/deploy/index.rst b/docs/deploy/index.rst
index cbcea21..0bc3705 100755
--- a/docs/deploy/index.rst
+++ b/docs/deploy/index.rst
@@ -11,3 +11,5 @@
    server/index
    nvidia-jetson.md
    paddlelite/index
+   openvino/index
+   raspberry/index
diff --git a/docs/deploy/raspberry/index.rst b/docs/deploy/raspberry/index.rst
index e5c1921..2aeb2bc 100644
--- a/docs/deploy/raspberry/index.rst
+++ b/docs/deploy/raspberry/index.rst
@@ -8,4 +8,4 @@
 
    Raspberry.md
    python.md
-   export_nb_model.md
\ No newline at end of file
+   export_nb_model.md
diff --git a/docs/deploy/server/cpp/linux.md b/docs/deploy/server/cpp/linux.md
index fb35824..452c49b 100644
--- a/docs/deploy/server/cpp/linux.md
+++ b/docs/deploy/server/cpp/linux.md
@@ -19,16 +19,15 @@
 
 ### Step2: Download the PaddlePaddle C++ inference library paddle_inference
 
-The PaddlePaddle C++ inference library ships prebuilt packages for different `CPU` and `CUDA` configurations and for whether TensorRT is supported. PaddleX currently depends on Paddle 1.8; several Paddle inference library builds are provided below:
+The PaddlePaddle C++ inference library ships prebuilt packages for different `CPU` and `CUDA` configurations and for whether TensorRT is supported. PaddleX currently depends on Paddle 1.8.4; several Paddle inference library builds are provided below:
 
-| Version | Inference library (1.8.2) |
+| Version | Inference library (1.8.4) |
 | ---- | ---- |
-| ubuntu14.04_cpu_avx_mkl | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/1.8.2-cpu-avx-mkl/fluid_inference.tgz) |
-| ubuntu14.04_cpu_avx_openblas | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/1.8.2-cpu-avx-openblas/fluid_inference.tgz) |
-| ubuntu14.04_cpu_noavx_openblas | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/1.8.2-cpu-noavx-openblas/fluid_inference.tgz) |
-| ubuntu14.04_cuda9.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/1.8.2-gpu-cuda9-cudnn7-avx-mkl/fluid_inference.tgz) |
-| ubuntu14.04_cuda10.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/1.8.2-gpu-cuda10-cudnn7-avx-mkl/fluid_inference.tgz ) |
-| ubuntu14.04_cuda10.1_cudnn7.6_avx_mkl_trt6 | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/1.8.2-gpu-cuda10.1-cudnn7.6-avx-mkl-trt6%2Ffluid_inference.tgz) |
+| ubuntu14.04_cpu_avx_mkl | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/latest-cpu-avx-mkl/fluid_inference.tgz) |
+| ubuntu14.04_cpu_avx_openblas | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/latest-cpu-avx-openblas/fluid_inference.tgz) |
+| ubuntu14.04_cpu_noavx_openblas | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/latest-cpu-noavx-openblas/fluid_inference.tgz) |
+| ubuntu14.04_cuda9.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/latest-gpu-cuda9-cudnn7-avx-mkl/fluid_inference.tgz) |
+| ubuntu14.04_cuda10.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-inference-lib.bj.bcebos.com/latest-gpu-cuda10-cudnn7-avx-mkl/fluid_inference.tgz) |
 
 For more and newer builds, download as appropriate from the [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html)
@@ -42,7 +41,7 @@ fluid_inference
 └── version.txt # version and build information
 ```
 
-**Note:** except for `nv-jetson-cuda10-cudnn7.5-trt5`, all prebuilt packages are built with `GCC 4.8.5`; using a newer `GCC` may cause `ABI` compatibility issues. It is recommended to downgrade or [build the inference library yourself](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html#id12).
+**Note:** except for nv-jetson-cuda10-cudnn7.5-trt5, all prebuilt packages are built with `GCC 4.8.5`; using a newer `GCC` may cause `ABI` compatibility issues. It is recommended to downgrade or [build the inference library yourself](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/build_and_install_lib_cn.html#id12).
 
 ### Step3: Build
@@ -97,7 +96,7 @@ make
 ```
 
 **Note:** on Linux the build automatically downloads OPENCV, PaddleX-Encryption, and YAML. If the build environment cannot reach the internet, download them manually:
 
-- [opencv3gcc4.8.tar.bz2](https://paddleseg.bj.bcebos.com/deploy/docker/opencv3gcc4.8.tar.bz2)
+- [opencv3.4.6gcc4.8ffmpeg.tar.gz2](https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2)
 - [paddlex-encryption.zip](https://bj.bcebos.com/paddlex/tools/paddlex-encryption.zip)
 - [yaml-cpp.zip](https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip)
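Before wiring a downloaded package into the full CMake build, it can be worth a standalone smoke test linked only against `paddle/include` and `paddle/lib` from the unpacked `fluid_inference` directory. A minimal sketch, assuming a PaddleX-exported model directory containing the combined `__model__`/`__params__` files (adjust the names if your export differs):

```cpp
#include <paddle_inference_api.h>

#include <iostream>
#include <string>

int main(int argc, char* argv[]) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " <exported_model_dir>" << std::endl;
    return 1;
  }
  const std::string model_dir = argv[1];

  paddle::AnalysisConfig config;
  // PaddleX exports combined model/parameter files under these names.
  config.SetModel(model_dir + "/__model__", model_dir + "/__params__");
  config.DisableGpu();    // CPU-only smoke test
  config.EnableMKLDNN();  // mirrors the use_mkl option noted in the changelog

  auto predictor = paddle::CreatePaddlePredictor(config);
  std::cout << (predictor ? "predictor created" : "predictor creation failed")
            << std::endl;
  return predictor ? 0 : 1;
}
```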
diff --git a/docs/deploy/server/cpp/windows.md b/docs/deploy/server/cpp/windows.md
index 151433d..c39d962 100644
--- a/docs/deploy/server/cpp/windows.md
+++ b/docs/deploy/server/cpp/windows.md
@@ -26,15 +26,15 @@ git clone https://github.com/PaddlePaddle/PaddleX.git
 
 ### Step2: Download the PaddlePaddle C++ inference library paddle_inference
 
-The PaddlePaddle C++ inference library ships prebuilt packages for GPU or CPU use, TensorRT support, and different CUDA versions. PaddleX currently depends on Paddle 1.8; download links for the Paddle 1.8 inference libraries are listed below:
+The PaddlePaddle C++ inference library ships prebuilt packages for GPU or CPU use, TensorRT support, and different CUDA versions. PaddleX currently depends on Paddle 1.8.4; download links for the Paddle 1.8.4 inference libraries are listed below:
 
-| Version | Inference library (1.8.2) | Compiler | Build tool | cuDNN | CUDA |
+| Version | Inference library (1.8.4) | Compiler | Build tool | cuDNN | CUDA |
 | ---- | ---- | ---- | ---- | ---- | ---- |
-| cpu_avx_mkl | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.2/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 |
-| cpu_avx_openblas | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.2/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 |
-| cuda9.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.2/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
-| cuda9.0_cudnn7_avx_openblas | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.2/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
-| cuda10.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.2/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
+| cpu_avx_mkl | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 |
+| cpu_avx_openblas | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/cpu/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 |
+| cuda9.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
+| cuda9.0_cudnn7_avx_openblas | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/open/post97/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.4.1 | 9.0 |
+| cuda10.0_cudnn7_avx_mkl | [paddle_inference](https://paddle-wheel.bj.bcebos.com/1.8.4/win-infer/mkl/post107/fluid_inference_install_dir.zip) | MSVC 2015 update 3 | CMake v3.16.0 | 7.5.0 | 10.0 |
 
 Choose the download that matches your setup; if none of the versions above meets your needs, pick a suitable build from the [C++ inference library download list](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/advanced_guide/inference_deployment/inference/windows_cpp_inference.html).
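Both platform guides now pin the library at 1.8.4, and the directory layout in linux.md shows a `version.txt` carrying version and build information at the top of the package. A trivial way to confirm which build was actually unpacked (the relative path is an assumption about where the archive was extracted):

```cpp
#include <fstream>
#include <iostream>
#include <string>

int main() {
  // Assumes the archive was unpacked into ./fluid_inference.
  std::ifstream version_file("fluid_inference/version.txt");
  if (!version_file) {
    std::cerr << "version.txt not found; check the unpack path" << std::endl;
    return 1;
  }
  std::string line;
  // Typically lists the Paddle version/commit and build switches (MKL, GPU, TensorRT).
  while (std::getline(version_file, line)) {
    std::cout << line << std::endl;
  }
  return 0;
}
```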