diff --git a/deploy/third_engine/demo_openvino_kpts/README.md b/deploy/third_engine/demo_openvino_kpts/README.md
index 9730c4b914359020f637208c4e0a99170bd9b253..d7d7ce0de80eabcfeffd580d920a25e1341f575b 100644
--- a/deploy/third_engine/demo_openvino_kpts/README.md
+++ b/deploy/third_engine/demo_openvino_kpts/README.md
@@ -2,8 +2,12 @@
 This fold provides TinyPose inference code using [Intel's OpenVINO Toolkit](https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit.html). Most of the implements in this fold are same as *demo_ncnn*.
 
-**Recommand** to use the xxx.tar.gz file to install instead of github method, [link](https://registrationcenter-download.intel.com/akdlm/irc_nas/18096/l_openvino_toolkit_p_2021.4.689.tgz).
-
+**Recommendations**
+1. Install from the tar.gz package instead of the GitHub source: [link](https://registrationcenter-download.intel.com/akdlm/irc_nas/18096/l_openvino_toolkit_p_2021.4.689.tgz).
+2. You can also deploy OpenVINO with Docker:
+```
+docker pull openvino/ubuntu18_dev:2021.4.1
+```
 
 ## Install OpenVINO Toolkit
 
@@ -59,7 +63,30 @@ source /opt/intel/openvino_2021/bin/setupvars.sh
 
 ## Convert model
 
-   Convert to OpenVINO
+   **1. Convert to ONNX**
+
+   Create picodet_m_416_coco.onnx and tinypose256.onnx.
+
+   Example:
+
+   ```shell
+   modelName=picodet_m_416_coco
+   # export model
+   python tools/export_model.py \
+            -c configs/picodet/${modelName}.yml \
+            -o weights=${modelName}.pdparams \
+            --output_dir=inference_model
+   # convert to onnx
+   paddle2onnx --model_dir inference_model/${modelName} \
+   --model_filename model.pdmodel  \
+   --params_filename model.pdiparams \
+   --opset_version 11 \
+   --save_file ${modelName}.onnx
+   # onnxsim
+   python -m onnxsim ${modelName}.onnx ${modelName}_sim.onnx
+   ```
+
+   **2. Convert to OpenVINO**
 
 ``` shell
 cd /openvino_2021/deployment_tools/model_optimizer
@@ -75,9 +102,11 @@ source /opt/intel/openvino_2021/bin/setupvars.sh
 Then convert model. Notice: mean_values and scale_values should be the same with your training settings in YAML config file.
 
 ```shell
-python3 mo_onnx.py --input_model <ONNX_MODEL> --mean_values [103.53,116.28,123.675] --scale_values [57.375,57.12,58.395]
+mo_onnx.py --input_model <ONNX_MODEL> --mean_values [103.53,116.28,123.675] --scale_values [57.375,57.12,58.395] --input_shape [1,3,256,192]
 ```
 
+**Note: Newer versions of the OpenVINO conversion tool may raise an error on the Resize op. If you run into this problem, please try version openvino_2021.4.689.**
+
 ## Build
 
 ### Windows
@@ -101,11 +130,41 @@ make
 
 ## Run demo
+
 Download PicoDet openvino model [PicoDet openvino model download link](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_openvino.zip).
-Download TinyPose openvino model [TinyPose openvino model download link](https://paddledet.bj.bcebos.com/deploy/third_engine/tinypose_256_openvino.zip).
+
+Download the TinyPose OpenVINO model: [TinyPose openvino model download link](https://bj.bcebos.com/v1/paddledet/deploy/third_engine/demo_openvino_kpts.tar.gz). The original PaddlePaddle model is [Tinypose256](https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_enhance/tinypose_256x192.pdparams).
 
 move picodet and tinypose openvino model files to the demo's weight folder.
 
+Note:
+1. The model output node names may change with newer versions of paddle/paddle2onnx/onnxsim/openvino. If the code cannot find "conv2d_441.tmp_1" or "argmax_0.tmp_0", please check the output node names of your own model (see the sketch below).
+2. If you see the error "Cannot find blob with name: transpose_1.tmp_0", your PicoDet model is an old version. You can modify the code below to fix it.
+
+```
+#picodet_openvino.h line 50-54
+
+  std::vector<HeadInfo> heads_info_{
+      // cls_pred|dis_pred|stride
+      {"transpose_0.tmp_0", "transpose_1.tmp_0", 8},
+      {"transpose_2.tmp_0", "transpose_3.tmp_0", 16},
+      {"transpose_4.tmp_0", "transpose_5.tmp_0", 32},
+      {"transpose_6.tmp_0", "transpose_7.tmp_0", 64},
+  };
+
+modify to:
+
+  std::vector<HeadInfo> heads_info_{
+      // cls_pred|dis_pred|stride
+      {"save_infer_model/scale_0.tmp_1", "save_infer_model/scale_4.tmp_1", 8},
+      {"save_infer_model/scale_1.tmp_1", "save_infer_model/scale_5.tmp_1", 16},
+      {"save_infer_model/scale_2.tmp_1", "save_infer_model/scale_6.tmp_1", 32},
+      {"save_infer_model/scale_3.tmp_1", "save_infer_model/scale_7.tmp_1", 64},
+  };
+```
+
+3. You can view your ONNX model with [Netron](https://netron.app/).
+
 ### Edit file
 ```
 step1:
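The notes above recommend checking the output node names of your own converted model when the demo cannot find "conv2d_441.tmp_1" / "argmax_0.tmp_0" or "transpose_1.tmp_0". Besides Netron, the names can be printed directly from the converted IR. The following is a minimal sketch against the OpenVINO 2021.4 Inference Engine C++ API; the model path is only an example and should point at your own .xml file.

```cpp
#include <inference_engine.hpp>

#include <iostream>

// Print every output node name of an IR model so it can be matched against
// the names hard-coded in picodet_openvino.h / keypoint_detector.cpp.
int main() {
  InferenceEngine::Core ie;
  // Example path; use your own converted model here.
  auto network = ie.ReadNetwork("./weight/tinypose256.xml");
  for (const auto& output : network.getOutputsInfo()) {
    std::cout << "output node: " << output.first << std::endl;
  }
  return 0;
}
```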
diff --git a/deploy/third_engine/demo_openvino_kpts/keypoint_detector.cpp b/deploy/third_engine/demo_openvino_kpts/keypoint_detector.cpp
index 8757f781a72d472c7701c8947775f5fb5f7771a4..4200dd93b3375fa1d9c511aaacd4ebf4e0903189 100644
--- a/deploy/third_engine/demo_openvino_kpts/keypoint_detector.cpp
+++ b/deploy/third_engine/demo_openvino_kpts/keypoint_detector.cpp
@@ -74,7 +74,7 @@ cv::Mat VisualizeKptsResult(const cv::Mat& img,
 
 void KeyPointDetector::Postprocess(std::vector<float>& output,
                                    std::vector<uint64_t>& output_shape,
-                                   std::vector<int64_t>& idxout,
+                                   std::vector<float>& idxout,
                                    std::vector<uint64_t>& idx_shape,
                                    std::vector<KeyPointResult>* result,
                                    std::vector<std::vector<float>>& center_bs,
@@ -141,7 +141,7 @@ void KeyPointDetector::Predict(const std::vector<cv::Mat> imgs,
     infer_request_.Infer();
 
     InferenceEngine::Blob::Ptr output_blob =
-        infer_request_.GetBlob("save_infer_model/scale_0.tmp_1");
+        infer_request_.GetBlob("conv2d_441.tmp_1");
     auto output_shape = output_blob->getTensorDesc().getDims();
     InferenceEngine::MemoryBlob::Ptr moutput =
         InferenceEngine::as<InferenceEngine::MemoryBlob>(output_blob);
@@ -159,12 +159,15 @@ void KeyPointDetector::Predict(const std::vector<cv::Mat> imgs,
     for (int j = 0; j < output_shape.size(); ++j) {
       output_size *= output_shape[j];
     }
+
     output_data_.resize(output_size);
     std::copy_n(data, output_size, output_data_.data());
+  }
+
   InferenceEngine::Blob::Ptr output_blob2 =
-      infer_request_.GetBlob("save_infer_model/scale_1.tmp_1");
+      infer_request_.GetBlob("argmax_0.tmp_0");
   auto idx_shape = output_blob2->getTensorDesc().getDims();
   InferenceEngine::MemoryBlob::Ptr moutput2 =
       InferenceEngine::as<InferenceEngine::MemoryBlob>(output_blob2);
@@ -175,7 +178,7 @@ void KeyPointDetector::Predict(const std::vector<cv::Mat> imgs,
     auto minputHolder = moutput2->rmap();
-    // Original I64 precision was converted to I32
+    // Original I64 precision was converted to FP32
     auto data = minputHolder.as<const InferenceEngine::PrecisionTrait<
-        InferenceEngine::Precision::I32>::value_type*>();
+        InferenceEngine::Precision::FP32>::value_type*>();
 
     // Calculate output length
     int output_size = 1;
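For reference, the two blobs fetched above are the TinyPose heatmap and the per-joint argmax indices, both now requested as FP32. The sketch below shows, outside of the demo's classes, how such a pair is decoded into a score and a heatmap coordinate in the same way `get_final_preds` does it. The shapes (heatmap [1, 17, 64, 48] and indices [1, 17] for a 256x192 input) and the sample values are assumptions for illustration only.

```cpp
#include <cstdio>
#include <vector>

int main() {
  // Assumed TinyPose-256x192 output shapes: heatmap [1, 17, 64, 48], argmax [1, 17].
  const int num_joints = 17, heat_h = 64, heat_w = 48;
  std::vector<float> heatmap(num_joints * heat_h * heat_w, 0.f);
  std::vector<float> idx_data(num_joints, 0.f);  // argmax blob, read back as FP32

  // Fake a single response: joint 0 peaks at row 10, column 20 with score 0.9.
  idx_data[0] = static_cast<float>(10 * heat_w + 20);
  heatmap[10 * heat_w + 20] = 0.9f;

  for (int j = 0; j < num_joints; ++j) {
    int idx = static_cast<int>(idx_data[j]);          // flat index into one joint's heatmap
    float score = heatmap[j * heat_h * heat_w + idx];  // confidence at the peak
    int x = idx % heat_w;                              // heatmap column
    int y = idx / heat_w;                              // heatmap row
    std::printf("joint %d: score=%.2f heatmap_xy=(%d, %d)\n", j, score, x, y);
  }
  return 0;
}
```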
diff --git a/deploy/third_engine/demo_openvino_kpts/keypoint_detector.h b/deploy/third_engine/demo_openvino_kpts/keypoint_detector.h
index bdaa6091e51566bf05b6ebcfdfe3dc23d487ec74..e72e63dcc30bfacff21181b383ecbc23a580438d 100644
--- a/deploy/third_engine/demo_openvino_kpts/keypoint_detector.h
+++ b/deploy/third_engine/demo_openvino_kpts/keypoint_detector.h
@@ -69,7 +69,7 @@ class KeyPointDetector {
       if (idx == 0) {
         output_info.second->setPrecision(InferenceEngine::Precision::FP32);
       } else {
-        output_info.second->setPrecision(InferenceEngine::Precision::I32);
+        output_info.second->setPrecision(InferenceEngine::Precision::FP32);
       }
       idx++;
     }
@@ -99,14 +99,14 @@ class KeyPointDetector {
   // Postprocess result
   void Postprocess(std::vector<float>& output,
                    std::vector<uint64_t>& output_shape,
-                   std::vector<int64_t>& idxout,
+                   std::vector<float>& idxout,
                    std::vector<uint64_t>& idx_shape,
                    std::vector<KeyPointResult>* result,
                    std::vector<std::vector<float>>& center,
                    std::vector<std::vector<float>>& scale);
 
   std::vector<float> output_data_;
-  std::vector<int64_t> idx_data_;
+  std::vector<float> idx_data_;
   float threshold_;
   bool use_dark_;
diff --git a/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.cpp b/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.cpp
index 951fe8f48e6b77f52c6cfcf97b5933160db5cb17..65430ab1f07c0690aad8a26d5d3abda52badd9c4 100644
--- a/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.cpp
+++ b/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.cpp
@@ -74,11 +74,26 @@ void transform_preds(std::vector<float>& coords,
                      std::vector<float>& scale,
                      std::vector<uint64_t>& output_size,
                      std::vector<uint64_t>& dim,
-                     std::vector<float>& target_coords) {
-  cv::Mat trans(2, 3, CV_64FC1);
-  get_affine_transform(center, scale, 0, output_size, trans, 1);
-  for (int p = 0; p < dim[1]; ++p) {
-    affine_tranform(coords[p * 2], coords[p * 2 + 1], trans, target_coords, p);
+                     std::vector<float>& target_coords,
+                     bool affine=false) {
+  if (affine) {
+    cv::Mat trans(2, 3, CV_64FC1);
+    get_affine_transform(center, scale, 0, output_size, trans, 1);
+    for (int p = 0; p < dim[1]; ++p) {
+      affine_tranform(
+          coords[p * 2], coords[p * 2 + 1], trans, target_coords, p);
+    }
+  } else {
+    float heat_w = static_cast<float>(output_size[0]);
+    float heat_h = static_cast<float>(output_size[1]);
+    float x_scale = scale[0] / heat_w;
+    float y_scale = scale[1] / heat_h;
+    float offset_x = center[0] - scale[0] / 2.;
+    float offset_y = center[1] - scale[1] / 2.;
+    for (int i = 0; i < dim[1]; i++) {
+      target_coords[i * 3 + 1] = x_scale * coords[i * 2] + offset_x;
+      target_coords[i * 3 + 2] = y_scale * coords[i * 2 + 1] + offset_y;
+    }
   }
 }
@@ -172,7 +187,7 @@ void dark_parse(std::vector<float>& heatmap,
 
 void get_final_preds(std::vector<float>& heatmap,
                      std::vector<uint64_t>& dim,
-                     std::vector<int64_t>& idxout,
+                     std::vector<float>& idxout,
                      std::vector<uint64_t>& idxdim,
                      std::vector<float>& center,
                      std::vector<float> scale,
@@ -187,7 +202,7 @@ void get_final_preds(std::vector<float>& heatmap,
 
   for (int j = 0; j < dim[1]; ++j) {
     int index = (batchid * dim[1] + j) * dim[2] * dim[3];
-    int idx = idxout[batchid * dim[1] + j];
+    int idx = int(idxout[batchid * dim[1] + j]);
     preds[j * 3] = heatmap[index + idx];
     coords[j * 2] = idx % heatmap_width;
     coords[j * 2 + 1] = idx / heatmap_width;
diff --git a/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.h b/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.h
index effd0969ce40b3d4b17ff1d709cebd1c429fa65e..b9bd743b772d226b4b02c4f411e8492fda220571 100644
--- a/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.h
+++ b/deploy/third_engine/demo_openvino_kpts/keypoint_postprocess.h
@@ -37,7 +37,8 @@ void transform_preds(std::vector<float>& coords,
                      std::vector<float>& scale,
                      std::vector<uint64_t>& output_size,
                      std::vector<uint64_t>& dim,
-                     std::vector<float>& target_coords);
+                     std::vector<float>& target_coords,
+                     bool affine);
 void box_to_center_scale(std::vector<float>& box,
                          int width,
                          int height,
@@ -51,7 +52,7 @@ void get_max_preds(std::vector<float>& heatmap,
                    int joint_idx);
 void get_final_preds(std::vector<float>& heatmap,
                      std::vector<uint64_t>& dim,
-                     std::vector<int64_t>& idxout,
+                     std::vector<float>& idxout,
                      std::vector<uint64_t>& idxdim,
                      std::vector<float>& center,
                      std::vector<float> scale,
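The non-affine branch added to `transform_preds` above maps a heatmap coordinate back to the original image by rescaling with the detected person box size and shifting by the box's top-left corner (`center - scale / 2`). Below is a standalone sketch of that arithmetic with made-up numbers; the heatmap size matches TinyPose-256x192, everything else is illustrative.

```cpp
#include <cstdio>

int main() {
  const float heat_w = 48.f, heat_h = 64.f;        // TinyPose 256x192 heatmap size
  const float center_x = 320.f, center_y = 240.f;  // person box center in the image
  const float scale_w = 96.f, scale_h = 128.f;     // person box width / height

  // A keypoint found at heatmap coordinate (20, 10).
  const float coord_x = 20.f, coord_y = 10.f;

  // Same arithmetic as the else-branch of transform_preds.
  float img_x = (scale_w / heat_w) * coord_x + (center_x - scale_w / 2.f);
  float img_y = (scale_h / heat_h) * coord_y + (center_y - scale_h / 2.f);

  std::printf("heatmap (%.0f, %.0f) -> image (%.1f, %.1f)\n",
              coord_x, coord_y, img_x, img_y);
  return 0;
}
```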
PicoDet("../weight/picodet_m_416.xml"); + auto detector = PicoDet("./weight/picodet_m_416.xml"); auto kpts_detector = - new KeyPointDetector("../weight/tinypose256.xml", 256, 192); + new KeyPointDetector("./weight/tinypose256_git2-sim.xml", 256, 192); std::cout << "success" << std::endl; int mode = atoi(argv[1]);