From 6ff6842bb8bb80b67071b8239036f2122d4b485a Mon Sep 17 00:00:00 2001
From: wangxinxin08 <69842442+wangxinxin08@users.noreply.github.com>
Date: Thu, 17 Nov 2022 21:54:57 +0800
Subject: [PATCH] fix introduction of detection models (#5592)
* fix introduction of detection models
* add ppyolov2 infer result
* fix PP-YOLOE+ intro
---
modelcenter/PP-PicoDet/introduction_cn.ipynb | 15 +++++++++----
modelcenter/PP-PicoDet/introduction_en.ipynb | 5 +++--
modelcenter/PP-YOLO/introduction_cn.ipynb | 7 +++---
modelcenter/PP-YOLO/introduction_en.ipynb | 7 +++---
modelcenter/PP-YOLOE+/introduction_cn.ipynb | 23 +++++++++++++-------
modelcenter/PP-YOLOE+/introduction_en.ipynb | 13 ++++++-----
modelcenter/PP-YOLOE/introduction_cn.ipynb | 3 ++-
modelcenter/PP-YOLOE/introduction_en.ipynb | 3 ++-
modelcenter/PP-YOLOv2/introduction_cn.ipynb | 6 ++---
modelcenter/PP-YOLOv2/introduction_en.ipynb | 9 ++++----
10 files changed, 56 insertions(+), 35 deletions(-)
diff --git a/modelcenter/PP-PicoDet/introduction_cn.ipynb b/modelcenter/PP-PicoDet/introduction_cn.ipynb
index d53e6782..cba57017 100644
--- a/modelcenter/PP-PicoDet/introduction_cn.ipynb
+++ b/modelcenter/PP-PicoDet/introduction_cn.ipynb
@@ -24,7 +24,7 @@
"## 2. 模型效果\n",
"PP-Picodet与其他轻量级模型的精度速度对比图如下所示:\n",
"
\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -37,12 +37,19 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {},
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "plaintext"
+ }
+ },
+ "outputs": [],
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
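The hunk above (and the matching PP-YOLOE+ hunk later in this patch) converts a markdown cell that merely displayed the setup commands into an executable code cell, which is why the diff adds "execution_count", "outputs", and the "vscode.languageId" metadata. As a minimal sketch of what that converted cell amounts to, built with the standard nbformat library (the metadata key mirrors the one in the diff; everything else here is illustrative and not part of the patch):

    # Illustrative only: construct the kind of code cell the diff converts to.
    import nbformat

    cell = nbformat.v4.new_code_cell(
        source="%cd ~/work\n"
               "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n"
               "%cd PaddleDetection\n"
               "!pip install -r requirements.txt"
    )
    cell.metadata["vscode"] = {"languageId": "plaintext"}  # same metadata key as in the diff
    # new_code_cell already sets cell_type="code", execution_count=None, outputs=[].
    print(nbformat.writes(nbformat.v4.new_notebook(cells=[cell])))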
diff --git a/modelcenter/PP-PicoDet/introduction_en.ipynb b/modelcenter/PP-PicoDet/introduction_en.ipynb
index f7d859d8..d8f0e42e 100644
--- a/modelcenter/PP-PicoDet/introduction_en.ipynb
+++ b/modelcenter/PP-PicoDet/introduction_en.ipynb
@@ -23,7 +23,7 @@
"## 2. Model Effects\n",
"The accuracy and speed comparison of PP-Picodet and other lightweight models is shown below:\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -47,7 +47,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
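Every setup cell in this patch gains a "!pip install -r requirements.txt" line, since tools/train.py and the deployment scripts import PaddleDetection's Python dependencies. If the cell may be re-run, a slightly more defensive variant (my sketch, not part of the patch; it assumes ~/work exists, as the notebooks do) skips the clone when the checkout is already there:

    # Sketch of a re-runnable setup cell.
    %cd ~/work
    import os
    if not os.path.isdir("PaddleDetection"):
        !git clone https://gitee.com/paddlepaddle/PaddleDetection
    %cd PaddleDetection
    !pip install -r requirements.txt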
diff --git a/modelcenter/PP-YOLO/introduction_cn.ipynb b/modelcenter/PP-YOLO/introduction_cn.ipynb
index fd886fd5..523eb2fa 100644
--- a/modelcenter/PP-YOLO/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLO/introduction_cn.ipynb
@@ -16,7 +16,7 @@
"PP-YOLO在[COCO](http://cocodataset.org) test-dev2017数据集上精度达到45.9%,在单卡V100上FP32推理速度为72.9 FPS, V100上开启TensorRT下FP16推理速度为155.6 FPS。\n",
"\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -40,7 +40,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -61,7 +62,7 @@
},
"outputs": [],
"source": [
- "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml &>ppyolo_dygraph.log 2>&1 &"
+ "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml"
]
},
{
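The training hunks drop the "&>ppyolo_dygraph.log 2>&1 &" suffix, so the launch command no longer detaches into the background: the notebook cell now streams the training log and blocks until training finishes, instead of appearing to succeed instantly while writing to a hidden log file. If a log file is still wanted, one option is to pipe through tee (a sketch assuming a Unix shell; not part of the patch):

    # Stream the log in the cell and keep a copy on disk at the same time.
    !python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml 2>&1 | tee ppyolo_dygraph.log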
diff --git a/modelcenter/PP-YOLO/introduction_en.ipynb b/modelcenter/PP-YOLO/introduction_en.ipynb
index 04f0415f..d7a30b1e 100644
--- a/modelcenter/PP-YOLO/introduction_en.ipynb
+++ b/modelcenter/PP-YOLO/introduction_en.ipynb
@@ -16,7 +16,7 @@
"PP-YOLO reached mmAP(IoU=0.5:0.95) as 45.9% on COCO test-dev2017 dataset, and inference speed of FP32 on single V100 is 72.9 FPS, inference speed of FP16 with TensorRT on single V100 is 155.6 FPS.\n",
"\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -40,7 +40,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -61,7 +62,7 @@
},
"outputs": [],
"source": [
- "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml &>ppyolo_dygraph.log 2>&1 &"
+ "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml"
]
},
{
diff --git a/modelcenter/PP-YOLOE+/introduction_cn.ipynb b/modelcenter/PP-YOLOE+/introduction_cn.ipynb
index 423c0764..98e80589 100644
--- a/modelcenter/PP-YOLOE+/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLOE+/introduction_cn.ipynb
@@ -15,7 +15,7 @@
"## 2. 模型效果\n",
"PP-YOLOE+_l在COCO test-dev2017达到了53.3的mAP, 同时其速度在Tesla V100上达到了78.1 FPS。如下图所示,PP-YOLOE+_s/m/x同样具有卓越的精度速度性价比。\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -28,12 +28,19 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {},
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "plaintext"
+ }
+ },
+ "outputs": [],
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -114,10 +121,10 @@
"outputs": [],
"source": [
"# 推理单张图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
"\n",
"# 推理文件夹下的所有图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
]
},
{
@@ -168,10 +175,10 @@
"outputs": [],
"source": [
"# 推理单张图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
"\n",
"# 推理文件夹下的所有图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
]
},
{
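Prefixing the inference commands with "!" makes them runnable from the notebook; the CUDA_VISIBLE_DEVICES=0 assignment in front of python then applies only to that single command, because each "!" line runs in its own shell. An equivalent way to pin the GPU once for every later "!" line is to set it in the kernel environment (a sketch, not part of the patch):

    # Set once in the kernel; subsequent ! commands inherit it.
    %env CUDA_VISIBLE_DEVICES=0
    !python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle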
diff --git a/modelcenter/PP-YOLOE+/introduction_en.ipynb b/modelcenter/PP-YOLOE+/introduction_en.ipynb
index 775d0fb0..3feaa15b 100644
--- a/modelcenter/PP-YOLOE+/introduction_en.ipynb
+++ b/modelcenter/PP-YOLOE+/introduction_en.ipynb
@@ -15,7 +15,7 @@
"## 2. Model Effects\n",
"PP-YOLOE+_l achieves 53.3 mAP on COCO test-dev2017 dataset with 78.1 FPS on Tesla V100. While using TensorRT FP16, PP-YOLOE+_l can be further accelerated to 149.2 FPS. PP-YOLOE+_s/m/x also have excellent accuracy and speed performance as shown below.\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -39,7 +39,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -120,10 +121,10 @@
"outputs": [],
"source": [
"# inference single image\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
"\n",
"# inference all images in the directory\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
]
},
{
@@ -180,10 +181,10 @@
"outputs": [],
"source": [
"# inference single image\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
"\n",
"# inference all images in the directory\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
]
},
{
diff --git a/modelcenter/PP-YOLOE/introduction_cn.ipynb b/modelcenter/PP-YOLOE/introduction_cn.ipynb
index 8088907f..3bd6a5ed 100644
--- a/modelcenter/PP-YOLOE/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLOE/introduction_cn.ipynb
@@ -39,7 +39,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
diff --git a/modelcenter/PP-YOLOE/introduction_en.ipynb b/modelcenter/PP-YOLOE/introduction_en.ipynb
index ab8e09cb..add9b2c9 100644
--- a/modelcenter/PP-YOLOE/introduction_en.ipynb
+++ b/modelcenter/PP-YOLOE/introduction_en.ipynb
@@ -40,7 +40,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
diff --git a/modelcenter/PP-YOLOv2/introduction_cn.ipynb b/modelcenter/PP-YOLOv2/introduction_cn.ipynb
index 6b381b6b..fd43dcd7 100644
--- a/modelcenter/PP-YOLOv2/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLOv2/introduction_cn.ipynb
@@ -153,8 +153,8 @@
"outputs": [],
"source": [
"# 在GPU上预测一张图片\n",
- "export CUDA_VISIBLE_DEVICES=0\n",
- "python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
+ "!export CUDA_VISIBLE_DEVICES=0\n",
+ "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
]
},
{
@@ -165,7 +165,7 @@
"\n",
"结果如下图:\n",
"\n",
- "
\n",
+ "
\n",
"
\n",
"\n",
"\n"
diff --git a/modelcenter/PP-YOLOv2/introduction_en.ipynb b/modelcenter/PP-YOLOv2/introduction_en.ipynb
index 260196af..fc094724 100644
--- a/modelcenter/PP-YOLOv2/introduction_en.ipynb
+++ b/modelcenter/PP-YOLOv2/introduction_en.ipynb
@@ -145,8 +145,8 @@
"outputs": [],
"source": [
"# Predict a picture on the GPU.\n",
- "export CUDA_VISIBLE_DEVICES=0\n",
- "python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
+ "!export CUDA_VISIBLE_DEVICES=0\n",
+ "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
]
},
{
@@ -156,8 +156,9 @@
"An image with the predicted result is generated under the output folder.\n",
"\n",
"The result is as follows:\n",
- "\n",
- "![](https://ai-studio-static-online.cdn.bcebos.com/76fb0d0b60fe4fe39cc7302f4c25818133f970ebdf924d2d85f70f25a586aab9)\n"
+ "\n",
+ "
\n",
+ "
\n"
]
},
{
--
GitLab