diff --git a/modelcenter/PP-PicoDet/introduction_cn.ipynb b/modelcenter/PP-PicoDet/introduction_cn.ipynb
index d53e67823c95b2b4e0a31e25274505bfb981a5b2..cba5701734eaa8029db4bba52dda543bac64b70c 100644
--- a/modelcenter/PP-PicoDet/introduction_cn.ipynb
+++ b/modelcenter/PP-PicoDet/introduction_cn.ipynb
@@ -24,7 +24,7 @@
"## 2. 模型效果\n",
"PP-Picodet与其他轻量级模型的精度速度对比图如下所示:\n",
"
\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -37,12 +37,19 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {},
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "plaintext"
+ }
+ },
+ "outputs": [],
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
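Note on the converted setup cell above: it re-clones PaddleDetection and reinstalls the requirements every time it runs. A minimal sketch of an idempotent variant, assuming an IPython kernel and a writable ~/work directory (the os.path.isdir guard is illustrative, not part of the patch):

    import os
    %cd ~/work
    # Only clone if the repository is not already present, so the cell can be re-run safely.
    if not os.path.isdir("PaddleDetection"):
        !git clone https://gitee.com/paddlepaddle/PaddleDetection
    %cd PaddleDetection
    !pip install -r requirements.txt
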
diff --git a/modelcenter/PP-PicoDet/introduction_en.ipynb b/modelcenter/PP-PicoDet/introduction_en.ipynb
index f7d859d8764d90209255d2e5122a8cb2ba5c6c00..d8f0e42e46721833ce3fe038fbc3b68dc1b56da8 100644
--- a/modelcenter/PP-PicoDet/introduction_en.ipynb
+++ b/modelcenter/PP-PicoDet/introduction_en.ipynb
@@ -23,7 +23,7 @@
"## 2. Model Effects\n",
"The accuracy and speed comparison of PP-Picodet and other lightweight models is shown below:\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -47,7 +47,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
diff --git a/modelcenter/PP-YOLO/introduction_cn.ipynb b/modelcenter/PP-YOLO/introduction_cn.ipynb
index fd886fd51a62835202fc72166694480f2192223f..523eb2fa19f2ef39f385ff99d285de40c20c8957 100644
--- a/modelcenter/PP-YOLO/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLO/introduction_cn.ipynb
@@ -16,7 +16,7 @@
"PP-YOLO在[COCO](http://cocodataset.org) test-dev2017数据集上精度达到45.9%,在单卡V100上FP32推理速度为72.9 FPS, V100上开启TensorRT下FP16推理速度为155.6 FPS。\n",
"\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -40,7 +40,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -61,7 +62,7 @@
},
"outputs": [],
"source": [
- "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml &>ppyolo_dygraph.log 2>&1 &"
+ "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml"
]
},
{
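Without `&>ppyolo_dygraph.log 2>&1 &`, the training cell runs in the foreground and its output shows up in the notebook but is no longer saved to a file. A hedged sketch that keeps a log while staying synchronous (assumption: `tee` is available in the runtime image); `paddle.distributed.launch` additionally writes per-rank logs under the directory given by `--log_dir`:

    # Stream training output to the notebook and to ppyolo_dygraph.log at the same time.
    !python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml 2>&1 | tee ppyolo_dygraph.log
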
diff --git a/modelcenter/PP-YOLO/introduction_en.ipynb b/modelcenter/PP-YOLO/introduction_en.ipynb
index 04f0415fe7b1a51da12f68751741bde84b3ac260..d7a30b1ed1cba0923a8dc6b75b54175fe59ee3c4 100644
--- a/modelcenter/PP-YOLO/introduction_en.ipynb
+++ b/modelcenter/PP-YOLO/introduction_en.ipynb
@@ -16,7 +16,7 @@
"PP-YOLO reached mmAP(IoU=0.5:0.95) as 45.9% on COCO test-dev2017 dataset, and inference speed of FP32 on single V100 is 72.9 FPS, inference speed of FP16 with TensorRT on single V100 is 155.6 FPS.\n",
"\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -40,7 +40,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -61,7 +62,7 @@
},
"outputs": [],
"source": [
- "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml &>ppyolo_dygraph.log 2>&1 &"
+ "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml"
]
},
{
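The launch command above assumes eight visible GPUs (`--gpus 0,1,2,3,4,5,6,7`). Where only one card is available, PaddleDetection's trainer can also be started directly; a minimal single-GPU sketch (assumption: the default batch size in the config fits on one card, and the learning rate may need scaling down accordingly):

    # Pin the run to the first GPU and start the trainer without the distributed launcher.
    %env CUDA_VISIBLE_DEVICES=0
    !python tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml
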
diff --git a/modelcenter/PP-YOLOE+/introduction_cn.ipynb b/modelcenter/PP-YOLOE+/introduction_cn.ipynb
index 423c076481e8e34c81f415d6e04ccbe63fef9db7..98e8058939a4f806dca169020f15a77399f3408a 100644
--- a/modelcenter/PP-YOLOE+/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLOE+/introduction_cn.ipynb
@@ -15,7 +15,7 @@
"## 2. 模型效果\n",
"PP-YOLOE+_l在COCO test-dev2017达到了53.3的mAP, 同时其速度在Tesla V100上达到了78.1 FPS。如下图所示,PP-YOLOE+_s/m/x同样具有卓越的精度速度性价比。\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -28,12 +28,19 @@
]
},
{
- "cell_type": "markdown",
- "metadata": {},
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "vscode": {
+ "languageId": "plaintext"
+ }
+ },
+ "outputs": [],
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -114,10 +121,10 @@
"outputs": [],
"source": [
"# 推理单张图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
"\n",
"# 推理文件夹下的所有图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
]
},
{
@@ -168,10 +175,10 @@
"outputs": [],
"source": [
"# 推理单张图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
"\n",
"# 推理文件夹下的所有图片\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
]
},
{
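The `!` prefix added to the inference commands is what makes them work in a notebook: without it, IPython tries to parse `CUDA_VISIBLE_DEVICES=0 python ...` as Python and fails. An equivalent sketch that sets the device selection once for the whole kernel instead of on every line (environment changes made via os.environ are inherited by subsequent `!` subprocesses):

    import os
    # Applies to every ! command executed after this point in the session.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    !python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle
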
diff --git a/modelcenter/PP-YOLOE+/introduction_en.ipynb b/modelcenter/PP-YOLOE+/introduction_en.ipynb
index 775d0fb01c314c4ea1dfe1c6d56b8d6970eada88..3feaa15b609c70beaceb2173e17d4b96403584e1 100644
--- a/modelcenter/PP-YOLOE+/introduction_en.ipynb
+++ b/modelcenter/PP-YOLOE+/introduction_en.ipynb
@@ -15,7 +15,7 @@
"## 2. Model Effects\n",
"PP-YOLOE+_l achieves 53.3 mAP on COCO test-dev2017 dataset with 78.1 FPS on Tesla V100. While using TensorRT FP16, PP-YOLOE+_l can be further accelerated to 149.2 FPS. PP-YOLOE+_s/m/x also have excellent accuracy and speed performance as shown below.\n",
"\n",
- "
\n",
+ "
\n",
"
"
]
},
@@ -39,7 +39,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
@@ -120,10 +121,10 @@
"outputs": [],
"source": [
"# inference single image\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n",
"\n",
"# inference all images in the directory\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16"
]
},
{
@@ -180,10 +181,10 @@
"outputs": [],
"source": [
"# inference single image\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n",
"\n",
"# inference all images in the directory\n",
- "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
+ "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle"
]
},
{
diff --git a/modelcenter/PP-YOLOE/introduction_cn.ipynb b/modelcenter/PP-YOLOE/introduction_cn.ipynb
index 8088907fa14eca3d6516943172a6cdecf24b48ae..3bd6a5ed5c447e07b391c0f80d837eb7394625a7 100644
--- a/modelcenter/PP-YOLOE/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLOE/introduction_cn.ipynb
@@ -39,7 +39,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
diff --git a/modelcenter/PP-YOLOE/introduction_en.ipynb b/modelcenter/PP-YOLOE/introduction_en.ipynb
index ab8e09cb528f81cb4268e138a5ce0e38decf6ca4..add9b2c9e518e56f279b99144f82cc4e507d0d96 100644
--- a/modelcenter/PP-YOLOE/introduction_en.ipynb
+++ b/modelcenter/PP-YOLOE/introduction_en.ipynb
@@ -40,7 +40,8 @@
"source": [
"%cd ~/work\n",
"!git clone https://gitee.com/paddlepaddle/PaddleDetection\n",
- "%cd PaddleDetection"
+ "%cd PaddleDetection\n",
+ "!pip install -r requirements.txt"
]
},
{
diff --git a/modelcenter/PP-YOLOv2/introduction_cn.ipynb b/modelcenter/PP-YOLOv2/introduction_cn.ipynb
index 6b381b6b95071f71836aeb7a6d2c1a1c51b9e554..fd43dcd78727cfe29354ad452987446b3fc45cc6 100644
--- a/modelcenter/PP-YOLOv2/introduction_cn.ipynb
+++ b/modelcenter/PP-YOLOv2/introduction_cn.ipynb
@@ -153,8 +153,8 @@
"outputs": [],
"source": [
"# 在GPU上预测一张图片\n",
- "export CUDA_VISIBLE_DEVICES=0\n",
- "python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
+ "!export CUDA_VISIBLE_DEVICES=0\n",
+ "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
]
},
{
@@ -165,7 +165,7 @@
"\n",
"结果如下图:\n",
"\n",
- "
\n",
+ "
\n",
"
\n",
"\n",
"\n"
diff --git a/modelcenter/PP-YOLOv2/introduction_en.ipynb b/modelcenter/PP-YOLOv2/introduction_en.ipynb
index 260196af367088f80ac46ed62d254dbc181c0d5f..fc094724529cb13ef8eec0b2e4e88d985f4f835d 100644
--- a/modelcenter/PP-YOLOv2/introduction_en.ipynb
+++ b/modelcenter/PP-YOLOv2/introduction_en.ipynb
@@ -145,8 +145,8 @@
"outputs": [],
"source": [
"# Predict a picture on the GPU.\n",
- "export CUDA_VISIBLE_DEVICES=0\n",
- "python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
+ "!export CUDA_VISIBLE_DEVICES=0\n",
+ "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
]
},
{
@@ -156,8 +156,9 @@
"An image with the predicted result is generated under the output folder.\n",
"\n",
"The result is as follows:\n",
- "\n",
- "![](https://ai-studio-static-online.cdn.bcebos.com/76fb0d0b60fe4fe39cc7302f4c25818133f970ebdf924d2d85f70f25a586aab9)\n"
+ "\n",
+ "
\n",
+ "
\n"
]
},
{