diff --git a/modelcenter/PP-HumanV2/APP/app.py b/modelcenter/PP-HumanV2/APP/app.py index 092db1c5f59b91caa8ab09d665a225c441be7b81..1a9bc1c4a471fe0b39d245fcf847c772805316ce 100644 --- a/modelcenter/PP-HumanV2/APP/app.py +++ b/modelcenter/PP-HumanV2/APP/app.py @@ -9,7 +9,9 @@ from pipeline.pipeline import pp_humanv2 # UGC: Define the inference fn() for your models def model_inference(input_date, avtivity_list): - + if 'do_entrance_counting'in avtivity_list or 'draw_center_traj' in avtivity_list: + if 'MOT' not in avtivity_list: + avtivity_list.append('MOT') result = pp_humanv2(input_date, avtivity_list) return result @@ -26,7 +28,7 @@ with gr.Blocks() as demo: with gr.TabItem("image"): - img_in = gr.Image(label="Input") + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.jpg",label="Input") img_out = gr.Image(label="Output") img_avtivity_list = gr.CheckboxGroup(["ATTR"]) @@ -35,11 +37,11 @@ with gr.Blocks() as demo: with gr.TabItem("video"): - video_in = gr.Video(label="Input") + video_in = gr.Video(value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.mp4",label="Input") video_out = gr.Video(label="Output") video_avtivity_list = gr.CheckboxGroup(["MOT","ATTR","VIDEO_ACTION","SKELETON_ACTION","ID_BASED_DETACTION","ID_BASED_CLSACTION","REID",\ - "do_entrance_counting","draw_center_traj"]) + "do_entrance_counting","draw_center_traj"],label="Task Choice (note: only one task should be checked)") video_button1 = gr.Button("Submit") video_button2 = gr.Button("Clear") diff --git a/modelcenter/PP-HumanV2/APP/pipeline/pipeline.py b/modelcenter/PP-HumanV2/APP/pipeline/pipeline.py index 2b34b6b8f8b07d36362845bc9b9deaf55d64e02d..c43fb34f0f1d49e88dde58aea257afb858da8fc3 100644 --- a/modelcenter/PP-HumanV2/APP/pipeline/pipeline.py +++ b/modelcenter/PP-HumanV2/APP/pipeline/pipeline.py @@ -135,7 +135,7 @@ def get_model_dir_with_list(cfg, args): cfg[key]["rec_model_dir"] = rec_model_dir print("rec_model_dir model 
dir: ", rec_model_dir) - if key == 'ID_BASED_DETACTION' or key == 'SKELETON_ACTION': + if key == 'ID_BASED_DETACTION' or key == 'SKELETON_ACTION' or key == 'ATTR' or key =='ID_BASED_CLSACTION' or key=='REID': model_dir = cfg['MOT']["model_dir"] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: diff --git a/modelcenter/PP-HumanV2/info.yaml b/modelcenter/PP-HumanV2/info.yaml index 0859c91f1833c8508c42b58a51f2266ee6b6bda9..a6e4cddba84ecaab60a5a3c5b114daa6b9876f64 100644 --- a/modelcenter/PP-HumanV2/info.yaml +++ b/modelcenter/PP-HumanV2/info.yaml @@ -3,7 +3,7 @@ Model_Info: name: "PP-HumanV2" description: "飞桨行人场景分析工具" description_en: "PaddlePadle Pedestrian Scene Analysis Tool" - icon: "" + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-Vehicle.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -38,7 +38,7 @@ Example: sub_tag: "进出管理" url: "https://aistudio.baidu.com/aistudio/projectdetail/4537344" Datasets: "" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" IfTraining: 0 IfOnlineDemo: 1 diff --git a/modelcenter/PP-HumanV2/introduction_cn.ipynb b/modelcenter/PP-HumanV2/introduction_cn.ipynb index 8ebf3be5fdd7ab9dd0397a9ab4dab05e3c5c82d3..05386cdcd43c323f17340761f6c6b4515dc34101 100644 --- a/modelcenter/PP-HumanV2/introduction_cn.ipynb +++ b/modelcenter/PP-HumanV2/introduction_cn.ipynb @@ -60,21 +60,18 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "\n", "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", "%cd ~/work/\n", "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", "# 安装其他依赖\n", "%cd PaddleDetection\n", - "!pip install -r requirements.txt\n" + "%mkdir -p demo_input demo_output\n", + "!pip install -r requirements.txt" ] }, { @@ -101,11 +98,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": 
"plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "```\n", @@ -140,40 +133,91 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "#直接使用默认配置或者examples中配置文件,或者直接在`infer_cfg_pphuman.yml`中修改配置:\n", "\n", "# 例:行人检测,指定配置文件路径和测试图片,图片输入默认打开检测模型\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --image_file=test_image.jpg --device=gpu\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.jpg \n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --image_file=demo_input/human_attr.jpg --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 例:行人属性识别,直接使用examples中配置\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --video_file=test_video.mp4 --device=gpu\n", - "\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --video_file=demo_input/human_attr.mp4 --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "#使用命令行进行功能开启,或者模型路径修改:\n", "\n", "# 例:行人跟踪,指定配置文件路径,模型路径和测试视频, 命令行中指定的模型路径优先级高于配置文件\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o MOT.enable=True MOT.model_dir=ppyoloe_infer/ --video_file=test_video.mp4 --device=gpu\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_count.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o 
MOT.enable=True --video_file=demo_input/human_count.mp4 --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 例:行为识别,以摔倒识别为例,命令行中开启SKELETON_ACTION模型\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o SKELETON_ACTION.enbale=True --video_file=test_video.mp4 --device=gpu\n", - "\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_falldown.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o SKELETON_ACTION.enable=True --video_file=demo_input/human_falldown.mp4 --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "#rtsp推拉流\n", "\n", "#对rtsp拉流的支持,使用--rtsp RTSP [RTSP ...]参数指定一路或者多路rtsp视频流,如果是多路地址中间用空格隔开。(或者video_file后面的视频地址直接更换为rtsp流地址),示例如下:\n", "\n", "# 例:行人属性识别,单路视频流\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE] --device=gpu\n", - "\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE] --device=gpu" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 例:行人属性识别,多路视频流\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE1] rtsp://[YOUR_RTSP_SITE2] --device=gpu\n" + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml -o visual=False --rtsp rtsp://[YOUR_RTSP_SITE1] rtsp://[YOUR_RTSP_SITE2] --device=gpu" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": 
[ + "# 视频结果推流rtsp\n", + "\n", + "# 预测结果进行rtsp推流,使用--pushurl rtsp:[IP] 推流到IP地址端,PC端可以使用[VLC播放器](https://vlc.onl/)打开网络流进行播放,播放地址为 `rtsp:[IP]/videoname`。其中`videoname`是预测的视频文件名,如果视频来源是本地摄像头则`videoname`默认为`output`.\n", + "\n", + "# 例:行人属性识别,单路视频流,该示例播放地址为 rtsp://[YOUR_SERVER_IP]:8554/test_video\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --video_file=test_video.mp4 --device=gpu --pushurl rtsp://[YOUR_SERVER_IP]:8554\n", + "\n", + "# 注:\n", + "# 1. rtsp推流服务基于 [rtsp-simple-server](https://github.com/aler9/rtsp-simple-server), 如使用推流功能请先开启该服务.\n", + "# 2. rtsp推流如果模型处理速度跟不上会出现很明显的卡顿现象,建议跟踪模型使用ppyoloe_s版本,即修改配置中跟踪模型mot_ppyoloe_l_36e_pipeline.zip替换为mot_ppyoloe_s_36e_pipeline.zip。" ] }, { @@ -262,7 +306,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.8.13 ('paddle_env')", "language": "python", "name": "python3" }, @@ -276,7 +320,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-HumanV2/introduction_en.ipynb b/modelcenter/PP-HumanV2/introduction_en.ipynb index 11d27928cec2194c5fccb7afc6885d20ff26bea0..fbe8b6eadc687b1c56c5ef2fdecc2bbda3249e07 100644 --- a/modelcenter/PP-HumanV2/introduction_en.ipynb +++ b/modelcenter/PP-HumanV2/introduction_en.ipynb @@ -57,20 +57,18 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# clone PaddleDetection\n", + "%mkdir -p ~/work\n", "%cd ~/work/\n", "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", "# Other Dependencies\n", "%cd PaddleDetection\n", - "!pip install -r requirements.txt\n" + "%mkdir -p demo_input demo_output\n", + "!pip install -r 
requirements.txt" ] }, { @@ -97,11 +95,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "```\n", @@ -136,28 +130,55 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "#Use the default configuration directly or the configuration file in examples, or modify the configuration in `infer_cfg_pphuman.yml`\n", "# Example: In pedestrian detection model, specify configuration file path and test image, and image input opens detection model by default\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --image_file=test_image.jpg --device=gpu\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.jpg \n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml --image_file=demo_input/human_attr.jpg --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# Example: In pedestrian attribute recognition, directly configure the examples\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --video_file=test_video.mp4 --device=gpu\n", - " \n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_attr.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_human_attr.yml --video_file=demo_input/human_attr.mp4 --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "#Use the command line to enable functions or change the model path.\n", "# Example: Pedestrian tracking, specify config file path, model path 
and test video. The specified model path on the command line has a higher priority than the config file.\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o MOT.enable=True MOT.model_dir=ppyoloe_infer/ --video_file=test_video.mp4 --device=gpu\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_count.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o MOT.enable=True --video_file=demo_input/human_count.mp4 --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# Example: In behaviour recognition, with fall recognition as an example, enable the SKELETON_ACTION model on the command line\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o SKELETON_ACTION.enbale=True --video_file=test_video.mp4 --device=gpu\n", - "\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Human/human_falldown.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_pphuman.yml -o SKELETON_ACTION.enable=True --video_file=demo_input/human_falldown.mp4 --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "#rtsp push/pull stream\n", "#For rtsp pull stream, use `--rtsp RTSP [RTSP ...]` parameter to specify one or more rtsp streams. 
Separate the multiple addresses with a space, or replace the video address directly after the video_file with the rtsp stream address), examples as follows\n", "\n", @@ -267,7 +288,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, @@ -281,7 +302,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.9.6" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-PicoDet/APP/app.py b/modelcenter/PP-PicoDet/APP/app.py index 13cb0249548000f34254cd3125ce2d7c237ad997..cc5324079deae89a92047e152ffe666ee482ddec 100644 --- a/modelcenter/PP-PicoDet/APP/app.py +++ b/modelcenter/PP-PicoDet/APP/app.py @@ -19,7 +19,7 @@ with gr.Blocks() as demo: with gr.Column(scale=1, min_width=100): - img_in = gr.Image(label="Input").style(height=200) + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg",label="Input").style(height=200) with gr.Row(): btn1 = gr.Button("Clear") diff --git a/modelcenter/PP-PicoDet/info.yaml b/modelcenter/PP-PicoDet/info.yaml index 6ff81716ee98e6cb2dbbde4dd860f46909bc01cd..272b93ff9dced1793f10efdb8e3f3642fc6d25af 100644 --- a/modelcenter/PP-PicoDet/info.yaml +++ b/modelcenter/PP-PicoDet/info.yaml @@ -4,7 +4,7 @@ Model_Info: description: "PP-PicoDet是轻量级系列模型,在移动端具有卓越的性能" description_en: "PP-PicoDet has a series of lightweight models, which are very suitable\ \ for deployment on mobile or CPU" - icon: "@后续UE统一设计之后,会存到bos上某个位置" + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_Picodet.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -19,7 +19,7 @@ Example: sub_tag: "路面危害检测" url: "https://aistudio.baidu.com/aistudio/projectdetail/3898651" Datasets: "COCO test-dev2017, COCO train2017, COCO 
val2017, Pascal VOC" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" Paper: - title: "PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices" diff --git a/modelcenter/PP-PicoDet/introduction_cn.ipynb b/modelcenter/PP-PicoDet/introduction_cn.ipynb index cba5701734eaa8029db4bba52dda543bac64b70c..b0c9dff028a8a2d1b0f0a545736eb52c2843e1d5 100644 --- a/modelcenter/PP-PicoDet/introduction_cn.ipynb +++ b/modelcenter/PP-PicoDet/introduction_cn.ipynb @@ -39,16 +39,18 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "\n", + "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", + "# 安装其他依赖\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -63,16 +65,19 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 单卡训练\n", - "!CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval\n", - "\n", + "!CUDA_VISIBLE_DEVICES=0 python tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 多卡训练\n", "!CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --eval" ] @@ -135,10 +140,29 @@ } ], "metadata": { + "kernelspec": { + "display_name": "Python 3.8.13 ('paddle_env')", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + 
"mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" }, - "orig_nbformat": 4 + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-PicoDet/introduction_en.ipynb b/modelcenter/PP-PicoDet/introduction_en.ipynb index d8f0e42e46721833ce3fe038fbc3b68dc1b56da8..3fa7bf09777f3de78b4924cb65fe798ccdc22698 100644 --- a/modelcenter/PP-PicoDet/introduction_en.ipynb +++ b/modelcenter/PP-PicoDet/introduction_en.ipynb @@ -38,16 +38,17 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "# clone PaddleDetection\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", + "# Other Dependencies\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -62,11 +63,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# training with single GPU\n", @@ -134,10 +131,21 @@ } ], "metadata": { + "kernelspec": { + "display_name": "Python 3.10.6 64-bit", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "name": "python", + "version": "3.10.6" }, - "orig_nbformat": 4 + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/infer_cfg.yml 
b/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/infer_cfg.yml deleted file mode 100644 index 9ad451a2b04f91b8c6ad2381ba9fb71ba9a33cc7..0000000000000000000000000000000000000000 --- a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/infer_cfg.yml +++ /dev/null @@ -1,106 +0,0 @@ -mode: paddle -draw_threshold: 0.5 -metric: COCO -use_dynamic_shape: false -arch: GFL -min_subgraph_size: 3 -Preprocess: -- interp: 2 - keep_ratio: false - target_size: - - 320 - - 320 - type: Resize -- type: Permute -label_list: -- person -- bicycle -- car -- motorcycle -- airplane -- bus -- train -- truck -- boat -- traffic light -- fire hydrant -- stop sign -- parking meter -- bench -- bird -- cat -- dog -- horse -- sheep -- cow -- elephant -- bear -- zebra -- giraffe -- backpack -- umbrella -- handbag -- tie -- suitcase -- frisbee -- skis -- snowboard -- sports ball -- kite -- baseball bat -- baseball glove -- skateboard -- surfboard -- tennis racket -- bottle -- wine glass -- cup -- fork -- knife -- spoon -- bowl -- banana -- apple -- sandwich -- orange -- broccoli -- carrot -- hot dog -- pizza -- donut -- cake -- chair -- couch -- potted plant -- bed -- dining table -- toilet -- tv -- laptop -- mouse -- remote -- keyboard -- cell phone -- microwave -- oven -- toaster -- sink -- refrigerator -- book -- clock -- vase -- scissors -- teddy bear -- hair drier -- toothbrush -NMS: - keep_top_k: 100 - name: MultiClassNMS - nms_threshold: 0.5 - nms_top_k: 1000 - score_threshold: 0.3 -fpn_stride: -- 8 -- 16 -- 32 -- 64 diff --git a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdiparams b/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdiparams deleted file mode 100644 index 8a8c5f8a056ae35c01bc008cc6c031d6823325a2..0000000000000000000000000000000000000000 Binary files a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdiparams and /dev/null differ diff 
--git a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdiparams.info b/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdiparams.info deleted file mode 100644 index ee5c41935f0d9ee2d66f3dccdec692876c24bdb9..0000000000000000000000000000000000000000 Binary files a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdiparams.info and /dev/null differ diff --git a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdmodel b/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdmodel deleted file mode 100644 index 6d320da589cfe78c516810f70b5062ce0cb32e2b..0000000000000000000000000000000000000000 Binary files a/modelcenter/PP-TInyPose/APP/output_inference/picodet_v2_s_320_pedestrian/model.pdmodel and /dev/null differ diff --git a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/infer_cfg.yml b/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/infer_cfg.yml deleted file mode 100644 index 7bf18f6570d3860aca966a5420994df19299bb7d..0000000000000000000000000000000000000000 --- a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/infer_cfg.yml +++ /dev/null @@ -1,24 +0,0 @@ -mode: paddle -draw_threshold: 0.5 -metric: KeyPointTopDownCOCOEval -use_dynamic_shape: false -arch: HRNet -min_subgraph_size: 3 -Preprocess: -- trainsize: - - 96 - - 128 - type: TopDownEvalAffine -- is_scale: true - mean: - - 0.485 - - 0.456 - - 0.406 - std: - - 0.229 - - 0.224 - - 0.225 - type: NormalizeImage -- type: Permute -label_list: -- keypoint diff --git a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdiparams b/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdiparams deleted file mode 100644 index 549c37f5c99aa6efed65ef76ab30b6b2f87ba945..0000000000000000000000000000000000000000 Binary files a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdiparams and 
/dev/null differ diff --git a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdiparams.info b/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdiparams.info deleted file mode 100644 index cfeffe18d02a11ea99ec39c89819ac6c83b1cedf..0000000000000000000000000000000000000000 Binary files a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdiparams.info and /dev/null differ diff --git a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdmodel b/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdmodel deleted file mode 100644 index 998bb5b54a0cf85e03eead3ca6507d9bf7ea3b33..0000000000000000000000000000000000000000 Binary files a/modelcenter/PP-TInyPose/APP/output_inference/tinypose_128x96/model.pdmodel and /dev/null differ diff --git a/modelcenter/PP-TInyPose/.gitkeep b/modelcenter/PP-TinyPose/.gitkeep similarity index 100% rename from modelcenter/PP-TInyPose/.gitkeep rename to modelcenter/PP-TinyPose/.gitkeep diff --git a/modelcenter/PP-TInyPose/APP/app.py b/modelcenter/PP-TinyPose/APP/app.py similarity index 83% rename from modelcenter/PP-TInyPose/APP/app.py rename to modelcenter/PP-TinyPose/APP/app.py index 11b798873ac89fdbe601b348b21f2370afa628d7..3be20dc1e2718b44a2b5743234f9c58a33ca7001 100644 --- a/modelcenter/PP-TInyPose/APP/app.py +++ b/modelcenter/PP-TinyPose/APP/app.py @@ -26,7 +26,7 @@ with gr.Blocks() as demo: with gr.TabItem("image"): - img_in = gr.Image(label="Input") + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-TinyPose/000000568213.jpg",label="Input") img_out = gr.Image(label="Output") img_json_out = gr.JSON(label="jsonOutput") @@ -36,7 +36,7 @@ with gr.Blocks() as demo: with gr.TabItem("video"): - video_in = gr.Video(label="Input") + video_in = gr.Video(value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-TinyPose/demo_PP-TinyPose.mp4",label="Input") video_out = gr.Video(label="Output") video_json_out = 
gr.JSON(label="jsonOutput") diff --git a/modelcenter/PP-TInyPose/APP/app.yml b/modelcenter/PP-TinyPose/APP/app.yml similarity index 100% rename from modelcenter/PP-TInyPose/APP/app.yml rename to modelcenter/PP-TinyPose/APP/app.yml diff --git a/modelcenter/PP-TInyPose/APP/benchmark_utils.py b/modelcenter/PP-TinyPose/APP/benchmark_utils.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/benchmark_utils.py rename to modelcenter/PP-TinyPose/APP/benchmark_utils.py diff --git a/modelcenter/PP-TInyPose/APP/det_keypoint_unite_infer.py b/modelcenter/PP-TinyPose/APP/det_keypoint_unite_infer.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/det_keypoint_unite_infer.py rename to modelcenter/PP-TinyPose/APP/det_keypoint_unite_infer.py diff --git a/modelcenter/PP-TInyPose/APP/det_keypoint_unite_utils.py b/modelcenter/PP-TinyPose/APP/det_keypoint_unite_utils.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/det_keypoint_unite_utils.py rename to modelcenter/PP-TinyPose/APP/det_keypoint_unite_utils.py diff --git a/modelcenter/PP-TInyPose/APP/infer.py b/modelcenter/PP-TinyPose/APP/infer.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/infer.py rename to modelcenter/PP-TinyPose/APP/infer.py diff --git a/modelcenter/PP-TInyPose/APP/keypoint_infer.py b/modelcenter/PP-TinyPose/APP/keypoint_infer.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/keypoint_infer.py rename to modelcenter/PP-TinyPose/APP/keypoint_infer.py diff --git a/modelcenter/PP-TInyPose/APP/keypoint_postprocess.py b/modelcenter/PP-TinyPose/APP/keypoint_postprocess.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/keypoint_postprocess.py rename to modelcenter/PP-TinyPose/APP/keypoint_postprocess.py diff --git a/modelcenter/PP-TInyPose/APP/keypoint_preprocess.py b/modelcenter/PP-TinyPose/APP/keypoint_preprocess.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/keypoint_preprocess.py rename to 
modelcenter/PP-TinyPose/APP/keypoint_preprocess.py diff --git a/modelcenter/PP-TInyPose/APP/picodet_postprocess.py b/modelcenter/PP-TinyPose/APP/picodet_postprocess.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/picodet_postprocess.py rename to modelcenter/PP-TinyPose/APP/picodet_postprocess.py diff --git a/modelcenter/PP-TInyPose/APP/preprocess.py b/modelcenter/PP-TinyPose/APP/preprocess.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/preprocess.py rename to modelcenter/PP-TinyPose/APP/preprocess.py diff --git a/modelcenter/PP-TInyPose/APP/utils.py b/modelcenter/PP-TinyPose/APP/utils.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/utils.py rename to modelcenter/PP-TinyPose/APP/utils.py diff --git a/modelcenter/PP-TInyPose/APP/visualize.py b/modelcenter/PP-TinyPose/APP/visualize.py similarity index 100% rename from modelcenter/PP-TInyPose/APP/visualize.py rename to modelcenter/PP-TinyPose/APP/visualize.py diff --git a/modelcenter/PP-TInyPose/benchmark_cn.md b/modelcenter/PP-TinyPose/benchmark_cn.md similarity index 100% rename from modelcenter/PP-TInyPose/benchmark_cn.md rename to modelcenter/PP-TinyPose/benchmark_cn.md diff --git a/modelcenter/PP-TInyPose/benchmark_en.md b/modelcenter/PP-TinyPose/benchmark_en.md similarity index 100% rename from modelcenter/PP-TInyPose/benchmark_en.md rename to modelcenter/PP-TinyPose/benchmark_en.md diff --git a/modelcenter/PP-TInyPose/download_cn.md b/modelcenter/PP-TinyPose/download_cn.md similarity index 100% rename from modelcenter/PP-TInyPose/download_cn.md rename to modelcenter/PP-TinyPose/download_cn.md diff --git a/modelcenter/PP-TInyPose/download_en.md b/modelcenter/PP-TinyPose/download_en.md similarity index 100% rename from modelcenter/PP-TInyPose/download_en.md rename to modelcenter/PP-TinyPose/download_en.md diff --git a/modelcenter/PP-TInyPose/info.yaml b/modelcenter/PP-TinyPose/info.yaml similarity index 87% rename from modelcenter/PP-TInyPose/info.yaml rename to 
modelcenter/PP-TinyPose/info.yaml index 6cff094ce0e9e825daa552e27116b4f7404f6785..5436b5c870a67fa659688f283bc4b9ffc651bfe0 100644 --- a/modelcenter/PP-TInyPose/info.yaml +++ b/modelcenter/PP-TinyPose/info.yaml @@ -3,7 +3,7 @@ Model_Info: name: "PP-TInyPose" description: "飞桨人体姿态估计" description_en: "test" - icon: "PaddlePaddle Estimation of human posture" + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-TinyPose.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -22,7 +22,7 @@ Example: sub_tag: "动作分类" url: "https://aistudio.baidu.com/aistudio/projectdetail/4385813" Datasets: "COCO train2017,AI Challenger trainset,COCO person keypoints val2017,COCO instances val2017" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" IfTraining: 0 IfOnlineDemo: 1 diff --git a/modelcenter/PP-TInyPose/introduction_cn.ipynb b/modelcenter/PP-TinyPose/introduction_cn.ipynb similarity index 75% rename from modelcenter/PP-TInyPose/introduction_cn.ipynb rename to modelcenter/PP-TinyPose/introduction_cn.ipynb index 3d0262c79a84d9dd8584a2b763309800c72375f6..3f2064718eabde685eb3d7f99a1629330a38d255 100644 --- a/modelcenter/PP-TInyPose/introduction_cn.ipynb +++ b/modelcenter/PP-TinyPose/introduction_cn.ipynb @@ -65,55 +65,18 @@ }, "outputs": [], "source": [ - "%cd ~/work\n", - "# 克隆PaddleDetection(从gitee上更快),本项目以做持久化处理,不用克隆了。\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* 安装" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "# 运行脚本需在PaddleDetection目录下\n", - "%cd ~/work/PaddleDetection/\n", + "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", - "# 安装所需依赖项【已经做持久化处理,无需再安装】\n", - "!pip install pyzmq \n", + "# 安装其他依赖\n", + "%cd PaddleDetection\n", + "%mkdir -p 
demo_input demo_output\n", "!pip install -r requirements.txt\n", "\n", - "# 运行脚本需在PaddleDetection目录下\n", - "%cd ~/work/PaddleDetection/\n", - "# 设置python运行目录\n", - "%env PYTHONPATH=.:$PYTHONPATH\n", - "# 设置GPU\n", - "%env CUDA_VISIBLE_DEVICES=0\n", - "\n", - "# 经简单测试,提前安装所需依赖,比直接使用setup.py更快\n", - "!pip install pycocotools \n", - "!pip install cython-bbox \n", - "!pip install xmltodict \n", - "!pip install terminaltables \n", - "!pip intall motmetrics \n", - "!pip install lap \n", - "!pip install shapely \n", - "!pip install pytest-benchmark \n", - "!pip install pytest \n", - "\n", - "\n", "# 开始安装PaddleDetection \n", - "!python setup.py install #如果安装过程中长时间卡住,可中断后继续重新执行," + "!python setup.py install #如果安装过程中长时间卡住,可中断后继续重新执行" ] }, { @@ -155,23 +118,45 @@ "outputs": [], "source": [ "# 下载模型\n", - "!mkdir output_inference\n", + "!mkdir -p output_inference\n", "%cd output_inference\n", - "# 下载行人检测模型\n", + "# 下载行人检测模型s\n", "!wget https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_enhance/picodet_s_320_lcnet_pedestrian.zip\n", "!unzip picodet_s_320_lcnet_pedestrian.zip\n", "# 下载关键点检测模型\n", "!wget https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_enhance/tinypose_128x96.zip\n", - "!unzip tinypose_128x96.zip\n", - "\n", - "%cd ~/work/PaddleDetection/\n", + "!unzip tinypose_128x96.zip" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%cd ~/work/PaddleDetection/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 预测一张图片\n", - "!python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_file=demo/hrnet_demo.jpg --device=GPU\n", - "# 预测多张图片\n", - "!python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 
--image_dir=demo/ --device=GPU\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-TinyPose/000000568213.jpg\n", + "!python deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_v2_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_file=demo_input/000000568213.jpg --device=GPU --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 预测一个视频\n", - "!python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --video_file={your video file} --device=GPU\n" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-TinyPose/demo_PP-TinyPose.mp4\n", + "!python deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_v2_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --video_file=demo_input/demo_PP-TinyPose.mp4 --device=GPU --output_dir=demo_output" ] }, { @@ -241,10 +226,17 @@ "outputs": [], "source": [ "# 关键点检测模型\n", - "!python3 -m paddle.distributed.launch tools/train.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml\n", - "\n", + "!python -m paddle.distributed.launch tools/train.py -c configs/keypoint/tiny_pose/tinypose_128x96.yml" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 行人检测模型\n", - "!python3 -m paddle.distributed.launch tools/train.py -c configs/picodet/application/pedestrian_detection/picodet_s_320_lcnet_pedestrian.yml" + "!python -m paddle.distributed.launch tools/train.py -c configs/picodet/application/pedestrian_detection/picodet_s_320_lcnet_pedestrian.yml" ] }, { @@ -260,7 +252,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.8.13 ('paddle_env')", "language": "python", "name": 
"python3" }, @@ -274,7 +266,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-TInyPose/introduction_en.ipynb b/modelcenter/PP-TinyPose/introduction_en.ipynb similarity index 80% rename from modelcenter/PP-TInyPose/introduction_en.ipynb rename to modelcenter/PP-TinyPose/introduction_en.ipynb index 1f262b87c64510f99e2763727906d335462b95b9..830a61ad1696d090e488972b2f55953d43216dce 100644 --- a/modelcenter/PP-TInyPose/introduction_en.ipynb +++ b/modelcenter/PP-TinyPose/introduction_en.ipynb @@ -64,55 +64,16 @@ }, "outputs": [], "source": [ - "%cd ~/work\n", - "# clone PaddleDetection。\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* install" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "# \n", - "%cd ~/work/PaddleDetection/\n", + "# clone PaddleDetection\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", - "# \n", - "!pip install pyzmq \n", + "# Other Dependencies\n", + "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt\n", - "\n", - "#\n", - "%cd ~/work/PaddleDetection/\n", - "# \n", - "%env PYTHONPATH=.:$PYTHONPATH\n", - "# set GPU\n", - "%env CUDA_VISIBLE_DEVICES=0\n", - "\n", - "# \n", - "!pip install pycocotools \n", - "!pip install cython-bbox \n", - "!pip install xmltodict \n", - "!pip install terminaltables \n", - "!pip intall motmetrics \n", - "!pip install lap \n", - "!pip install shapely \n", - "!pip install pytest-benchmark \n", - "!pip install pytest \n", - "\n", - "\n", - "# PaddleDetection \n", - 
"!python setup.py install " + "!python setup.py install" ] }, { @@ -130,7 +91,6 @@ }, "outputs": [], "source": [ - "\n", "!python ppdet/modeling/tests/test_architectures.py" ] }, @@ -146,30 +106,49 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# Download model\n", - "!mkdir output_inference\n", + "!mkdir -p output_inference\n", "%cd output_inference\n", "# Download pedestrian detection model\n", "!wget https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_enhance/picodet_s_320_lcnet_pedestrian.zip\n", "!unzip picodet_s_320_lcnet_pedestrian.zip\n", "# Download key point detection model\n", "!wget https://bj.bcebos.com/v1/paddledet/models/keypoint/tinypose_enhance/tinypose_128x96.zip\n", - "!unzip tinypose_128x96.zip\n", - "\n", - "%cd ~/work/PaddleDetection/\n", + "!unzip tinypose_128x96.zip" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%cd ~/work/PaddleDetection/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# Predict a image\n", - "!python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_file=demo/hrnet_demo.jpg --device=GPU\n", - "# predict multiple images\n", - "!python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_dir=demo/ --device=GPU\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-TinyPose/000000568213.jpg\n", + "!python deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_v2_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --image_file=demo_input/000000568213.jpg --device=GPU 
--output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# predict video\n", - "!python3 deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --video_file={your video file} --device=GPU\n" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-TinyPose/demo_PP-TinyPose.mp4\n", + "!python deploy/python/det_keypoint_unite_infer.py --det_model_dir=output_inference/picodet_v2_s_320_pedestrian --keypoint_model_dir=output_inference/tinypose_128x96 --video_file=demo_input/demo_PP-TinyPose.mp4 --device=GPU --output_dir=demo_output" ] }, { @@ -259,7 +238,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, @@ -273,7 +252,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.10.6" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-TInyPose/tinypose_pipeline.png b/modelcenter/PP-TinyPose/tinypose_pipeline.png similarity index 100% rename from modelcenter/PP-TInyPose/tinypose_pipeline.png rename to modelcenter/PP-TinyPose/tinypose_pipeline.png diff --git a/modelcenter/PP-Vehicle/APP/app.py b/modelcenter/PP-Vehicle/APP/app.py index cd9cd7eeca9bb2caa862343c1b000836c3560d62..cbcd25958e59a3c899c3fdc3e19f3ee1a86783de 100644 --- a/modelcenter/PP-Vehicle/APP/app.py +++ b/modelcenter/PP-Vehicle/APP/app.py @@ -26,7 +26,7 @@ with gr.Blocks() as demo: with gr.TabItem("image"): - img_in = gr.Image(label="Input") + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.jpg",label="Input") img_out = gr.Image(label="Output") img_avtivity_list = 
gr.CheckboxGroup( @@ -36,7 +36,7 @@ with gr.Blocks() as demo: with gr.TabItem("video"): - video_in = gr.Video(label="Input") + video_in = gr.Video(value="https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4",label="Input") video_out = gr.Video(label="Output") video_avtivity_list = gr.CheckboxGroup( diff --git a/modelcenter/PP-Vehicle/info.yaml b/modelcenter/PP-Vehicle/info.yaml index 852266b0ecedff998e6207ef837667f7b6c4665a..39977410794100530f476b2d0f9c787f247da3c5 100644 --- a/modelcenter/PP-Vehicle/info.yaml +++ b/modelcenter/PP-Vehicle/info.yaml @@ -3,7 +3,7 @@ Model_Info: name: "PP-Vehicle" description: "飞桨车辆场景分析工具" description_en: "PaddlePaddle Vehicle Scene Analysis Tool" - icon: "" + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-Vehicle.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -22,7 +22,7 @@ Example: sub_tag: "车辆检测" url: "https://aistudio.baidu.com/aistudio/projectdetail/4228391" Datasets: "BDD-100k" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" IfTraining: 0 IfOnlineDemo: 1 diff --git a/modelcenter/PP-Vehicle/introduction_cn.ipynb b/modelcenter/PP-Vehicle/introduction_cn.ipynb index 94584952391bd170b287f02805e79abac22da3d0..f4dcee1c327d80e19e511e51965cf8e6b517cd73 100644 --- a/modelcenter/PP-Vehicle/introduction_cn.ipynb +++ b/modelcenter/PP-Vehicle/introduction_cn.ipynb @@ -53,20 +53,19 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ + "\n", "# 克隆PaddleDetection仓库\n", - "%cd ~/work\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", "# 安装其他依赖\n", "%cd PaddleDetection\n", - "!pip install -r requirements.txt\n" + "%mkdir -p demo_input demo_output\n", + "!pip install -r requirements.txt" ] }, { @@ -94,11 +93,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - 
"vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "\n", @@ -137,35 +132,62 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 1. 直接使用默认配置或者examples中配置文件,或者直接在`infer_cfg_ppvehicle.yml`中修改配置:\n", "# 例:车辆检测,指定配置文件路径和测试图片\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml --image_file=test_image.jpg --device=gpu\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.jpg \n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml --image_file=demo_input/demo_vehicle.jpg --device=gpu --output_dir=demo_output" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "# 例:车辆车牌识别,指定配置文件路径和测试视频\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml --video_file=test_video.mp4 --device=gpu\n", - "\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4 \n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml --video_file=demo_input/demo_vehicle.mp4 --device=gpu --output_dir=demo_output/vehicle_plate" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "#2. 
使用命令行进行功能开启,或者模型路径修改:\n", "# 例:车辆跟踪,指定配置文件路径和测试视频,命令行中开启MOT模型并修改模型路径,命令行中指定的模型路径优先级高于配置文件\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml -o MOT.enable=True MOT.model_dir=ppyoloe_infer/ --video_file=test_video.mp4 --device=gpu\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml -o MOT.enable=True --video_file=demo_input/demo_vehicle.mp4 --device=gpu --output_dir=demo_output/vehicle_tracking" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 例:车辆违章分析,指定配置文件和测试视频,命令行中指定违停区域设置、违停时间判断。\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4\n", "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_illegal_parking.yml \\\n", - " --video_file=../car_test.mov \\\n", + " --video_file=demo_input/demo_vehicle.mp4 \\\n", " --device=gpu \\\n", " --draw_center_traj \\\n", " --illegal_parking_time=3 \\\n", " --region_type=custom \\\n", - " --region_polygon 600 300 1300 300 1300 800 600 800\n", - "\n", - "\n", + " --region_polygon 600 300 1300 300 1300 800 600 800 \\\n", + " --output_dir=demo_output/vehicle_illegal" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#3. 
rtsp推拉流\n", "\n", "#对rtsp拉流的支持,使用--rtsp RTSP [RTSP ...]参数指定一路或者多路rtsp视频流,如果是多路地址中间用空格隔开。(或者video_file后面的视频地址直接更换为rtsp流地址),示例如下:\n", @@ -267,7 +289,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.8.13 ('paddle_env')", "language": "python", "name": "python3" }, @@ -281,7 +303,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-Vehicle/introduction_en.ipynb b/modelcenter/PP-Vehicle/introduction_en.ipynb index 89ef89c421a89ed5b6897db8f217374cf82ccbd1..5fb17870c4cdac2e81024071ddcb911d8a0e0a80 100644 --- a/modelcenter/PP-Vehicle/introduction_en.ipynb +++ b/modelcenter/PP-Vehicle/introduction_en.ipynb @@ -52,20 +52,18 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# clone PaddleDetection\n", - "%cd ~/work\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", "# Other Dependencies\n", "%cd PaddleDetection\n", - "!pip install -r requirements.txt\n" + "%mkdir -p demo_input demo_output\n", + "!pip install -r requirements.txt" ] }, { @@ -92,11 +90,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "\n", @@ -135,34 +129,41 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 1. 
Use the default configuration directly or the configuration file in examples, or modify the configuration in `infer_cfg_ppvehicle.yml`\n", "# Example:In vehicle detection,specify configuration file path and test image\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml --image_file=test_image.jpg --device=gpu\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.jpg \n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml --image_file=demo_input/demo_vehicle.jpg --device=gpu --output_dir=demo_output\n", "\n", "# Example:In license plate recognition,directly configure the examples\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml --video_file=test_video.mp4 --device=gpu\n", - "\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4 \n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml --video_file=demo_input/demo_vehicle.mp4 --device=gpu --output_dir=demo_output/vehicle_plate\n", "\n", "#2.Use the command line to enable functions or change the model path.\n", "# Example:In vehicle tracking,specify configuration file path and test video, Turn on the MOT model and modify the model path on the command line, the model path specified on the command line has higher priority than the configuration file\n", - "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml -o MOT.enable=True MOT.model_dir=ppyoloe_infer/ --video_file=test_video.mp4 --device=gpu\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4\n", + "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml -o MOT.enable=True --video_file=demo_input/demo_vehicle.mp4 --device=gpu 
--output_dir=demo_output/vehicle_tracking\n", "\n", "# Example:In vehicle illegal action analysis,specify configuration file path and test video,Setting of designated violation area and judgment of violation time in the command line\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/PP-Vehicle/demo_vehicle.mp4\n", "!python deploy/pipeline/pipeline.py --config deploy/pipeline/config/examples/infer_cfg_illegal_parking.yml \\\n", - " --video_file=../car_test.mov \\\n", + " --video_file=demo_input/demo_vehicle.mp4 \\\n", " --device=gpu \\\n", " --draw_center_traj \\\n", " --illegal_parking_time=3 \\\n", " --region_type=custom \\\n", - " --region_polygon 600 300 1300 300 1300 800 600 800\n", - "\n", + " --region_polygon 600 300 1300 300 1300 800 600 800 \\\n", + " --output_dir=demo_output/vehicle_illegal\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#3. rtsp push/pull stream\n", "#For rtsp pull stream, use --rtsp RTSP [RTSP ...] parameter to specify one or more rtsp streams. Separate the multiple addresses with a space, or replace the video address directly after the video_file with the rtsp stream address), examples as follows\n", "\n", @@ -180,7 +181,7 @@ "\n", "#Note: \n", "#1. rtsp push stream is based on [rtsp-simple-server](https://github.com/aler9/rtsp-simple-server), please enable this serving first.\n", - "#2. the output visualize will be frozen frequently if the model cost too much time, we suggest to use faster model like ppyoloe_s in tracking, this is simply replace mot_ppyoloe_l_36e_pipeline.zip with mot_ppyoloe_s_36e_pipeline.zip in model config yaml file.\n" + "#2. the output visualize will be frozen frequently if the model cost too much time, we suggest to use faster model like ppyoloe_s in tracking, this is simply replace mot_ppyoloe_l_36e_pipeline.zip with mot_ppyoloe_s_36e_pipeline.zip in model config yaml file." 
] }, { @@ -262,7 +263,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, @@ -276,7 +277,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.8" + "version": "3.10.6" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-YOLO/APP/app.py b/modelcenter/PP-YOLO/APP/app.py index 3b99e3a03f0a76a8353c1db965fcf8a5cc7c8dd7..2790259d4c84e57caaa386e746c88b34ff99f0b8 100644 --- a/modelcenter/PP-YOLO/APP/app.py +++ b/modelcenter/PP-YOLO/APP/app.py @@ -19,7 +19,7 @@ with gr.Blocks() as demo: with gr.Column(scale=1, min_width=100): - img_in = gr.Image(label="Input").style(height=200) + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg",label="Input").style(height=200) with gr.Row(): btn1 = gr.Button("Clear") diff --git a/modelcenter/PP-YOLO/info.yaml b/modelcenter/PP-YOLO/info.yaml index c0bb6b3d1ea2f735399d83fc22ac91b5e5023b7d..4e74fdfa10b4ee93ab5fbb3ef4c30cc041dcb9b8 100644 --- a/modelcenter/PP-YOLO/info.yaml +++ b/modelcenter/PP-YOLO/info.yaml @@ -3,7 +3,7 @@ Model_Info: name: "PP-YOLO" description: "PP-YOLO是PaddleDetection优化和改进的YOLOv3的模型" description_en: "PP-YOLO is a optimized model based on YOLOv3 in PaddleDetection" - icon: "@后续UE统一设计之后,会存到bos上某个位置" + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-YOLO.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -18,7 +18,7 @@ Example: sub_tag: "表计读数" url: "https://aistudio.baidu.com/aistudio/projectdetail/3975848?contributionType=1" Datasets: "COCO test-dev2017, COCO train2017, COCO val2017, Pascal VOC" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" Paper: - title: "PP-YOLO: An Effective and Efficient Implementation of Object Detector" diff --git 
a/modelcenter/PP-YOLO/introduction_cn.ipynb b/modelcenter/PP-YOLO/introduction_cn.ipynb index 523eb2fa19f2ef39f385ff99d285de40c20c8957..b54bcec95dd6c7d96c739080f768972c3a6e3894 100644 --- a/modelcenter/PP-YOLO/introduction_cn.ipynb +++ b/modelcenter/PP-YOLO/introduction_cn.ipynb @@ -31,16 +31,17 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", + "# 安装其他依赖\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -55,11 +56,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml" @@ -76,18 +73,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 导出模型,默认存储于output/ppyolo目录\n", "!python tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n", "\n", "# 预测库推理\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo/000000014439_640x640.jpg --device=GPU" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo_input/000000014439.jpg --device=GPU 
--output_dir=demo_output" ] }, { @@ -142,10 +136,29 @@ } ], "metadata": { + "kernelspec": { + "display_name": "Python 3.10.6 64-bit", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" }, - "orig_nbformat": 4 + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-YOLO/introduction_en.ipynb b/modelcenter/PP-YOLO/introduction_en.ipynb index d7a30b1ed1cba0923a8dc6b75b54175fe59ee3c4..09bb01c331331bd1110ba2637b1b2ac36561e59a 100644 --- a/modelcenter/PP-YOLO/introduction_en.ipynb +++ b/modelcenter/PP-YOLO/introduction_en.ipynb @@ -31,16 +31,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -55,11 +54,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml" @@ -76,18 +71,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# export model, model will be save in output/ppyolo as 
default\n", - "python tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n", + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!python tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n", "\n", "# inference with Paddle Inference library\n", - "CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo/000000014439_640x640.jpg --device=GPU" + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo_input/000000014439.jpg --device=GPU --output_dir=demo_output" ] }, { @@ -142,10 +134,29 @@ } ], "metadata": { + "kernelspec": { + "display_name": "Python 3.8.13 ('paddle_env')", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" }, - "orig_nbformat": 4 + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-YOLOE+/APP/app.py b/modelcenter/PP-YOLOE+/APP/app.py index 83952072f6b1954ddf7479435e3cf9f7680b14ec..7695c5ba9ffbe868fbec25165ea35c450b36831a 100644 --- a/modelcenter/PP-YOLOE+/APP/app.py +++ b/modelcenter/PP-YOLOE+/APP/app.py @@ -19,7 +19,7 @@ with gr.Blocks() as demo: with gr.Column(scale=1, min_width=100): - img_in = gr.Image(label="Input").style(height=200) + img_in = 
gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg",label="Input").style(height=200) with gr.Row(): btn1 = gr.Button("Clear") diff --git a/modelcenter/PP-YOLOE+/info.yaml b/modelcenter/PP-YOLOE+/info.yaml index b785220b6a1f77d6669533cc6194ea9f49a74a2d..fb0b86be7f59291e247eda66594e80d0ae48405d 100644 --- a/modelcenter/PP-YOLOE+/info.yaml +++ b/modelcenter/PP-YOLOE+/info.yaml @@ -3,7 +3,7 @@ Model_Info: name: "PP-YOLOE+" description: "PP-YOLOE+是PP-YOLOE的升级版" description_en: "PP-YOLOE+ is an upgraded version of PP-YOLOE" - icon: "@后续UE统一设计之后,会存到bos上某个位置" + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-YOLOE%2B.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -18,7 +18,7 @@ Example: sub_tag: "目标检测" url: "https://aistudio.baidu.com/aistudio/projectdetail/4228391" Datasets: "COCO test-dev2017, COCO train2017, COCO val2017, Pascal VOC" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" Paper: - title: "PP-YOLOE: An evolved version of YOLO" diff --git a/modelcenter/PP-YOLOE+/introduction_cn.ipynb b/modelcenter/PP-YOLOE+/introduction_cn.ipynb index 98e8058939a4f806dca169020f15a77399f3408a..d6a5df82e79e55ccfcbafb919241f60f15e085b1 100644 --- a/modelcenter/PP-YOLOE+/introduction_cn.ipynb +++ b/modelcenter/PP-YOLOE+/introduction_cn.ipynb @@ -30,16 +30,17 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", + "# 安装其他依赖\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -54,11 +55,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": 
"plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 单卡训练\n", @@ -93,11 +90,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco.pdparams trt=True" @@ -113,18 +106,12 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 推理单张图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n", - "\n", - "# 推理文件夹下的所有图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=trt_fp16 --output_dir=demo_output" ] }, { @@ -147,11 +134,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco.pdparams" @@ -167,53 +150,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, - "outputs": [], - "source": [ - "# 推理单张图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco 
--image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n", - "\n", - "# 推理文件夹下的所有图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle" - ] - }, - { - "cell_type": "markdown", "metadata": {}, - "source": [ - "#### 3.2.3 使用ONNX-TensorRT进行速度测试\n", - "**使用 ONNX 和 TensorRT** 进行测速,执行以下命令:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, "outputs": [], "source": [ - "# 导出模型\n", - "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_s_80e_coco.pdparams exclude_nms=True trt=True\n", - "\n", - "# 转化成ONNX格式\n", - "!paddle2onnx --model_dir output_inference/ppyoloe_plus_crn_s_80e_coco --model_filename model.pdmodel --params_filename model.pdiparams --opset_version 12 --save_file ppyoloe_plus_crn_s_80e_coco.onnx\n", - "\n", - "# 测试速度,半精度,batch_size=1\n", - "!trtexec --onnx=./ppyoloe_plus_crn_s_80e_coco.onnx --saveEngine=./ppyoloe_s_bs1.engine --workspace=1024 --avgRuns=1000 --shapes=image:1x3x640x640,scale_factor:1x2 --fp16\n", - "\n", - "# 测试速度,半精度,batch_size=32\n", - "!trtexec --onnx=./ppyoloe_plus_crn_s_80e_coco.onnx --saveEngine=./ppyoloe_s_bs32.engine --workspace=1024 --avgRuns=1000 --shapes=image:32x3x640x640,scale_factor:32x2 --fp16\n", - "\n", - "# 使用上边的脚本, 在T4 和 TensorRT 7.2的环境下,PPYOLOE-plus-s模型速度如下\n", - "# batch_size=1, 2.80ms, 357fps\n", - "# batch_size=32, 67.69ms, 472fps" + "# 推理单张图片\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=paddle --output_dir=demo_output\n" ] }, { @@ -252,10 +193,29 @@ } ], "metadata": { - "language_info": { - "name": "python" + "kernelspec": { + "display_name": "Python 3.8.13 ('paddle_env')", + 
"language": "python", + "name": "python3" }, - "orig_nbformat": 4 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-YOLOE+/introduction_en.ipynb b/modelcenter/PP-YOLOE+/introduction_en.ipynb index 3feaa15b609c70beaceb2173e17d4b96403584e1..d334705839d53e7878d8a74b161c27c4ea429f8d 100644 --- a/modelcenter/PP-YOLOE+/introduction_en.ipynb +++ b/modelcenter/PP-YOLOE+/introduction_en.ipynb @@ -30,16 +30,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -54,11 +53,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# training with single GPU\n", @@ -93,11 +88,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco.pdparams trt=True" @@ -113,18 +104,12 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + 
"metadata": {}, "outputs": [], "source": [ "# inference single image\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n", - "\n", - "# inference all images in the directory\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=trt_fp16 --output_dir=demo_output" ] }, { @@ -147,25 +132,19 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco.pdparams" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": { "vscode": { "languageId": "plaintext" } }, - "outputs": [], "source": [ "Inference with PaddleInference directly." 
] @@ -173,54 +152,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, - "outputs": [], - "source": [ - "# inference single image\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n", - "\n", - "# inference all images in the directory\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_dir=demo/ --device=gpu --run_mode=paddle" - ] - }, - { - "cell_type": "markdown", "metadata": {}, - "source": [ - "#### 3.2.3 Speed testing with ONNX-TensorRT\n", - "**Using TensorRT Inference with ONNX** to test speed, run following command" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, "outputs": [], "source": [ - "# export inference model with trt=True\n", - "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_s_80e_coco.pdparams exclude_nms=True trt=True\n", - "\n", - "# convert to onnx\n", - "!paddle2onnx --model_dir output_inference/ppyoloe_plus_crn_s_80e_coco --model_filename model.pdmodel --params_filename model.pdiparams --opset_version 12 --save_file ppyoloe_plus_crn_s_80e_coco.onnx\n", - "\n", - "# trt inference using fp16 and batch_size=1\n", - "!trtexec --onnx=./ppyoloe_plus_crn_s_80e_coco.onnx --saveEngine=./ppyoloe_s_bs1.engine --workspace=1024 --avgRuns=1000 --shapes=image:1x3x640x640,scale_factor:1x2 --fp16\n", - "\n", - "# trt inference using fp16 and batch_size=32\n", - "!trtexec --onnx=./ppyoloe_plus_crn_s_80e_coco.onnx --saveEngine=./ppyoloe_s_bs32.engine --workspace=1024 --avgRuns=1000 --shapes=image:32x3x640x640,scale_factor:32x2 --fp16\n", - "\n", - "# Using the above script, T4 and tensorrt 7.2 machine, 
the speed of PPYOLOE-s model is as follows,\n", - "\n", - "# batch_size=1, 2.80ms, 357fps\n", - "# batch_size=32, 67.69ms, 472fps" + "# inference single image\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_plus_crn_l_80e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=paddle --output_dir=demo_output" ] }, { @@ -259,10 +195,21 @@ } ], "metadata": { - "language_info": { - "name": "python" + "kernelspec": { + "display_name": "Python 3.10.6 64-bit", + "language": "python", + "name": "python3" }, - "orig_nbformat": 4 + "language_info": { + "name": "python", + "version": "3.10.6" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-YOLOE/APP/app.py b/modelcenter/PP-YOLOE/APP/app.py index 755621da9def89ecb5c04905d36fd19c364fbf6c..e8408b1f74b0c2f23e5a5bfaf521307d16d9373f 100644 --- a/modelcenter/PP-YOLOE/APP/app.py +++ b/modelcenter/PP-YOLOE/APP/app.py @@ -19,7 +19,7 @@ with gr.Blocks() as demo: with gr.Column(scale=1, min_width=100): - img_in = gr.Image(label="Input").style(height=200) + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg",label="Input").style(height=200) with gr.Row(): btn1 = gr.Button("Clear") diff --git a/modelcenter/PP-YOLOE/info.yaml b/modelcenter/PP-YOLOE/info.yaml index c61544c4e91df592a838421fecd5940e18859673..e48517f9096207e0ee472e776a256feb1e20a3b0 100644 --- a/modelcenter/PP-YOLOE/info.yaml +++ b/modelcenter/PP-YOLOE/info.yaml @@ -4,7 +4,7 @@ Model_Info: description: "PP-YOLOE是基于PP-YOLOv2的卓越的单阶段Anchor-free模型,超越了多种流行的YOLO模型" description_en: "PP-YOLOE is an excellent single-stage anchor-free model based on\ \ PP-YOLOv2, surpassing a variety of popular YOLO models" - icon: "@后续UE统一设计之后,会存到bos上某个位置" + icon: 
"https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-YOLOE.png" from_repo: "PaddleDetection" Task: - tag_en: "Computer Vision" @@ -19,7 +19,7 @@ Example: sub_tag: "目标检测" url: "https://aistudio.baidu.com/aistudio/projectdetail/4228391" Datasets: "COCO test-dev2017, COCO train2017, COCO val2017, Pascal VOC" -Pulisher: "Baidu" +Publisher: "Baidu" License: "apache.2.0" Paper: - title: "PP-YOLOE: An evolved version of YOLO" diff --git a/modelcenter/PP-YOLOE/introduction_cn.ipynb b/modelcenter/PP-YOLOE/introduction_cn.ipynb index 3bd6a5ed5c447e07b391c0f80d837eb7394625a7..70e9369850eb25b70d814cef1a1cc3719549e819 100644 --- a/modelcenter/PP-YOLOE/introduction_cn.ipynb +++ b/modelcenter/PP-YOLOE/introduction_cn.ipynb @@ -30,16 +30,17 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", + "# 安装其他依赖\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -54,11 +55,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 单卡训练\n", @@ -93,11 +90,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams trt=True" @@ -113,18 +106,12 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 
推理单张图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n", - "\n", - "# 推理文件夹下的所有图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=trt_fp16 --output_dir=demo_output" ] }, { @@ -147,11 +134,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams" @@ -167,18 +150,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# 推理单张图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n", - "\n", - "# 推理文件夹下的所有图片\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_dir=demo/ --device=gpu --run_mode=paddle" + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=paddle --output_dir=demo_output\n" ] }, { @@ -225,10 +201,29 @@ } ], "metadata": { - "language_info": { - "name": "python" + "kernelspec": { + "display_name": "Python 3.8.13 ('paddle_env')", + 
"language": "python", + "name": "python3" }, - "orig_nbformat": 4 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-YOLOE/introduction_en.ipynb b/modelcenter/PP-YOLOE/introduction_en.ipynb index add9b2c9e518e56f279b99144f82cc4e507d0d96..ffe2ffbfc6d433c5c736391b85a76317c42a470b 100644 --- a/modelcenter/PP-YOLOE/introduction_en.ipynb +++ b/modelcenter/PP-YOLOE/introduction_en.ipynb @@ -31,16 +31,15 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ - "%cd ~/work\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", "!pip install -r requirements.txt" ] }, @@ -55,11 +54,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# training with single GPU\n", @@ -94,11 +89,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams trt=True" @@ -114,18 +105,12 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, 
"outputs": [], "source": [ "# inference single image\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=trt_fp16\n", - "\n", - "# inference all images in the directory\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_dir=demo/ --device=gpu --run_mode=trt_fp16" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=trt_fp16 --output_dir=demo_output" ] }, { @@ -148,11 +133,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "!python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams" @@ -168,18 +149,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, + "metadata": {}, "outputs": [], "source": [ "# inference single image\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_mode=paddle\n", - "\n", - "# inference all images in the directory\n", - "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_dir=demo/ --device=gpu --run_mode=paddle" + "!CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo_input/000000014439.jpg --device=gpu --run_mode=paddle --output_dir=demo_output" ] }, { @@ -226,10 +200,21 @@ } ], "metadata": 
{ - "language_info": { - "name": "python" + "kernelspec": { + "display_name": "Python 3.10.6 64-bit", + "language": "python", + "name": "python3" }, - "orig_nbformat": 4 + "language_info": { + "name": "python", + "version": "3.10.6" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/modelcenter/PP-YOLOv2/APP/app.py b/modelcenter/PP-YOLOv2/APP/app.py index e722a14a0fc199a2b705f0344a71c0e6d5857331..175adcb84fd974bd0b1698679375e58ce40fa0a2 100644 --- a/modelcenter/PP-YOLOv2/APP/app.py +++ b/modelcenter/PP-YOLOv2/APP/app.py @@ -19,7 +19,7 @@ with gr.Blocks() as demo: with gr.Column(scale=1, min_width=100): - img_in = gr.Image(label="Input").style(height=200) + img_in = gr.Image(value="https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg",label="Input").style(height=200) with gr.Row(): btn1 = gr.Button("Clear") diff --git a/modelcenter/PP-YOLOv2/info.yaml b/modelcenter/PP-YOLOv2/info.yaml index 97c54169a104a678d3e5be69c04453933a2566c7..e166871b130ae47be43256e42290c1b02e467a8f 100644 --- a/modelcenter/PP-YOLOv2/info.yaml +++ b/modelcenter/PP-YOLOv2/info.yaml @@ -5,7 +5,7 @@ Model_Info: description: PP-YOLOv2是PP-YOLO的升级版本 description_en: PP-YOLOv2 is an upgraded version of PP-YOLO update_time: - icon: url + icon: "https://paddledet.bj.bcebos.com/modelcenter/images/icons/icon_PP-YOLOv2.png" from_repo: "PaddleDetection" Task: @@ -31,7 +31,7 @@ Example: Datasets: COCO test-dev2017, COCO train2017, COCO val2017, Pascal VOC -Pulisher: Baidu +Publisher: Baidu License: Apache 2.0 diff --git a/modelcenter/PP-YOLOv2/introduction_cn.ipynb b/modelcenter/PP-YOLOv2/introduction_cn.ipynb index fd43dcd78727cfe29354ad452987446b3fc45cc6..6163597dd7a1c017496bb9c3071ab101ad286fab 100644 --- a/modelcenter/PP-YOLOv2/introduction_cn.ipynb +++ b/modelcenter/PP-YOLOv2/introduction_cn.ipynb @@ -63,55 +63,15 @@ }, "outputs": [], 
"source": [ - "%cd ~/work\n", - "# 克隆PaddleDetection(从gitee上更快),本项目以做持久化处理,不用克隆了。\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* 安装" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "# 运行脚本需在PaddleDetection目录下\n", - "%cd ~/work/PaddleDetection/\n", - "\n", - "# 安装所需依赖项【已经做持久化处理,无需再安装】\n", - "!pip install pyzmq \n", - "!pip install -r requirements.txt\n", - "\n", - "# 运行脚本需在PaddleDetection目录下\n", - "%cd ~/work/PaddleDetection/\n", - "# 设置python运行目录\n", - "%env PYTHONPATH=.:$PYTHONPATH\n", - "# 设置GPU\n", - "%env CUDA_VISIBLE_DEVICES=0\n", - "\n", - "# 经简单测试,提前安装所需依赖,比直接使用setup.py更快\n", - "!pip install pycocotools \n", - "!pip install cython-bbox \n", - "!pip install xmltodict \n", - "!pip install terminaltables \n", - "!pip intall motmetrics \n", - "!pip install lap \n", - "!pip install shapely \n", - "!pip install pytest-benchmark \n", - "!pip install pytest \n", - "\n", - "\n", - "# 开始安装PaddleDetection \n", - "!python setup.py install #如果安装过程中长时间卡住,可中断后继续重新执行," + "# 克隆PaddleDetection仓库\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", + "\n", + "# 安装其他依赖\n", + "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", + "!pip install -r requirements.txt" ] }, { @@ -154,7 +114,8 @@ "source": [ "# 在GPU上预测一张图片\n", "!export CUDA_VISIBLE_DEVICES=0\n", - "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true 
weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo_input/000000014439.jpg" ] }, { @@ -190,19 +151,6 @@ " " ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# 查看解压目录\n", - "#%cd ~/work/PaddleDetection/\n", - "#!tree -d dataset/wider_face" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -310,11 +258,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 4. 模型原理\n", - "\n", - "\n", - "(必须带有图片)\n", - "\n" + "## 4. 模型原理" ] }, { @@ -379,7 +323,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3.8.13 ('paddle_env')", "language": "python", "name": "python3" }, @@ -393,7 +337,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.8" + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "864bc28e4d94d9c1c4bd0747e4313c0ab41718ab445ced17dbe1a405af5ecc64" + } } }, "nbformat": 4, diff --git a/modelcenter/PP-YOLOv2/introduction_en.ipynb b/modelcenter/PP-YOLOv2/introduction_en.ipynb index fc094724529cb13ef8eec0b2e4e88d985f4f835d..f4c6f447082a91b489745aba2173c4ae80ed8cce 100644 --- a/modelcenter/PP-YOLOv2/introduction_en.ipynb +++ b/modelcenter/PP-YOLOv2/introduction_en.ipynb @@ -57,54 +57,13 @@ }, "outputs": [], "source": [ - "%cd /home/aistudio/work\n", + "%mkdir -p ~/work\n", + "%cd ~/work/\n", + "!git clone https://github.com/PaddlePaddle/PaddleDetection.git\n", "\n", - "!git clone https://gitee.com/paddlepaddle/PaddleDetection" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* Installation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# The script needs to be run in the PaddleDetection directory\n", - "%cd /home/aistudio/work/PaddleDetection/\n", - "\n", - "# Install the required dependencies [already 
persisted, no need to install again].\n", - "!pip install pyzmq -t /home/aistudio/external-libraries \n", - "# After testing on AIstudio paddlepaddle 2.2.2, an error will occur, because pyzmq needs to be installed in advance.\n", - "!pip install -r requirements.txt\n", - "\n", - "# The script needs to be run in the PaddleDetection directory.\n", - "%cd /home/aistudio/work/PaddleDetection/\n", - "# Set the python run directory.\n", - "%env PYTHONPATH=.:$PYTHONPATH\n", - "# Set GPU\n", - "%env CUDA_VISIBLE_DEVICES=0\n", - "\n", - "!pip install pycocotools \n", - "!pip install cython-bbox \n", - "!pip install xmltodict \n", - "!pip install terminaltables \n", - "!pip intall motmetrics \n", - "!pip install lap \n", - "!pip install shapely \n", - "!pip install pytest-benchmark \n", - "!pip install pytest \n", - "\n", - "\n", - "# Download PaddleDetection \n", - "!python setup.py install " + "%cd PaddleDetection\n", + "%mkdir -p demo_input demo_output\n", + "!pip install -r requirements.txt" ] }, { @@ -146,7 +105,8 @@ "source": [ "# Predict a picture on the GPU.\n", "!export CUDA_VISIBLE_DEVICES=0\n", - "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg" + "!wget -P demo_input -N https://paddledet.bj.bcebos.com/modelcenter/images/General/000000014439.jpg\n", + "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo_input/000000014439.jpg" ] }, { @@ -176,22 +136,6 @@ "* Prepare the datasets." 
] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "# return to /home/aistudio\n", - "%cd ~\n", - "\n", - "# Review the extract directory\n", - "%cd /home/aistudio/work/PaddleDetection/\n", - "!tree -d dataset/wider_face" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -361,7 +305,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3.10.6 64-bit", "language": "python", "name": "python3" }, @@ -375,7 +319,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.8" + "version": "3.10.6" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } } }, "nbformat": 4,