diff --git a/modelcenter/ERNIE-3.0/.gitkeep b/modelcenter/ERNIE-3.0/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/ERNIE-Layout/.gitkeep b/modelcenter/ERNIE-Layout/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/ERNIE-M/.gitkeep b/modelcenter/ERNIE-M/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/ERNIE-UIE/.gitkeep b/modelcenter/ERNIE-UIE/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-ASR/.gitkeep b/modelcenter/PP-ASR/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-HGNet/.gitkeep b/modelcenter/PP-HGNet/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-HumanMatting/.gitkeep b/modelcenter/PP-HumanMatting/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-HumanMattingV2/.gitkeep b/modelcenter/PP-HumanMattingV2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-HumanSeg/.gitkeep b/modelcenter/PP-HumanSeg/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-HumanSegV2/.gitkeep b/modelcenter/PP-HumanSegV2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-HumanV2/.gitkeep b/modelcenter/PP-HumanV2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-LCNet/.gitkeep b/modelcenter/PP-LCNet/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-LCNetV2/.gitkeep b/modelcenter/PP-LCNetV2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-LiteSeg/.gitkeep b/modelcenter/PP-LiteSeg/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-MSVSR/.gitkeep b/modelcenter/PP-MSVSR/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-Matting/.gitkeep b/modelcenter/PP-Matting/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-OCR/.gitkeep b/modelcenter/PP-OCR/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-OCRv2/.gitkeep b/modelcenter/PP-OCRv2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/modelcenter/PP-OCRv3/.gitkeep b/modelcenter/PP-OCRv3/.gitkeep new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-PicoDet/.gitkeep b/modelcenter/PP-PicoDet/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-ShiTu/.gitkeep b/modelcenter/PP-ShiTu/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-ShiTuV2/.gitkeep b/modelcenter/PP-ShiTuV2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-Structure/.gitkeep b/modelcenter/PP-Structure/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-StructureV2/.gitkeep b/modelcenter/PP-StructureV2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-TInyPose/.gitkeep b/modelcenter/PP-TInyPose/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-TSM/.gitkeep b/modelcenter/PP-TSM/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-TSMv2/.gitkeep b/modelcenter/PP-TSMv2/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-TTS/.gitkeep b/modelcenter/PP-TTS/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-Vehicle/.gitkeep b/modelcenter/PP-Vehicle/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-YOLO/.gitkeep b/modelcenter/PP-YOLO/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-YOLOE+/.gitkeep b/modelcenter/PP-YOLOE+/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-YOLOE/.gitkeep b/modelcenter/PP-YOLOE/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/modelcenter/PP-YOLOv2/APP/app.py b/modelcenter/PP-YOLOv2/APP/app.py new file mode 100644 index 0000000000000000000000000000000000000000..8e9e2d1a9c019283ba245180bf8cadd539e03076 --- /dev/null +++ b/modelcenter/PP-YOLOv2/APP/app.py @@ -0,0 +1,41 @@
+import gradio as gr
+import numpy as np
+
+import cv2 as cv
+
+
+# UGC: Define the inference fn() for your models
+def model_inference(image):
+    json_out = {
+        "base64": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAg...",
+        "result": "123456"
+    }
+    return image, json_out
+
+
+def clear_all():
+    return None, None, None
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("Object Detection")
+
+    with gr.Column(scale=1, min_width=100):
+
+        img_in = gr.Image(
+            value="https://i.picsum.photos/id/867/600/600.jpg?hmac=qE7QFJwLmlE_WKI7zMH6SgH5iY5fx8ec6ZJQBwKRT44",
+            shape=(200, 200),
+            label="Input").style(height=200)
+
+        with gr.Row():
+            btn1 = gr.Button("Clear")
+            btn2 = gr.Button("Submit")
+
+        img_out = gr.Image(shape=(200, 200), label="Output").style(height=200)
+        json_out = gr.JSON(label="jsonOutput")
+
+    btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out, json_out])
+    btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out, json_out])
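+
+# --- A minimal sketch (not wired into the demo above) of what a real CPU
+# inference fn could look like. It assumes a PP-YOLOv2 inference model exported
+# with PaddleDetection's tools/export_model.py into ./inference_model/
+# (model.pdmodel + model.pdiparams); the directory name, the 640x640 input size
+# and the normalization constants below are assumptions, not part of this
+# template. ---
+from paddle.inference import Config, create_predictor
+
+
+def load_predictor(model_dir="inference_model"):
+    config = Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
+    config.disable_gpu()  # app.yml pins device: cpu
+    return create_predictor(config)
+
+
+def ppyolov2_inference(predictor, image):
+    h, w = image.shape[:2]
+    img = cv.resize(image, (640, 640)).astype("float32") / 255.0
+    img = (img - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225]
+    img = img.transpose(2, 0, 1)[np.newaxis].astype("float32")
+    feeds = {
+        "image": img,
+        "im_shape": np.array([[640, 640]], dtype="float32"),
+        "scale_factor": np.array([[640.0 / h, 640.0 / w]], dtype="float32"),
+    }
+    for name in predictor.get_input_names():
+        predictor.get_input_handle(name).copy_from_cpu(feeds[name])
+    predictor.run()
+    # Exported PP-YOLO models emit an (N, 6) array: class_id, score, x1, y1, x2, y2
+    return predictor.get_output_handle(predictor.get_output_names()[0]).copy_to_cpu()
+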
+demo.launch()
diff --git a/modelcenter/PP-YOLOv2/APP/app.yml b/modelcenter/PP-YOLOv2/APP/app.yml new file mode 100644 index 0000000000000000000000000000000000000000..4542ce34b99c8497f8fbce15c5d339a795fb629c --- /dev/null +++ b/modelcenter/PP-YOLOv2/APP/app.yml @@ -0,0 +1,11 @@
+# 【PP-YOLOv2-App-YAML】
+
+APP_Info:
+  title: PP-YOLOv2-App
+  colorFrom: blue
+  colorTo: yellow
+  sdk: gradio
+  sdk_version: 3.4.1
+  app_file: app.py
+  license: apache-2.0
+  device: cpu
\ No newline at end of file
diff --git a/modelcenter/PP-YOLOv2/APP/requirements.txt b/modelcenter/PP-YOLOv2/APP/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..9dfc4b72bf6912e3cddfb43ecb8c6a49e9699f5c --- /dev/null +++ b/modelcenter/PP-YOLOv2/APP/requirements.txt @@ -0,0 +1,2 @@
+gradio
+paddlepaddle
diff --git a/modelcenter/PP-YOLOv2/benchmark_cn.md b/modelcenter/PP-YOLOv2/benchmark_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..bf6ba08e1efe9883cc3f8eca4155fd701c1f0241 --- /dev/null +++ b/modelcenter/PP-YOLOv2/benchmark_cn.md @@ -0,0 +1,46 @@
+## 1. 训练Benchmark
+
+### 1.1 软硬件环境
+
+* PP-YOLO模型训练过程中使用8 GPUs,每GPU batch size为24进行训练,如训练GPU数和batch size不使用上述配置,须参考FAQ调整学习率和迭代次数。
+
+* PP-YOLO_MobileNetV3模型训练过程中使用4 GPUs,每GPU batch size为32进行训练,如训练GPU数和batch size不使用上述配置,须参考FAQ调整学习率和迭代次数。
+
+* PP-YOLO-tiny模型训练过程中使用8 GPUs,每GPU batch size为32进行训练,如训练GPU数和batch size不使用上述配置,须参考FAQ调整学习率和迭代次数。
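+
+上述“调整学习率”可按线性缩放规则(linear scaling rule)进行:学习率随总batch size等比例缩放。下面用几行Python做一个示意计算(基准配置8卡、每卡24来自上文;0.005为PP-YOLOv2默认配置中的基准学习率,若与实际配置不符请以配置文件为准;新配置数值仅为举例):
+
+```python
+# 线性缩放规则示意:学习率随总batch size等比例调整(数值仅为示例)
+base_lr = 0.005          # 基准学习率(8卡 x 每卡batch size 24)
+base_total_bs = 8 * 24
+new_total_bs = 4 * 12    # 假设改为4卡 x 每卡batch size 12
+new_lr = base_lr * new_total_bs / base_total_bs
+print(new_lr)            # 0.00125
+```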
+### 1.2 数据集
+PP-YOLO模型使用COCO数据集中的train2017作为训练集,使用val2017和test-dev2017作为测试集。
+
+### 1.3 指标(字段可根据模型情况,自行定义)
+
+|模型名称 | 模型简介 | 模型体积 | 输入尺寸 | ips |
+|---|---|---|---|---|
+|ppyolov2_r50vd_dcn_1x_coco | 目标检测 | | 640 | |
+|ppyolov2_r50vd_dcn_1x_coco | 目标检测 | | 320 | |
+
+## 2. 推理Benchmark
+
+### 2.1 软硬件环境
+
+* PP-YOLO模型推理速度测试采用单卡V100,batch size=1进行测试,使用CUDA 10.2、CUDNN 7.5.1,TensorRT推理速度测试使用TensorRT 5.1.2.2。
+
+* PP-YOLO_MobileNetV3模型推理速度测试环境配置为麒麟990芯片单线程。
+
+* PP-YOLO-tiny模型推理速度测试环境配置为麒麟990芯片4线程,arm8架构。
+
+### 2.2 数据集
+PP-YOLO模型使用COCO数据集中的train2017作为训练集,使用val2017和test-dev2017作为测试集。
+
+### 2.3 指标(字段可根据模型情况,自行定义)
+
+PP-YOLOv2(R50)在COCO test数据集上的mAP从45.9%提升到49.5%,相较v1提升了3.6个百分点;FP32推理速度高达68.9 FPS,FP16推理速度高达106.5 FPS,超越了YOLOv4甚至YOLOv5!如果使用ResNet101作为骨干网络,PP-YOLOv2(R101)的mAP更高达50.3%,并且比同等精度下的YOLOv5x快15.9%!
+
+![](https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/docs/images/ppyolo_map_fps.png)
+
+## 3. 相关使用说明
+请参考:https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/ppyolo/README_cn.md
diff --git a/modelcenter/PP-YOLOv2/benchmark_en.md b/modelcenter/PP-YOLOv2/benchmark_en.md new file mode 100644 index 0000000000000000000000000000000000000000..8ffee96ae44bc3f4b3e4cdf96f257d3394bfcb86 --- /dev/null +++ b/modelcenter/PP-YOLOv2/benchmark_en.md @@ -0,0 +1,42 @@
+## 1. Training Benchmark
+
+### 1.1 Environment
+
+* The PP-YOLO model was trained on 8 GPUs with a per-GPU batch size of 24. If you train with a different number of GPUs or batch size, refer to the FAQ to adjust the learning rate and number of iterations.
+
+* The PP-YOLO_MobileNetV3 model was trained on 4 GPUs with a per-GPU batch size of 32. If you train with a different number of GPUs or batch size, refer to the FAQ to adjust the learning rate and number of iterations.
+
+* The PP-YOLO-tiny model was trained on 8 GPUs with a per-GPU batch size of 32. If you train with a different number of GPUs or batch size, refer to the FAQ to adjust the learning rate and number of iterations.
+
+### 1.2 Datasets
+The PP-YOLO model uses train2017 of the COCO dataset as the training set, and val2017 and test-dev2017 as the test sets.
+
+### 1.3 Benchmark
+
+| Model | Task | Model Size | Input Size | IPS |
+|---|---|---|---|---|
+| ppyolov2_r50vd_dcn_1x_coco | Object Detection | | 640 | |
+| ppyolov2_r50vd_dcn_1x_coco | Object Detection | | 320 | |
+
+## 2. Inference Benchmark
+
+### 2.1 Environment
+
+* The inference speed of the PP-YOLO model is tested on a single V100 with batch size=1, CUDA 10.2 and CUDNN 7.5.1; the TensorRT inference speed test uses TensorRT 5.1.2.2.
+
+* The inference speed of the PP-YOLO_MobileNetV3 model is tested on a Kirin 990 chip, single thread.
+
+* The inference speed of the PP-YOLO-tiny model is tested on a Kirin 990 chip, 4 threads, arm8 architecture.
+
+### 2.2 Datasets
+The PP-YOLO model uses train2017 of the COCO dataset as the training set, and val2017 and test-dev2017 as the test sets.
+
+### 2.3 Benchmark
+PP-YOLOv2 (R50) raises mAP on the COCO test set from 45.9% to 49.5%, 3.6 percentage points higher than v1. FP32 FPS reaches 68.9 and FP16 FPS reaches 106.5, surpassing YOLOv4 and even YOLOv5! With ResNet101 as the backbone, PP-YOLOv2 (R101) reaches 50.3% mAP and is 15.9% faster than YOLOv5x at the same accuracy!
+
+![](https://raw.githubusercontent.com/PaddlePaddle/PaddleDetection/release/2.4/docs/images/ppyolo_map_fps.png)
+
+## 3. Reference
+Ref: https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.4/configs/ppyolo/README_cn.md
diff --git a/modelcenter/PP-YOLOv2/download_cn.md b/modelcenter/PP-YOLOv2/download_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..e40accff30c11fb9c8c5aec080e36f55455b9c0a --- /dev/null +++ b/modelcenter/PP-YOLOv2/download_cn.md @@ -0,0 +1,5 @@
+# 提供模型所支持的任务场景、推理和预训练模型文件:
+|模型名称 | 模型简介 | 模型体积 | 输入尺寸 | 下载地址 |
+|---|---|---|---|---|
+|ppyolov2_r50vd_dcn_1x_coco | 目标检测 | | 640 | [推理模型]()/[预训练模型]() |
+|ppyolov2_r50vd_dcn_1x_coco | 目标检测 | | 320 | [推理模型]()/[预训练模型]() |
diff --git a/modelcenter/PP-YOLOv2/download_en.md b/modelcenter/PP-YOLOv2/download_en.md new file mode 100644 index 0000000000000000000000000000000000000000..544020ee6d772861cd311e16ec529fe1ede68e22 --- /dev/null +++ b/modelcenter/PP-YOLOv2/download_en.md @@ -0,0 +1,6 @@
+# Download
+
+| model | task | model_size | input_size | download |
+|--------|----|------------|------------|-------------|
+| ppyolov2_r50vd_dcn_1x_coco | Object Detection | | 640 | [inference_model]()/[pretrained_model]() |
+| ppyolov2_r50vd_dcn_1x_coco | Object Detection | | 320 | [inference_model]()/[pretrained_model]() |
diff --git a/modelcenter/PP-YOLOv2/info.yaml b/modelcenter/PP-YOLOv2/info.yaml new file mode 100644 index 0000000000000000000000000000000000000000..41aaeef8253356c8d7e9b96216ee6eccf29259d9 --- /dev/null +++ b/modelcenter/PP-YOLOv2/info.yaml @@ -0,0 +1,46 @@
+# 【PP-YOLOv2-YAML】(注:本yaml样例仅供大家了解数据结构,研发同学会提供页面配置前端工具,到时候自动生成yaml文件)
+
+Model_Info:
+  name: "PP-YOLOv2"
+  description:
+  description_en:
+  update_time:
+  icon: url
+
+Task:
+-
+  tag: 计算机视觉
+  tag_en: Computer Vision
+  sub_tag: 目标检测,人脸检测
+  sub_tag_en: Object Detection,Face Detection
+
+Example:
+-
+  tag: 智慧安防
+  tag_en: Intelligent Security
+  sub_tag: 火灾/烟雾检测
+  sub_tag_en: Smoke Detection
+  title: 基于PP-YOLOv2的火灾/烟雾检测
+  title_en: Smoke Detection based on PP-YOLOv2
+  url: https://aistudio.baidu.com/aistudio/projectdetail/2503301
+  url_en:
+
+Datasets: COCO test-dev2017, COCO train2017, COCO val2017, Pascal VOC
+
+Publisher: Baidu
+
+License: Apache 2.0
+
+Paper:
+-
+  title: "PP-YOLOv2: A Practical Object Detector"
+  url: https://arxiv.org/pdf/2104.10419.pdf
+
+IfTraining: 1
+
+IfOnlineDemo: 1
diff --git a/modelcenter/PP-YOLOv2/introduction_cn.ipynb b/modelcenter/PP-YOLOv2/introduction_cn.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..6b381b6b95071f71836aeb7a6d2c1a1c51b9e554 --- /dev/null +++ b/modelcenter/PP-YOLOv2/introduction_cn.ipynb @@ -0,0 +1,401 @@
+{ "cells": [
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 1. PP-YOLOv2模型简介\n",
+    "YOLO系列作为目标检测的重要算法,采用单阶段(one-stage)方法使得检测速度大幅提升,但速度的提升也牺牲了部分准确率作为代价。因此,如何在提升YOLOv3准确性的同时保持推理速度,成为其实际应用时的关键问题。为同时满足准确性与高效性,PP-YOLOv2作者团队做了大量优化工作:PP-YOLOv2(R50)在COCO test数据集上的mAP从45.9%提升到49.5%,相较v1提升了3.6个百分点;FP32推理速度高达68.9 FPS,FP16推理速度高达106.5 FPS,超越了YOLOv4甚至YOLOv5!如果使用ResNet101作为骨干网络,PP-YOLOv2(R101)的mAP更高达50.3%,并且比同等精度下的YOLOv5x快15.9%!\n",
+    "\n",
+    "PP-YOLO模型由飞桨官方出品,是PaddleDetection基于YOLOv3优化和改进的模型。\n",
+    "更多关于PaddleDetection的内容可点击 https://github.com/PaddlePaddle/PaddleDetection 进行了解。\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 2. 模型效果及应用场景\n",
+    "### 2.1 目标检测任务:\n",
+    "\n",
+    "#### 2.1.1 数据集:\n",
+    "\n",
+    "数据集以COCO格式为主,分为训练集和测试集。\n",
+    "\n",
+    "#### 2.1.2 模型效果速览:\n",
+    "\n",
+    "PP-YOLOv2在图片上的检测效果示例从略(原始配图链接已失效),可运行下文“快速体验”直接查看输出。\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 3. 模型如何使用\n",
+    "\n",
+    "### 3.1 模型推理:\n",
+    "* 下载\n",
+    "\n",
+    "(不在Jupyter Notebook上运行时需要将\"!\"或者\"%\"去掉。)\n"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true, "tags": [] }, "outputs": [], "source": [
+    "%cd ~/work\n",
+    "# 克隆PaddleDetection(从gitee上更快),本项目已做持久化处理,无需再克隆。\n",
+    "!git clone https://gitee.com/paddlepaddle/PaddleDetection"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [ "* 安装" ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true, "tags": [] }, "outputs": [], "source": [
+    "# 运行脚本需在PaddleDetection目录下\n",
+    "%cd ~/work/PaddleDetection/\n",
+    "\n",
+    "# 安装所需依赖项【已经做持久化处理,无需再安装】\n",
+    "!pip install pyzmq\n",
+    "!pip install -r requirements.txt\n",
+    "\n",
+    "# 设置python运行目录\n",
+    "%env PYTHONPATH=.:$PYTHONPATH\n",
+    "# 设置GPU\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "\n",
+    "# 经简单测试,提前安装所需依赖,比直接使用setup.py更快\n",
+    "!pip install pycocotools\n",
+    "!pip install cython-bbox\n",
+    "!pip install xmltodict\n",
+    "!pip install terminaltables\n",
+    "!pip install motmetrics\n",
+    "!pip install lap\n",
+    "!pip install shapely\n",
+    "!pip install pytest-benchmark\n",
+    "!pip install pytest\n",
+    "\n",
+    "# 开始安装PaddleDetection\n",
+    "!python setup.py install  # 如果安装过程中长时间卡住,可中断后重新执行。"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 验证是否安装成功\n",
+    "\n",
+    "如果报错,只需重新执行上一步。"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "# 测试是否安装成功\n",
+    "!python ppdet/modeling/tests/test_architectures.py"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 快速体验\n",
+    "\n",
+    "恭喜!您已经成功安装了PaddleDetection,接下来快速体验目标检测效果。"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true, "tags": [] }, "outputs": [], "source": [
+    "# 在GPU上预测一张图片\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "会在output文件夹下生成一个画有预测结果的同名图像。\n"
+  ] },
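+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "可以用下面几行代码在Notebook中直接查看输出图像(其中 output/000000014439.jpg 为上一步示例图片对应的输出路径,属假设路径,请按实际输出调整):"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "import cv2\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "img = cv2.imread(\"output/000000014439.jpg\")  # 预测结果图(假设路径)\n",
+    "plt.figure(figsize=(8, 8))\n",
+    "plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n",
+    "plt.axis(\"off\")\n",
+    "plt.show()"
+  ] },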
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "### 3.2 模型训练:\n",
+    "* 克隆PaddleDetection仓库(详见3.1)"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 准备数据集\n",
+    "\n",
+    "这里需要开发者自行准备数据集。以下举例假设开发者已经准备好wider_face数据集,并且解压到 PaddleDetection/dataset/wider_face/ 下,可通过以下命令确认数据集已经准备完成。"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "# 查看解压目录\n",
+    "#%cd ~/work/PaddleDetection/\n",
+    "#!tree -d dataset/wider_face"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 修改yaml配置文件\n",
+    "\n",
+    "修改配置文件``` configs/runtime.yml```\n",
+    "\n",
+    "```\n",
+    "use_gpu: true # 是否使用GPU\n",
+    "log_iter: 20 # 每多少个迭代次显示\n",
+    "save_dir: output # 模型保存目录\n",
+    "snapshot_epoch: 1 # 多少个epoch保存一次\n",
+    "print_flops: false\n",
+    "```\n",
+    "\n",
+    "修改配置文件``` configs/datasets/coco_detection.yml```\n",
+    "\n",
+    "```\n",
+    "metric: COCO\n",
+    "num_classes: 1 # 分类个数\n",
+    "\n",
+    "TrainDataset:\n",
+    "  !COCODataSet\n",
+    "    image_dir: WIDER_train/images # 训练图像数据基于数据集根目录的相对路径\n",
+    "    anno_path: WIDERFaceTrainCOCO.json # 训练标注文件基于数据集根目录的相对路径\n",
+    "    dataset_dir: dataset/wider_face # 数据集根目录\n",
+    "    data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n",
+    "\n",
+    "EvalDataset:\n",
+    "  !COCODataSet\n",
+    "    image_dir: WIDER_val/images # 测试图像数据基于数据集根目录的相对路径\n",
+    "    anno_path: WIDERFaceValCOCO.json # 测试标注文件基于数据集根目录的相对路径\n",
+    "    dataset_dir: dataset/wider_face\n",
+    "\n",
+    "TestDataset:\n",
+    "  !ImageFolder\n",
+    "    anno_path: WIDERFaceValCOCO.json\n",
+    "```\n",
+    "\n",
+    "修改配置文件``` configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml```\n",
+    "\n",
+    "```\n",
+    "_BASE_: [\n",
+    "  '../datasets/coco_detection.yml',\n",
+    "  '../runtime.yml',\n",
+    "  './_base_/ppyolov2_r50vd_dcn.yml',\n",
+    "  './_base_/optimizer_365e.yml',\n",
+    "  './_base_/ppyolov2_reader.yml',\n",
+    "]\n",
+    "\n",
+    "snapshot_epoch: 8 # 每训练多少个epoch保存一次模型\n",
+    "weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final\n",
+    "```\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [ "* 训练模型" ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true, "tags": [] }, "outputs": [], "source": [
+    "%cd ~/work/PaddleDetection/\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "# 开始训练\n",
+    "!python tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --use_vdl=true"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 模型评估\n",
+    "\n",
+    "我们提供了``` configs/ppyolo/ppyolo_test.yml```用于评估COCO test-dev2017数据集的效果。评估前须先从COCO数据集下载页下载test-dev2017数据集,解压到该配置文件中EvalReader.dataset所配置的路径,再使用如下命令进行评估。"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd ~/work/PaddleDetection/\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "# 训练完以后,进行评估\n",
+    "!python tools/eval.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o use_gpu=true"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 4. 模型原理\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 采用 Path Aggregation Network(路径聚合网络)设计 Detection Net\n",
+    "\n",
+    "PP-YOLOv2 采用了 FPN 的变形之一 PAN(Path Aggregation Network)自上而下地聚合特征信息。\n",
+    "\n",
+    "![](https://ai-studio-static-online.cdn.bcebos.com/5f047e2e5f3c47efbb81c6cf3d81415e531133c1feff4f36a2cc13f88210ab69)\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* 采用 Mish 激活函数\n",
+    "\n",
+    "PP-YOLOv2 的 Mish 激活函数应用在了 detection neck 而不是骨干网络上。\n",
+    "\n",
+    "* 更大的输入尺寸\n",
+    "\n",
+    "增加输入尺寸直接带来了目标面积的扩大,这样网络可以更容易捕捉到小尺寸目标的信息,得到更高的性能。然而,更大的输入会带来更多的内存占用,所以在使用这个策略的同时,PP-YOLOv2 也相应减小了 Batch Size。"
+  ] },
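+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "下面用几行 Paddle 代码示意 Mish 激活函数的定义(仅作原理演示,并非 PP-YOLOv2 的源码实现):"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "import paddle\n",
+    "import paddle.nn.functional as F\n",
+    "\n",
+    "def mish(x):\n",
+    "    # mish(x) = x * tanh(softplus(x))\n",
+    "    return x * paddle.tanh(F.softplus(x))\n",
+    "\n",
+    "x = paddle.linspace(-5.0, 5.0, 11)\n",
+    "print(mish(x).numpy())"
+  ] },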
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 5. 注意事项\n",
+    "\n",
+    "不管是 PP-YOLO 还是 PP-YOLOv2,都是在寻找产业实践中性价比最高的目标检测方案,而不是单纯地以提升单阶段目标检测的精度去堆网络和策略。PP-YOLOv2 的论文也特别提到,希望以实验报告的角度为业界开发者展示更多网络优化的方法,这些策略同样可以应用在其他网络的优化上,在带来更好网络的同时,也带来更多算法优化的启发。同时,在使用 PP-YOLO 系列时应当注意:\n",
+    "\n",
+    "* PP-YOLO模型使用COCO数据集中的train2017作为训练集,使用val2017和test-dev2017作为测试集,Box APtest为mAP(IoU=0.5:0.95)评估结果。\n",
+    "* PP-YOLO模型训练过程中使用8 GPUs,每GPU batch size为24进行训练,如训练GPU数和batch size不使用上述配置,须参考FAQ调整学习率和迭代次数。\n",
+    "* PP-YOLO模型推理速度测试采用单卡V100,batch size=1进行测试,使用CUDA 10.2、CUDNN 7.5.1,TensorRT推理速度测试使用TensorRT 5.1.2.2。\n",
+    "* PP-YOLO模型FP32的推理速度,是先使用tools/export_model.py脚本导出模型,再使用deploy/python/infer.py脚本的--run_benchmark参数、基于Paddle预测库进行的推理速度benchmark测试结果,且均为不包含数据预处理和模型输出后处理(NMS)的数据(与YOLOv4(AlexeyAB)测试方法一致)。\n",
+    "* TensorRT FP16的速度测试相比于FP32进一步去除了yolo_box(bbox解码)部分的耗时,即不包含数据预处理、bbox解码和NMS(与YOLOv4(AlexeyAB)测试方法一致)。"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 6. 相关论文以及引用信息\n",
+    "```\n",
+    "@article{huang2021pp,\n",
+    "  title={PP-YOLOv2: A Practical Object Detector},\n",
+    "  author={Huang, Xin and Wang, Xinxin and Lv, Wenyu and Bai, Xiaying and Long, Xiang and Deng, Kaipeng and Dang, Qingqing and Han, Shumin and Liu, Qiwen and Hu, Xiaoguang and others},\n",
+    "  journal={arXiv preprint arXiv:2104.10419},\n",
+    "  year={2021}\n",
+    "}\n",
+    "```\n"
+  ] }
+ ],
+ "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" } },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/modelcenter/PP-YOLOv2/introduction_en.ipynb b/modelcenter/PP-YOLOv2/introduction_en.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..260196af367088f80ac46ed62d254dbc181c0d5f --- /dev/null +++ b/modelcenter/PP-YOLOv2/introduction_en.ipynb @@ -0,0 +1,382 @@
+{ "cells": [
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 1. PP-YOLOv2 Introduction\n",
+    "\n",
+    "As an important algorithm for object detection, the YOLO series adopts the one-stage method to greatly improve detection speed, but the speed gain sacrifices some accuracy as a cost. Therefore, how to improve the accuracy of YOLOv3 while maintaining inference speed became a key issue for its practical application. PP-YOLOv2 (R50) raises mAP on the COCO test set from 45.9% to 49.5%, an increase of 3.6 percentage points compared to v1. FP32 FPS is up to 68.9 and FP16 FPS is up to 106.5, surpassing YOLOv4 and even YOLOv5! If ResNet101 is used as the backbone network, PP-YOLOv2 (R101) reaches 50.3% mAP and is 15.9% faster than YOLOv5x at the same accuracy!\n",
+    "\n",
+    "The PP-YOLO model is officially produced by PaddlePaddle and is an optimized and improved YOLOv3 model from PaddleDetection. More information about PaddleDetection can be found at https://github.com/PaddlePaddle/PaddleDetection.\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 2. Model Effects and Application Scenarios\n",
+    "### 2.1 Object Detection Tasks:\n",
+    "\n",
+    "#### 2.1.1 Datasets:\n",
+    "\n",
+    "The dataset is mainly in COCO format, divided into a training set and a test set.\n",
+    "\n",
+    "#### 2.1.2 Model Effects:\n",
+    "\n",
+    "Sample detection results of PP-YOLOv2 are omitted here (the original image links were lost); see the quick-experience output below.\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 3. How to Use the Model\n",
+    "\n",
+    "### 3.1 Model Inference:\n",
+    "* Download\n"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd /home/aistudio/work\n",
+    "\n",
+    "!git clone https://gitee.com/paddlepaddle/PaddleDetection"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [ "* Installation" ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "# The script needs to be run in the PaddleDetection directory\n",
+    "%cd /home/aistudio/work/PaddleDetection/\n",
+    "\n",
+    "# Install the required dependencies [already persisted, no need to install again].\n",
+    "# After testing on AI Studio with paddlepaddle 2.2.2, pyzmq needs to be installed in advance, otherwise an error occurs.\n",
+    "!pip install pyzmq -t /home/aistudio/external-libraries\n",
+    "!pip install -r requirements.txt\n",
+    "\n",
+    "# Set the python run directory.\n",
+    "%env PYTHONPATH=.:$PYTHONPATH\n",
+    "# Set GPU\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "\n",
+    "!pip install pycocotools\n",
+    "!pip install cython-bbox\n",
+    "!pip install xmltodict\n",
+    "!pip install terminaltables\n",
+    "!pip install motmetrics\n",
+    "!pip install lap\n",
+    "!pip install shapely\n",
+    "!pip install pytest-benchmark\n",
+    "!pip install pytest\n",
+    "\n",
+    "# Install PaddleDetection\n",
+    "!python setup.py install"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* Verify whether the installation was successful.\n",
+    "If an error is reported, just re-run the previous step."
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "# Check whether the installation was successful.\n",
+    "!python ppdet/modeling/tests/test_architectures.py"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* Quick experience\n",
+    "\n",
+    "Congratulations! Now that you've successfully installed PaddleDetection, let's get a quick feel for object detection."
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "# Predict a picture on the GPU.\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "!python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "An image with the predicted result is generated under the output folder.\n",
+    "\n",
+    "The result is as follows:\n",
+    "\n",
+    "![](https://ai-studio-static-online.cdn.bcebos.com/76fb0d0b60fe4fe39cc7302f4c25818133f970ebdf924d2d85f70f25a586aab9)\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "### 3.2 Model Training\n",
+    "* Clone the PaddleDetection repository (see 3.1 for details)"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* Prepare the datasets.\n",
+    "\n",
+    "You need to prepare the dataset yourself. The following example assumes the wider_face dataset has been downloaded and extracted to PaddleDetection/dataset/wider_face/; confirm that it is in place with the command below."
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd /home/aistudio/work/PaddleDetection/\n",
+    "# Review the extracted directory\n",
+    "!tree -d dataset/wider_face"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* Change the yaml configuration files.\n",
+    "\n",
+    "Change the configuration file ``` configs/runtime.yml```\n",
+    "\n",
+    "```\n",
+    "use_gpu: true\n",
+    "log_iter: 20\n",
+    "save_dir: output\n",
+    "snapshot_epoch: 1\n",
+    "print_flops: false\n",
+    "```\n",
+    "\n",
+    "Change the configuration file ``` configs/datasets/coco_detection.yml```\n",
+    "\n",
+    "```\n",
+    "metric: COCO\n",
+    "num_classes: 1\n",
+    "\n",
+    "TrainDataset:\n",
+    "  !COCODataSet\n",
+    "    image_dir: WIDER_train/images\n",
+    "    anno_path: WIDERFaceTrainCOCO.json\n",
+    "    dataset_dir: dataset/wider_face\n",
+    "    data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n",
+    "\n",
+    "EvalDataset:\n",
+    "  !COCODataSet\n",
+    "    image_dir: WIDER_val/images\n",
+    "    anno_path: WIDERFaceValCOCO.json\n",
+    "    dataset_dir: dataset/wider_face\n",
+    "\n",
+    "TestDataset:\n",
+    "  !ImageFolder\n",
+    "    anno_path: WIDERFaceValCOCO.json\n",
+    "```\n",
+    "\n",
+    "Change the configuration file ``` configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml```\n",
+    "\n",
+    "```\n",
+    "_BASE_: [\n",
+    "  '../datasets/coco_detection.yml',\n",
+    "  '../runtime.yml',\n",
+    "  './_base_/ppyolov2_r50vd_dcn.yml',\n",
+    "  './_base_/optimizer_365e.yml',\n",
+    "  './_base_/ppyolov2_reader.yml',\n",
+    "]\n",
+    "\n",
+    "snapshot_epoch: 8\n",
+    "weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final\n",
+    "```\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [ "* Train the model." ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd /home/aistudio/work/PaddleDetection/\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "# Begin training\n",
+    "!python tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --use_vdl=true"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* Model evaluation\n",
+    "\n",
+    "We provide ``` configs/ppyolo/ppyolo_test.yml``` for evaluating on the COCO test-dev2017 dataset. To do so, first download test-dev2017 from the COCO dataset download page, extract it to the path configured in EvalReader.dataset of that file, and then evaluate with the following command."
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd /home/aistudio/work/PaddleDetection/\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "\n",
+    "!python tools/eval.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o use_gpu=true"
+  ] },
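+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "To evaluate on COCO test-dev2017 rather than val2017, a command along the following lines can be used once the dataset is in place (the weights path is an assumption based on the training output directory above):"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd /home/aistudio/work/PaddleDetection/\n",
+    "%env CUDA_VISIBLE_DEVICES=0\n",
+    "!python tools/eval.py -c configs/ppyolo/ppyolo_test.yml -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final.pdparams"
+  ] },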
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 4. Model Principles\n",
+    "* Design the Detection Net using Path Aggregation Network\n",
+    "\n",
+    "PP-YOLOv2 uses one of the FPN variants, PAN (Path Aggregation Network), to aggregate feature information from top to bottom.\n",
+    "\n",
+    "![](https://ai-studio-static-online.cdn.bcebos.com/5f047e2e5f3c47efbb81c6cf3d81415e531133c1feff4f36a2cc13f88210ab69)\n"
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "* Use the Mish activation function\n",
+    "\n",
+    "PP-YOLOv2's Mish activation function is applied to the detection neck instead of the backbone network.\n",
+    "\n",
+    "* Larger input size\n",
+    "\n",
+    "Increasing the input size directly enlarges the target area, which makes it easier for the network to capture information about small-sized targets, for higher performance. However, larger inputs result in a larger memory footprint, so while using this strategy, PP-YOLOv2 also reduces the batch size."
+  ] },
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 5. Attention\n",
+    "\n",
+    "Whether it is PP-YOLO or PP-YOLOv2, the goal is the most cost-effective object detection solution for industrial practice, rather than simply stacking networks and strategies to improve single-stage detection accuracy. The PP-YOLOv2 paper specifically mentions that it is written as an experiment report to show industry developers more network optimization methods; these strategies can also be applied to the optimization of other networks, hopefully bringing both better networks and more algorithm optimization inspiration. When using the PP-YOLO series, also note:\n",
+    "\n",
+    "* The PP-YOLO model uses train2017 of the COCO dataset as the training set, and val2017 and test-dev2017 as the test sets; Box APtest is the mAP (IoU=0.5:0.95) evaluation result.\n",
+    "* The PP-YOLO model was trained on 8 GPUs with a per-GPU batch size of 24; if you train with a different number of GPUs or batch size, refer to the FAQ to adjust the learning rate and number of iterations.\n",
+    "* The inference speed of the PP-YOLO model is tested on a single V100 with batch size=1, CUDA 10.2 and CUDNN 7.5.1; the TensorRT inference speed test uses TensorRT 5.1.2.2.\n",
+    "* The FP32 inference speed of the PP-YOLO model is benchmarked by first exporting the model with the tools/export_model.py script and then running the deploy/python/infer.py script with the --run_benchmark parameter against the Paddle inference library; the reported numbers exclude data preprocessing and model output post-processing (NMS), consistent with the YOLOv4 (AlexeyAB) test method.\n",
+    "* Compared with FP32, the TensorRT FP16 speed test additionally removes the time spent in yolo_box (bbox decoding); i.e., it includes neither data preprocessing, bbox decoding, nor NMS (consistent with the YOLOv4 (AlexeyAB) test method).\n"
+  ] },
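+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "As a concrete illustration of the export-and-benchmark flow the last two bullets refer to, the commands might look as follows (the config and paths are assumptions based on the training section above):"
+  ] },
+  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
+    "%cd /home/aistudio/work/PaddleDetection/\n",
+    "# Export the trained weights to an inference model\n",
+    "!python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final.pdparams\n",
+    "# Benchmark inference speed with the Paddle inference library\n",
+    "!python deploy/python/infer.py --model_dir=output_inference/ppyolov2_r50vd_dcn_365e_coco --image_file=demo/000000014439.jpg --device=GPU --run_benchmark=True"
+  ] },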
+  { "cell_type": "markdown", "metadata": {}, "source": [
+    "## 6. Related papers and citations\n",
+    "If relevant papers have been published for this model, or it builds on the results of other papers, references can be provided here in BibTeX format.\n",
+    "\n",
+    "```\n",
+    "@article{huang2021pp,\n",
+    "  title={PP-YOLOv2: A Practical Object Detector},\n",
+    "  author={Huang, Xin and Wang, Xinxin and Lv, Wenyu and Bai, Xiaying and Long, Xiang and Deng, Kaipeng and Dang, Qingqing and Han, Shumin and Liu, Qiwen and Hu, Xiaoguang and others},\n",
+    "  journal={arXiv preprint arXiv:2104.10419},\n",
+    "  year={2021}\n",
+    "}\n",
+    "```\n"
+  ] }
+ ],
+ "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" } },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/modelcenter/guide_cn.md b/modelcenter/guide_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..7e0f420bc91e20ad4d33916e3c7a032ce13a9447 --- /dev/null +++ b/modelcenter/guide_cn.md @@ -0,0 +1,37 @@
+# 1. 模型中心
+
+## 1.1 模型介绍
+本部分介绍每个模型的基本信息,包括模型背景、应用场景、快速开始及模型原理等,帮助大家全方位了解模型。点击跳转【模型介绍】页面。
+
+## 1.2 模型空间
+本部分提供在线体验空间的所有代码,可实现基于模型的可视化demo APP。您可以对代码进行下载、预览和编辑(暂未上线),也可利用Streamlit和Gradio两种高效的方法,为模型打造炫酷的showcase效果。点击跳转【模型空间】页面。
+
+## 1.3 模型下载
+本部分提供模型各任务场景下的推理模型文件和预训练模型文件,您可以直接获取、下载体验。点击跳转【模型下载】页面。
+
+## 1.4 模型Benchmark
+本部分提供模型的训练和推理Benchmark,包括软硬件环境、数据集、训练和推理指标效果等评估数据。点击跳转【模型Benchmark】页面。
+
+## 1.5 模型范例
+本部分提供模型相关的产业范例项目,每个范例均来源于真实业务场景,通过完整的代码实现,提供从数据准备到模型部署的全流程方案。您可以点击【运行一下】,感受模型实际的落地效果。点击跳转【模型范例】页面。
+
+# 2. 快速体验
+
+## 2.1 在AI Studio Notebook中打开
+
+此方式可直接跳转到AI Studio对应的模型项目页面。登录后,您可以直接选择机器资源并运行,文档和代码会全部复制到项目中,欢迎您在线体验。
+
+备注:AI Studio是基于飞桨的人工智能学习与实训社区,为开发者提供高效易用的学习和开发环境、丰富的体系化课程、海量开源实践项目和高价值的AI竞赛,并提供教育版支撑高校和机构老师轻松实现AI教学,助力深度学习人才培养。
+
+## 2.2 在BML Notebook中打开
+
+此方式可直接跳转到BML AI中台对应的模型项目页面。登录后,您可以直接选择机器资源并运行,文档和代码会全部复制到项目中,欢迎您在线体验。
+
+备注:BML全功能AI开发平台是一个面向企业和个人开发者的机器学习集成开发环境,为经典机器学习和深度学习提供了从数据处理、模型训练、模型管理到模型推理的全生命周期管理服务,帮助用户更快地构建、训练和部署模型。
+
+## 2.3 下载压缩包到本地
+
+此方式可将模型代码压缩包直接下载到本地,在您自己的环境中使用和体验。需要注意的是,您需要提前安装好飞桨框架基础环境,安装链接见:https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/windows-pip.html
+
+备注:压缩包内不包含模型训练文件和模型推理文件。
diff --git a/modelcenter/guide_en.md b/modelcenter/guide_en.md new file mode 100644 index 0000000000000000000000000000000000000000..e8ec4f82e640ff5fc6e6a5f60fbbd884ba69cd26 --- /dev/null +++ b/modelcenter/guide_en.md @@ -0,0 +1,3 @@
+# 1. ModelCenter
+
+## 1.1 Model Introduction
+This section presents the basic information of each model, including its background, application scenarios, quick start and underlying principles, to give you a full picture of the model. Click to go to the [Model Introduction] page.
+
+## 1.2 Model Space
+This section provides all the code behind the online experience space, implementing a visual demo APP for each model. You can download and preview the code (online editing is not yet available), or use Streamlit and Gradio to build an impressive showcase for a model. Click to go to the [Model Space] page.
+
+## 1.3 Model Download
+This section provides the inference model files and pretrained model files for each task scenario, ready to download and try. Click to go to the [Model Download] page.
+
+## 1.4 Model Benchmark
+This section provides the training and inference benchmarks of each model, covering the hardware and software environment, datasets, and training and inference metrics. Click to go to the [Model Benchmark] page.
+
+## 1.5 Model Examples
+This section provides industrial example projects related to each model. Every example comes from a real business scenario and, through a complete code implementation, covers the whole pipeline from data preparation to model deployment. Click [Run] to see how the model performs in practice. Click to go to the [Model Examples] page.
+
+# 2. QuickStart
+
+## 2.1 Open in AI Studio Notebook
+
+This jumps directly to the corresponding model project page on AI Studio. After logging in, you can select machine resources and run the project; the docs and code are all copied into the project.
+
+Note: AI Studio is an AI learning and training community based on PaddlePaddle, providing developers with an efficient and easy-to-use development environment, systematic courses, a large number of open-source practice projects and high-value AI competitions.
+
+## 2.2 Open in BML Notebook
+
+This jumps directly to the corresponding model project page on the BML AI platform. After logging in, you can select machine resources and run the project; the docs and code are all copied into the project.
+
+Note: BML is a full-featured AI development platform for enterprise and individual developers, providing full-lifecycle services for classic machine learning and deep learning, from data processing and model training to model management and model inference.
+
+## 2.3 Download the archive locally
+
+This downloads the model code archive to your local machine so you can use it in your own environment. Note that you need to install the PaddlePaddle framework environment in advance; see: https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/windows-pip.html
+
+Note: the archive does not include model training files or model inference files.