提交 4764c58a 编写于 作者: W wuzewu

Move third-party module

上级 be32aadd
......@@ -33,7 +33,8 @@ class Animegan_V1_Hayao_60(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Hayao_64(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Hayao_99(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Paprika_54(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Paprika_74(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Paprika_97(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Paprika_98(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Shinkai_33(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
......@@ -33,7 +33,8 @@ class Animegan_V2_Shinkai_53(Module):
min_size=32,
max_size=1024):
# 加载数据处理器
processor = Processor(images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
processor = Processor(
images=images, paths=paths, batch_size=1, output_dir=output_dir, min_size=min_size, max_size=max_size)
# 模型预测
outputs = self.model.predict(processor.input_datas)
......
# U2Net
|模型名称|U2Net|
| :--- | :---: |
|类别|图像-图像分割|
|网络|U^2Net|
|数据集|-|
......@@ -17,7 +17,7 @@
- ### 应用效果展示
- 效果展示
<p align="center">
<img src="https://ai-studio-static-online.cdn.bcebos.com/4d77bc3a05cf48bba6f67b797978f4cdf10f38288b9645d59393dd85cef58eff" width = "450" height = "300" hspace='10'/> <img src="https://ai-studio-static-online.cdn.bcebos.com/11c9eba8de6d4316b672f10b285245061821f0a744e441f3b80c223881256ca0" width = "450" height = "300" hspace='10'/>
</p>
......@@ -38,7 +38,7 @@
- ```shell
$ hub install U2Net
```
- 如您安装时遇到问题,可参考:[零基础windows安装](../../../../docs/docs_ch/get_start/windows_quickstart.md)
| [零基础Linux安装](../../../../docs/docs_ch/get_start/linux_quickstart.md) | [零基础MacOS安装](../../../../docs/docs_ch/get_start/mac_quickstart.md)
......
# U2Netp
|模型名称|U2Netp|
| :--- | :---: |
|类别|图像-图像分割|
|网络|U^2Net|
|数据集|-|
......@@ -15,7 +15,7 @@
## 一、模型基本信息
- ### 应用效果展示
- 样例结果示例:
<p align="center">
<img src="https://ai-studio-static-online.cdn.bcebos.com/4d77bc3a05cf48bba6f67b797978f4cdf10f38288b9645d59393dd85cef58eff" width = "450" height = "300" hspace='10'/> <img src="https://ai-studio-static-online.cdn.bcebos.com/11c9eba8de6d4316b672f10b285245061821f0a744e441f3b80c223881256ca0" width = "450" height = "300" hspace='10'/>
......@@ -23,7 +23,7 @@
- ### 模型介绍
* U2Netp的网络结构如下图,其类似于编码-解码(Encoder-Decoder)结构的 U-Net, 每个 stage 由新提出的 RSU模块(residual U-block) 组成. 例如,En_1 即为基于 RSU 构建的, 它是一个小型化的模型
![](https://ai-studio-static-online.cdn.bcebos.com/999d37b4ffdd49dc9e3315b7cec7b2c6918fdd57c8594ced9dded758a497913d)
......@@ -35,14 +35,14 @@
- ### 1、环境依赖
- paddlepaddle >= 2.0.0
- paddlehub >= 2.0.0
- ### 2、安装
- ```shell
$ hub install U2Netp
```
- 如您安装时遇到问题,可参考:[零基础windows安装](../../../../docs/docs_ch/get_start/windows_quickstart.md)
| [零基础Linux安装](../../../../docs/docs_ch/get_start/linux_quickstart.md) | [零基础MacOS安装](../../../../docs/docs_ch/get_start/mac_quickstart.md)
......
# Vehicle_License_Plate_Recognition
|模型名称|Vehicle_License_Plate_Recognition|
| :--- | :---: |
|类别|图像 - 文字识别|
|网络|-|
|数据集|CCPD|
......@@ -17,8 +17,8 @@
- 样例结果示例:
<p align="center">
<img src="https://ai-studio-static-online.cdn.bcebos.com/35a3dab32ac948549de41afba7b51a5770d3f872d60b437d891f359a5cef8052" width = "450" height = "300" hspace='10'/> <br />
</p>
- ### 模型介绍
......@@ -27,20 +27,20 @@
## 二、安装
- ### 1、环境依赖
- paddlepaddle >= 2.0.0
- paddlehub >= 2.0.4
- paddleocr >= 2.0.2
- ### 2、安装
- ```shell
$ hub install Vehicle_License_Plate_Recognition
```
## 三、模型API预测
- ### 1、代码示例
......@@ -64,8 +64,8 @@
- **参数**
- images (list\[numpy.ndarray\]): 图片数据,ndarray.shape 为 \[H, W, C\];<br/>
- **返回**
- results(list(dict{'license', 'bbox'})): 识别到的车牌信息列表,包含车牌的位置坐标和车牌号码
......@@ -116,7 +116,7 @@
* 1.0.0
初始发布
- ```shell
$ hub install Vehicle_License_Plate_Recognition==1.0.0
```
\ No newline at end of file
```
## 模型概述
openpose 手部关键点检测模型
模型详情请参考[openpose开源项目](https://github.com/CMU-Perceptual-Computing-Lab/openpose)
## 模型安装
```shell
$hub install hand_pose_localization
```
## API 说明
```python
def keypoint_detection(
self,
images=None,
paths=None,
batch_size=1,
output_dir='output',
visualization=False
)
```
预测API,识别出人体手部关键点。
![手部关键点](https://ai-studio-static-online.cdn.bcebos.com/97e1ae7c1e68477d85b37f53ee997fbc4ef0fc12c7634301bc08749bd003cac0)
**参数**
* images (list\[numpy.ndarray\]): 图片数据,ndarray.shape 为 \[H, W, C\], 默认设为 None;
* paths (list\[str\]): 图片的路径, 默认设为 None;
* batch\_size (int): batch 的大小,默认设为 1;
* visualization (bool): 是否将识别结果保存为图片文件,默认设为 False;
* output\_dir (str): 图片的保存路径,默认设为 output。
**返回**
* res (list[list[list[int]]]): 每张图片识别到的21个手部关键点组成的列表,每个关键点的格式为[x, y],若有关键点未识别到则为None
## 预测代码示例
```python
import cv2
import paddlehub as hub
# use_gpu:是否使用GPU进行预测
model = hub.Module(name='hand_pose_localization', use_gpu=False)
# 调用关键点检测API
result = model.keypoint_detection(images=[cv2.imread('/PATH/TO/IMAGE')])
# or
# result = model.keypoint_detection(paths=['/PATH/TO/IMAGE'])
# 打印预测结果
print(result)
```
## 服务部署
PaddleHub Serving可以部署一个在线人体手部关键点检测服务。
## 第一步:启动PaddleHub Serving
运行启动命令:
```shell
$ hub serving start -m hand_pose_localization
```
这样就完成了一个人体手部关键点检测的在线服务API的部署,默认端口号为8866。
**NOTE:** 如使用GPU预测,则需要在启动服务之前,请设置CUDA\_VISIBLE\_DEVICES环境变量,否则不用设置。
## 第二步:发送预测请求
配置好服务端,以下数行代码即可实现发送预测请求,获取预测结果
```python
import requests
import json
import cv2
import base64
# 图片Base64编码函数
def cv2_to_base64(image):
data = cv2.imencode('.jpg', image)[1]
return base64.b64encode(data.tostring()).decode('utf8')
# 发送HTTP请求
data = {'images':[cv2_to_base64(cv2.imread("/PATH/TO/IMAGE"))]}
headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:8866/predict/hand_pose_localization"
r = requests.post(url=url, headers=headers, data=json.dumps(data))
# 打印预测结果
print(r.json()["results"])
```
## 模型相关信息
### 模型代码
https://github.com/CMU-Perceptual-Computing-Lab/openpose
### 依赖
paddlepaddle >= 1.8.0
paddlehub >= 1.8.0
reading_pictures_writing_poems
类别 文本 - 文本生成
# 模型概述
看图写诗(reading_pictures_writing_poems),该模型可自动根据图像生成古诗词。该PaddleHub Module支持预测。
# 选择模型版本进行安装
$ hub install reading_pictures_writing_poems==1.0.0
# 命令行预测示例
$ hub run reading_pictures_writing_poems --input_image "scenery.jpg"
![](https://ai-studio-static-online.cdn.bcebos.com/69a9d5a5472449678a08e1ee5066c81b5859827647d74eb8a674afabbc205ae5)
<br>AI根据这张图片生成的古诗是: <br>
- 蕾蕾海河海,岳峰岳麓蔓。
- 不萌枝上春,自结心中线。
<br>
怎么样?还不错吧!
# Module API说明
## WritingPoem(self, image, use_gpu=False)
看图写诗预测接口,预测输入一张图像,输出一首古诗词
### 参数
- image(str): 待检测的图片路径
- use_gpu (bool): 是否使用 GPU
### 返回
- results (list[dict]): 识别结果的列表,列表中每一个元素为 dict,关键字有 image,Poetrys, 其中:
image字段为原输入图片的路径
Poetrys字段为输出的古诗词
# 代码示例
import paddlehub as hub
readingPicturesWritingPoems = hub.Module(directory="./reading_pictures_writing_poems")
readingPicturesWritingPoems.WritingPoem(image = "scenery.jpg", use_gpu=True)
# 贡献者
郑博培、彭兆帅
# 依赖
paddlepaddle >= 1.8.2
paddlehub >= 1.8.0
# SkyAR
|模型名称|SkyAR|
| :--- | :---: |
|类别|图像-图像分割|
|网络|UNet|
|数据集|UNet|
......@@ -13,7 +13,7 @@
## 一、模型基本信息
- ### 应用效果展示
- 样例结果示例:
* 原始视频:
......@@ -125,4 +125,3 @@
* 1.0.0
初始发布
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册