Commit 5a43eba9 (unverified)
Authored by Bin Long on Nov 21, 2019; committed via GitHub on Nov 21, 2019
Parents: b078ad1d, 3431d0a8

Merge pull request #224 from ShenYuhan/update_file

update serving demo, add notes

Showing 11 changed files with 37 additions and 4 deletions (+37 −4):
demo/serving/Classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py      +3 −0
demo/serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py                     +5 −2
demo/serving/Language_Model_lm_lstm/lm_lstm_serving_demo.py                    +3 −0
demo/serving/Lexical_Analysis_lac/lac_serving_demo.py                          +3 −0
demo/serving/Lexical_Analysis_lac/lac_with_dict_serving_demo.py                +4 −1
demo/serving/Object_Detection_yolov3_coco2017/yolov3_coco2017_serving_demo.py  +3 −0
demo/serving/Semantic_Model_simnet_bow/simnet_bow_serving_demo.py              +4 −0
demo/serving/Semantic_Segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py  +3 −0
demo/serving/Sentiment_Analysis_senta_lstm/senta_lstm_serving_demo.py          +3 −0
paddlehub/common/hub_server.py                                                 +4 −1
paddlehub/module/module.py                                                     +2 −0
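Every serving demo touched by this commit follows the same client pattern: build a payload, POST it to a locally running PaddleHub Serving endpoint on port 8866, and print or save the JSON response. The minimal sketch below is for orientation only; it assumes a serving instance for the `lac` module is already listening on 127.0.0.1:8866 (starting the server is not part of this diff), and the input text is illustrative.

```python
# A minimal sketch of the client pattern shared by the serving demos below.
# Assumes a PaddleHub Serving instance for the "lac" module is already
# listening on 127.0.0.1:8866; the input text is illustrative.
import json

import requests

if __name__ == "__main__":
    payload = {"text": ["今天是个好日子"]}
    url = "http://127.0.0.1:8866/predict/text/lac"
    r = requests.post(url=url, data=payload)
    # Pretty-print the JSON response returned by the serving endpoint
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```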
demo/serving/Classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py (view file @ 5a43eba9)

@@ -3,9 +3,12 @@ import requests

This change adds three explanatory comments; the updated hunk reads:

```python
import json

if __name__ == "__main__":
    # Specify the images to predict and build the list [("image", img_1), ("image", img_2), ... ]
    file_list = ["../img/cat.jpg", "../img/flower.jpg"]
    files = [("image", (open(item, "rb"))) for item in file_list]
    # Specify vgg11_imagenet as the prediction method and send a POST request
    url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
    r = requests.post(url=url, files=files)
    # Print the prediction result
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```
demo/serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py (view file @ 5a43eba9)

@@ -5,14 +5,17 @@ import base64

This change adds explanatory comments and moves the `url` assignment below the `data` definition; the updated hunk reads:

```python
import os

if __name__ == "__main__":
    # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
    file_list = ["../img/woman.png"]
    files = [("image", (open(item, "rb"))) for item in file_list]
    # Specify the info and style for each image
    data = {"info": ["Female,Brown_Hair"], "style": ["Aged"]}
    # Specify stgan_celeba as the image-generation method and send a POST request
    url = "http://127.0.0.1:8866/predict/image/stgan_celeba"
    r = requests.post(url=url, data=data, files=files)
    results = eval(r.json()["results"])
    # Save the generated images to the output folder and print the model output
    if not os.path.exists("output"):
        os.mkdir("output")
    for item in results:
        ...  # remainder of the hunk unchanged
```
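The loop body that saves the generated images is elided ("...") in this hunk. For orientation only, here is a hedged sketch of how a base64-encoded image result could be decoded and written to the `output` folder; the key name `"base64"` and the helper name are assumptions for illustration, not taken from the repository.

```python
# Illustrative only: assumes each item in `results` carries a base64-encoded
# image under a "base64" key (a guess, not confirmed by this diff).
import base64
import os


def save_base64_image(item, index, output_dir="output"):
    # Decode the base64 payload and write it out as a PNG file
    os.makedirs(output_dir, exist_ok=True)
    image_bytes = base64.b64decode(item["base64"])
    with open(os.path.join(output_dir, "result_%d.png" % index), "wb") as f:
        f.write(image_bytes)
```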
demo/serving/Language_Model_lm_lstm/lm_lstm_serving_demo.py (view file @ 5a43eba9)

@@ -3,12 +3,15 @@ import requests

This change adds three explanatory comments; the updated hunk reads:

```python
import json

if __name__ == "__main__":
    # Specify the texts to predict and build the dict {"text": [text_1, text_2, ... ]}
    text_list = [
        "the plant which is owned by <unk> & <unk> co. was under contract with <unk> to make the cigarette filter",
        "more common <unk> fibers are <unk> and are more easily rejected by the body dr. <unk> explained"
    ]
    text = {"text": text_list}
    # Specify lm_lstm as the prediction method and send a POST request
    url = "http://127.0.0.1:8866/predict/text/lm_lstm"
    r = requests.post(url=url, data=text)
    # Print the prediction result
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```
demo/serving/Lexical_Analysis_lac/lac_serving_demo.py (view file @ 5a43eba9)

@@ -3,9 +3,12 @@ import requests

This change adds three explanatory comments; the updated hunk reads:

```python
import json

if __name__ == "__main__":
    # Specify the texts to predict and build the dict {"text": [text_1, text_2, ... ]}
    text_list = ["今天是个好日子", "天气预报说今天要下雨"]
    text = {"text": text_list}
    # Specify lac as the prediction method and send a POST request
    url = "http://127.0.0.1:8866/predict/text/lac"
    r = requests.post(url=url, data=text)
    # Print the prediction result
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```
demo/serving/Lexical_Analysis_lac/lac_with_dict_serving_demo.py (view file @ 5a43eba9)

@@ -3,11 +3,14 @@ import requests

This change rewrites the comment around the custom-dictionary upload and adds further notes; the updated hunk reads:

```python
import json

if __name__ == "__main__":
    # Specify the texts to predict and build the dict {"text": [text_1, text_2, ... ]}
    text_list = ["今天是个好日子", "天气预报说今天要下雨"]
    text = {"text": text_list}
    # Specify the custom user dictionary {"user_dict": dict.txt}
    file = {"user_dict": open("dict.txt", "rb")}
    # Specify lac as the prediction method and send a POST request
    url = "http://127.0.0.1:8866/predict/text/lac"
    r = requests.post(url=url, files=file, data=text)
    # Print the prediction result
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```
demo/serving/Object_Detection_yolov3_coco2017/yolov3_coco2017_serving_demo.py (view file @ 5a43eba9)

@@ -5,13 +5,16 @@ import base64

This change adds three explanatory comments; the updated hunk reads:

```python
import os

if __name__ == "__main__":
    # Specify the images to detect and build the list [("image", img_1), ("image", img_2), ... ]
    file_list = ["../img/cat.jpg", "../img/dog.jpg"]
    files = [("image", (open(item, "rb"))) for item in file_list]
    # Specify yolov3_coco2017 as the detection method and send a POST request
    url = "http://127.0.0.1:8866/predict/image/yolov3_coco2017"
    r = requests.post(url=url, files=files)
    results = eval(r.json()["results"])
    # Save the detection result images to the output folder and print the model output
    if not os.path.exists("output"):
        os.mkdir("output")
    for item in results:
        ...  # remainder of the hunk unchanged
```
demo/serving/Semantic_Model_simnet_bow/simnet_bow_serving_demo.py (view file @ 5a43eba9)

@@ -3,11 +3,15 @@ import requests

This change adds explanatory comments; the updated hunk reads:

```python
import json

if __name__ == "__main__":
    # Specify the texts to match and build the dict
    # {"text_1": [text_a1, text_a2, ... ], "text_2": [text_b1, text_b2, ... ]}
    text = {
        "text_1": ["这道题太难了", "这道题太难了", "这道题太难了"],
        "text_2": ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
    }
    # Specify simnet_bow as the matching method and send a POST request
    url = "http://127.0.0.1:8866/predict/text/simnet_bow"
    r = requests.post(url=url, data=text)
    # Print the matching result
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```
demo/serving/Semantic_Segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py (view file @ 5a43eba9)

@@ -5,13 +5,16 @@ import base64

This change adds three explanatory comments; the updated hunk reads:

```python
import os

if __name__ == "__main__":
    # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
    file_list = ["../img/girl.jpg"]
    files = [("image", (open(item, "rb"))) for item in file_list]
    # Specify deeplabv3p_xception65_humanseg as the segmentation method and send a POST request
    url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"
    r = requests.post(url=url, files=files)
    results = eval(r.json()["results"])
    # Save the segmented images to the output folder and print the model output
    if not os.path.exists("output"):
        os.mkdir("output")
    for item in results:
        ...  # remainder of the hunk unchanged
```
demo/serving/Sentiment_Analysis_senta_lstm/senta_lstm_serving_demo.py (view file @ 5a43eba9)

@@ -3,9 +3,12 @@ import requests

This change adds three explanatory comments; the updated hunk reads:

```python
import json

if __name__ == "__main__":
    # Specify the texts to predict and build the dict {"text": [text_1, text_2, ... ]}
    text_list = ["我不爱吃甜食", "我喜欢躺在床上看电影"]
    text = {"text": text_list}
    # Specify senta_lstm as the prediction method and send a POST request
    url = "http://127.0.0.1:8866/predict/text/senta_lstm"
    r = requests.post(url=url, data=text)
    # Print the prediction result
    print(json.dumps(r.json(), indent=4, ensure_ascii=False))
```
paddlehub/common/hub_server.py (view file @ 5a43eba9)

@@ -287,7 +287,10 @@ class CacheUpdater(threading.Thread):

This change wraps the `srv_utils.hub_request` call in a try/except; the updated hunk (which begins inside the request payload dict) reads:

```python
            # (hunk begins inside the payload dict)
            "command": "update_cache",
            "mtime": os.stat(cache_path).st_mtime
        }
        try:
            r = srv_utils.hub_request(api_url, payload, extra)
        except Exception as err:
            pass
        if r.get("update_cache", 0) == 1:
            with open(cache_path, 'w+') as fp:
                yaml.safe_dump({'resource_list': r['data']}, fp)
        # ... remainder of the hunk unchanged
```
paddlehub/module/module.py (view file @ 5a43eba9)

@@ -128,6 +128,8 @@ class Module(object):

This change adds extraction of the module name and version from `module_dir`; the updated hunk reads:

```python
        elif module_dir:
            self._init_with_module_file(module_dir=module_dir[0])
            lock.flock(fp_lock, lock.LOCK_UN)
            name = module_dir[0].split("/")[-1]
            version = module_dir[1]
        elif signatures:
            if processor:
                if not issubclass(processor, BaseProcessor):
                    ...  # remainder of the hunk unchanged
```