Commit 427e3f20
Authored Feb 14, 2020 by 走神的阿圆
Parent: a2effc2a

update serving add no-file

Showing 11 changed files with 135 additions and 21 deletions (+135 −21)
demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py  +1 −1
demo/serving/module_serving/GAN_stgan_celeba/stgan_output/Bald_man.png  +0 −0
demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py  +3 −1
demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py  +3 −1
demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py  +1 −1
demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/output/girl.png  +0 −0
docs/tutorial/bert_service.md  +2 −2
docs/tutorial/serving.md  +11 −11
paddlehub/commands/serving.py  +5 −2
paddlehub/common/utils.py  +39 −0
paddlehub/serving/app_single.py  +70 −2
demo/serving/module_serving/GAN_stgan_celeba/stgan_celeba_serving_demo.py

@@ -6,7 +6,7 @@ import os
 if __name__ == "__main__":
     # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/man.png"]
+    file_list = ["../../../../docs/imgs/man.png"]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Specify the info and style for each image
     data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}
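For context, the demo posts these files and fields to a local PaddleHub Serving instance. A minimal sketch of how the request is completed, assuming stgan_celeba is being served locally on the default port; the endpoint name follows the URL pattern used by the other demos in this commit and is not shown in this hunk:

```python
# Hypothetical completion of the client flow above; assumes `hub serving`
# is hosting stgan_celeba locally on the default port 8866.
import requests

file_list = ["../../../../docs/imgs/man.png"]
files = [("image", (open(item, "rb"))) for item in file_list]
data = {"info": ["Male,Black_Hair"], "style": ["Bald"]}

url = "http://127.0.0.1:8866/predict/image/stgan_celeba"  # assumed endpoint name
r = requests.post(url=url, data=data, files=files)
print(r.json())
```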
demo/serving/module_serving/GAN_stgan_celeba/stgan_output/Bald_man.png

Binary image replaced (28.5 KB → 44.5 KB).
demo/serving/module_serving/classification_vgg11_imagenet/vgg11_imagenet_serving_demo.py

@@ -4,7 +4,9 @@ import json
 if __name__ == "__main__":
     # Specify the images to classify and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/cat.jpg", "../img/flower.jpg"]
+    file_list = [
+        "../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/flower.jpg"
+    ]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Specify vgg11_imagenet as the prediction method and send the POST request
     url = "http://127.0.0.1:8866/predict/image/vgg11_imagenet"
demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017/yolov3_darknet53_coco2017_serving_demo.py

@@ -6,7 +6,9 @@ import os
 if __name__ == "__main__":
     # Specify the images to detect and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/cat.jpg", "../img/dog.jpg"]
+    file_list = [
+        "../../../../docs/imgs/cat.jpg", "../../../../docs/imgs/dog.jpg"
+    ]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Specify yolov3_coco2017 as the detection method and send the POST request
     url = "http://127.0.0.1:8866/predict/image/yolov3_darknet53_coco2017"
demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/deeplabv3p_xception65_humanseg_serving_demo.py

@@ -6,7 +6,7 @@ import os
 if __name__ == "__main__":
     # Specify the image files to use and build the list [("image", img_1), ("image", img_2), ... ]
-    file_list = ["../img/girl.jpg"]
+    file_list = ["../../../../docs/imgs/girl.jpg"]
     files = [("image", (open(item, "rb"))) for item in file_list]
     # Specify deeplabv3p_xception65_humanseg as the segmentation method and send the POST request
     url = "http://127.0.0.1:8866/predict/image/deeplabv3p_xception65_humanseg"
demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg/output/girl.png

Binary image replaced (304.2 KB → 1.3 MB).
docs/tutorial/bert_service.md

@@ -9,7 +9,7 @@
 <div align="center">
-<img src="../docs/imgs/bs.png" aligh="center" width="100%" alt="BS流程图" />
+<img src="../imgs/bs.png" aligh="center" width="100%" alt="BS流程图" />
 </div>
@@ -203,7 +203,7 @@ result = bc.get_result(input_text=input_text)
 ```python
 [[0.9993321895599361, 0.9994612336158751, 0.9999646544456481, 0.732795298099517, -0.34387934207916204, ... ]]
 ```
-The client demo code is available in the [example](../demo/serving/bert_service/bert_service_client.py).
+The client demo code is available in the [example](../../demo/serving/bert_service/bert_service_client.py).
 Run it with:
 ```shell
 $ python bert_service_client.py
docs/tutorial/serving.md

@@ -81,7 +81,7 @@ http://0.0.0.0:8866/predict/<CATEGORY\>/\<MODULE>
 <p align="center">
-<img src="../docs/imgs/web_demo.png" width="60%" />
+<img src="../imgs/web_demo.png" width="60%" />
 </p>
@@ -117,7 +117,7 @@ $ hub serving start -c serving_config.json
 <p align="center">
-<img src="../docs/imgs/start_serving_lac.png" width="100%" />
+<img src="../imgs/start_serving_lac.png" width="100%" />
 </p>
@@ -171,41 +171,41 @@ if __name__ == "__main__":
 }
 ```
-For the details and code of this demo, see [LAC Serving](../demo/serving/module_serving/lexical_analysis_lac). Some other one-click serving deployment demos are shown below.
+For the details and code of this demo, see [LAC Serving](../../demo/serving/module_serving/lexical_analysis_lac). Some other one-click serving deployment demos are shown below.

 ## Demo: one-click serving deployment for other models
 For more PaddleHub Serving one-click deployment examples, see the demos below:

-* [Image classification with vgg11_imagenet](../demo/serving/module_serving/classification_vgg11_imagenet)
+* [Image classification with vgg11_imagenet](../../demo/serving/module_serving/classification_vgg11_imagenet)
   This example deploys vgg11_imagenet as a service and obtains image classification results through online prediction.
-* [Image generation with stgan_celeba](../demo/serving/module_serving/GAN_stgan_celeba)
+* [Image generation with stgan_celeba](../../demo/serving/module_serving/GAN_stgan_celeba)
   This example deploys stgan_celeba as a service and obtains generated images in the specified style.
-* [Text censorship with porn_detection_lstm](../demo/serving/module_serving/text_censorship_porn_detection_lstm)
+* [Text censorship with porn_detection_lstm](../../demo/serving/module_serving/text_censorship_porn_detection_lstm)
   This example deploys porn_detection_lstm as a service to identify pornographic content in Chinese text, returning the label and its confidence.
-* [Chinese lexical analysis with lac](../demo/serving/module_serving/lexical_analysis_lac)
+* [Chinese lexical analysis with lac](../../demo/serving/module_serving/lexical_analysis_lac)
   This example deploys lac as a service for Chinese word segmentation; the result can be adjusted through a user-defined dictionary.
-* [Object detection with yolov3_darknet53_coco2017](../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
+* [Object detection with yolov3_darknet53_coco2017](../../demo/serving/module_serving/object_detection_yolov3_darknet53_coco2017)
   This example deploys yolov3_darknet53_coco2017 as a service and obtains detection results plus the image with recognition boxes drawn.
-* [Chinese semantic analysis with simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese semantic analysis with simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
   This example deploys simnet_bow as a service to measure the similarity between Chinese texts.
-* [Image segmentation with deeplabv3p_xception65_humanseg](../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
+* [Image segmentation with deeplabv3p_xception65_humanseg](../../demo/serving/module_serving/semantic_segmentation_deeplabv3p_xception65_humanseg)
   This example deploys deeplabv3p_xception65_humanseg as a service and obtains the recognition result and the segmented image.
-* [Chinese sentiment analysis with simnet_bow](../demo/serving/module_serving/semantic_model_simnet_bow)
+* [Chinese sentiment analysis with simnet_bow](../../demo/serving/module_serving/semantic_model_simnet_bow)
   This example deploys senta_lstm as a service for Chinese sentiment analysis and obtains the sentiment result.
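The LAC demo referenced above sends plain text to the /predict/text/<module> route. A minimal client sketch, assuming a local instance started with `hub serving start -m lac`; the "text" form-field name follows the LAC demo referenced in this tutorial and is an assumption, not something shown in this commit:

```python
# Hypothetical LAC client sketch; assumes `hub serving start -m lac`
# is running locally. The "text" form-field name is an assumption.
import requests

text = {"text": ["今天是个好日子", "天气预报说今天要下雨"]}
url = "http://127.0.0.1:8866/predict/text/lac"
r = requests.post(url=url, data=text)
print(r.json())
```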
paddlehub/commands/serving.py

@@ -103,6 +103,8 @@ class ServingCommand(BaseCommand):
         self.parser.add_argument("--config", "-c", nargs="?")
         self.parser.add_argument("--port", "-p", nargs="?", default=8866)
         self.parser.add_argument("--gpu", "-i", nargs="?", default=0)
+        self.parser.add_argument(
+            "--use_singleprocess", action="store_true", default=False)

     def dump_pid_file(self):
         pid = os.getpid()
@@ -336,12 +338,13 @@ class ServingCommand(BaseCommand):
     def start_serving(self):
         config_file = self.args.config
+        single_mode = self.args.use_singleprocess
         if config_file is not None:
             if os.path.exists(config_file):
                 with open(config_file, "r") as fp:
                     configs = json.load(fp)
                     use_multiprocess = configs.get("use_multiprocess", False)
-                    if platform.system() == "Windows":
+                    if single_mode is True or platform.system() == "Windows":
                         print(
                             "Warning: Windows cannot use multiprocess working "
                             "mode, PaddleHub Serving will switch to single process mode"
@@ -357,7 +360,7 @@ class ServingCommand(BaseCommand):
             else:
                 print("config_file ", config_file, "not exists.")
         else:
-            if platform.system() == "Windows":
+            if single_mode is True or platform.system() == "Windows":
                 print(
                     "Warning: Windows cannot use multiprocess working "
                     "mode, PaddleHub Serving will switch to single process mode"
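start_serving() above reads use_multiprocess from the JSON file passed via -c/--config. A minimal sketch of writing such a file; only the use_multiprocess key is confirmed by this commit, the port value simply mirrors the CLI default, and a real config normally also lists the modules to serve:

```python
# Minimal serving_config.json sketch; only "use_multiprocess" is read by the
# code changed in this commit, "port" mirrors the CLI default (8866), and any
# other fields a deployment needs are not covered here.
import json

config = {
    "use_multiprocess": False,  # forced off on Windows or with --use_singleprocess
    "port": 8866,
}

with open("serving_config.json", "w") as fp:
    json.dump(config, fp, indent=4)
```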
paddlehub/common/utils.py

@@ -22,9 +22,12 @@ import os
 import multiprocessing
 import hashlib
 import platform
+import base64

 import paddle.fluid as fluid
 import six
+import numpy as np
+import cv2

 from paddlehub.module import module_desc_pb2
 from paddlehub.common.logger import logger
@@ -51,6 +54,42 @@ def version_compare(version1, version2):
     return len(version1) > len(version2)


+def base64s_to_cvmats(base64s):
+    for index, value in enumerate(base64s):
+        value = bytes(value, encoding="utf8")
+        value = base64.b64decode(value)
+        value = np.fromstring(value, np.uint8)
+        value = cv2.imdecode(value, 1)
+        base64s[index] = value
+
+    return base64s
+
+
+def handle_mask_results(results):
+    result = []
+    if len(results) <= 0:
+        return results
+    _id = results[0]["id"]
+    _item = {
+        "data": [],
+        "path": results[0].get("path", ""),
+        "id": results[0]["id"]
+    }
+    for item in results:
+        if item["id"] == _id:
+            _item["data"].append(item["data"])
+        else:
+            result.append(_item)
+            _id = _id + 1
+            _item = {
+                "data": [item["data"]],
+                "path": item.get("path", ""),
+                "id": item.get("id", _id)
+            }
+    result.append(_item)
+    return result
+
+
 def get_platform():
     return platform.platform()
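A toy usage sketch of the two helpers added above; the image and result values are invented for illustration:

```python
# Toy usage of the helpers added above; all data values are made up.
import base64

import cv2
import numpy as np

from paddlehub.common import utils

# Round-trip an image through base64 and back to an OpenCV matrix.
ok, buf = cv2.imencode(".jpg", np.zeros((4, 4, 3), dtype=np.uint8))
b64 = base64.b64encode(buf.tobytes()).decode("utf8")
mats = utils.base64s_to_cvmats([b64])  # -> [4x4x3 uint8 BGR array]

# Group flat per-face detections by request id, as the serving layer expects.
results = [
    {"id": 1, "path": "a.jpg", "data": {"label": "MASK"}},
    {"id": 1, "path": "a.jpg", "data": {"label": "NO MASK"}},
    {"id": 2, "path": "b.jpg", "data": {"label": "MASK"}},
]
grouped = utils.handle_mask_results(results)
# grouped == [{"id": 1, "path": "a.jpg", "data": [... two items ...]},
#             {"id": 2, "path": "b.jpg", "data": [... one item ...]}]
```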
paddlehub/serving/app_single.py

@@ -18,6 +18,7 @@ import time
 import os
 import base64
 import logging
+import shutil

 cv_module_method = {
     "vgg19_imagenet": "predict_classification",
@@ -47,7 +48,9 @@ cv_module_method = {
     "faster_rcnn_coco2017": "predict_object_detection",
     "cyclegan_cityscapes": "predict_gan",
     "deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
-    "ace2p": "predict_semantic_segmentation"
+    "ace2p": "predict_semantic_segmentation",
+    "pyramidbox_lite_server_mask": "predict_mask",
+    "pyramidbox_lite_mobile_mask": "predict_mask"
 }
@@ -132,6 +135,59 @@ def predict_gan(module, input_img, id, batch_size, extra={}):
     return results_pack


+def predict_mask(module, input_img, id, batch_size, extra=None, r_img=False):
+    output_folder = "detection_result"
+    global use_gpu
+    method_name = module.desc.attr.map.data['default_signature'].s
+    predict_method = getattr(module, method_name)
+    try:
+        data = {}
+        if input_img is not None:
+            input_img = {"image": input_img}
+            data.update(input_img)
+        if extra is not None:
+            data.update(extra)
+            r_img = True if "r_img" in extra.keys() else False
+        results = predict_method(
+            data=data, use_gpu=use_gpu, batch_size=batch_size)
+        results = utils.handle_mask_results(results)
+    except Exception as err:
+        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
+        print(curr, " - ", err)
+        return {"result": "Please check data format!"}
+    finally:
+        base64_list = []
+        results_pack = []
+        if input_img is not None:
+            if r_img is False:
+                shutil.rmtree(output_folder)
+                for index in range(len(results)):
+                    results[index]["path"] = ""
+                results_pack = results
+            else:
+                input_img = input_img.get("image", [])
+                for index in range(len(input_img)):
+                    item = input_img[index]
+                    with open(os.path.join(output_folder, item), "rb") as fp:
+                        b_head = "data:image/" + item.split(".")[-1] + ";base64"
+                        b_body = base64.b64encode(fp.read())
+                        b_body = str(b_body).replace("b'", "").replace("'", "")
+                        b_img = b_head + "," + b_body
+                        base64_list.append(b_img)
+                        results[index]["path"] = results[index]["path"].replace(
+                            id + "_", "") if results[index]["path"] != "" \
+                            else ""
+                        results[index].update({"base64": b_img})
+                        results_pack.append(results[index])
+                    os.remove(item)
+                    os.remove(os.path.join(output_folder, item))
+        else:
+            results_pack = results
+
+    return results_pack
+
+
 def predict_object_detection(module, input_img, id, batch_size, extra={}):
     output_folder = "detection_result"
     global use_gpu
@@ -253,6 +309,14 @@ def create_app(init_flag=False, configs=None):
             extra_info = {}
             for item in list(request.form.keys()):
                 extra_info.update({item: request.form.getlist(item)})
+
+            for key in extra_info.keys():
+                if isinstance(extra_info[key], list):
+                    extra_info[key] = utils.base64s_to_cvmats(
+                        eval(extra_info[key][0])["b64s"]) if isinstance(
+                            extra_info[key][0],
+                            str) and "b64s" in extra_info[key][0] \
+                        else extra_info[key]
+
             file_name_list = []
             if img_base64 != []:
                 for item in img_base64:
@@ -260,7 +324,7 @@ def create_app(init_flag=False, configs=None):
                 if ext not in ["jpeg", "jpg", "png"]:
                     return {"result": "Unrecognized file type"}
                 filename = req_id + "_" \
-                           + utils.md5(str(time.time()) + item[0:20]) \
+                           + utils.md5(str(time.time()) + item[0:20]) \
                            + "." \
                            + ext
                 img_data = base64.b64decode(item.split(',')[-1])
@@ -281,6 +345,10 @@ def create_app(init_flag=False, configs=None):
             module_type = module.type.split("/")[-1].replace("-", "_").lower()
             predict_func = eval("predict_" + module_type)
             batch_size = batch_size_dict.get(module_name, 1)
+            if file_name_list == []:
+                file_name_list = None
+            if extra_info == {}:
+                extra_info = None
             results = predict_func(module, file_name_list, req_id, batch_size,
                                    extra_info)
             r = {"results": str(results)}
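The new predict_mask path is routed to the pyramidbox_lite_*_mask modules through cv_module_method. A hypothetical client sketch; the endpoint pattern follows the other demos in this commit and the "r_img" field mirrors what predict_mask() checks, but the exact payload a deployed module expects is not shown here:

```python
# Hypothetical client for the new mask-detection route; assumes a local
# serving instance hosting pyramidbox_lite_server_mask. The "r_img" form
# field asks predict_mask() to return the rendered images as base64.
import requests

file_list = ["test_mask_detection.jpg"]  # placeholder image path
files = [("image", (open(item, "rb"))) for item in file_list]
url = "http://127.0.0.1:8866/predict/image/pyramidbox_lite_server_mask"
r = requests.post(url=url, files=files, data={"r_img": "True"})
print(r.json())
```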