Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleHub
提交
a4a8245b
P
PaddleHub
项目概览
PaddlePaddle
/
PaddleHub
大约 1 年 前同步成功
通知
282
Star
12117
Fork
2091
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
200
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleHub
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
200
Issue
200
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
a4a8245b
编写于
11月 04, 2019
作者:
S
shenyuhan
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add single_app()
上级
60c527d8
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
17 additions
and
47 deletions
+17
-47
paddlehub/serving/app_single.py
paddlehub/serving/app_single.py
+17
-47
未找到文件。
paddlehub/serving/app_single.py
浏览文件 @
a4a8245b
...
...
@@ -23,39 +23,7 @@ import base64
import
logging
def get_img_output(module, base64_head, results):
    """Package an image-producing CV module's prediction as a base64 response.

    Args:
        module: the loaded PaddleHub module; only its ``type`` string is read.
        base64_head: data-URI prefix (e.g. ``"data:image/png;base64"``) to
            prepend to the encoded image.
        results: prediction results list; element 0 is expected to carry either
            a ``"processed"`` output path (semantic segmentation) or a
            ``"path"``/``"data"`` pair (object detection).
            NOTE(review): schema inferred from the indexing below — confirm
            against the module's predict output.

    Returns:
        ``{"result": {...}}`` with ``desc`` and ``output_img`` keys on
        success, or ``None`` when the module is not a CV module, the subtype
        is unrecognized, or the output image file does not exist.
    """

    def _encode_and_remove(output_file):
        # Read the rendered image, base64-encode it, and delete the temp
        # file so the serving directory does not accumulate outputs.
        with open(output_file, "rb") as fp:
            img_base64 = base64.b64encode(fp.read()).decode("utf-8")
        os.remove(output_file)
        return base64_head + "," + img_base64

    if not module.type.startswith("CV"):
        # Non-CV modules fall through with no result (original behavior).
        return None
    if "semantic-segmentation" in module.type:
        output_file = results[0].get("processed", None)
        if output_file is not None and os.path.exists(output_file):
            results = {
                "desc": "Here is result.",
                "output_img": _encode_and_remove(output_file),
            }
            return {"result": results}
    elif "object-detection" in module.type:
        # os.path.join never returns None, so only the existence check is
        # needed (the original also tested "is not None", which was vacuous).
        output_file = os.path.join("./output", results[0]["path"])
        if os.path.exists(output_file):
            results = {
                "desc": str(results[0]["data"]),
                "output_img": _encode_and_remove(output_file),
            }
            return {"result": results}
def
predict_sentiment_analysis
(
module
,
input_text
):
def
predict_sentiment_analysis
(
module
,
input_text
,
extra
=
None
):
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
...
...
@@ -68,7 +36,7 @@ def predict_sentiment_analysis(module, input_text):
return
results
def
predict_pretrained_model
(
module
,
input_text
):
def
predict_pretrained_model
(
module
,
input_text
,
extra
=
None
):
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
...
...
@@ -80,16 +48,18 @@ def predict_pretrained_model(module, input_text):
return
results
def predict_lexical_analysis(module, input_text, extra=None):
    """Run a lexical-analysis module over a batch of input texts.

    Args:
        module: the loaded PaddleHub module; its default signature method is
            looked up from ``module.desc`` and invoked.
        input_text: list of input strings to analyze.
        extra: optional list whose first element is a user-dictionary file
            path forwarded to the predict method as ``user_dict``; ``None``
            or an empty list means no user dictionary.

    Returns:
        The module's prediction results, or ``{"result": "Please check data
        format!"}`` if prediction raises.
    """
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    data = {"text": input_text}
    try:
        # BUGFIX: the original tested ``extra is []``, which is an identity
        # comparison against a freshly created list and is therefore always
        # False — every call without a user dict fell into ``extra[0]``,
        # raised IndexError, and was swallowed into the error message below.
        # A truthiness check handles both None and [] correctly, and the
        # mutable default ``extra=[]`` is replaced with the safe None default.
        if not extra:
            results = predict_method(data=data, use_gpu=use_gpu)
        else:
            user_dict = extra[0]
            results = predict_method(
                data=data, user_dict=user_dict, use_gpu=use_gpu)
    except Exception:
        # Deliberate best-effort: surface a generic message rather than a
        # traceback to the HTTP client (original behavior preserved).
        return {"result": "Please check data format!"}
    return results
...
...
@@ -231,10 +201,8 @@ def create_app():
@
app_instance
.
route
(
"/predict/image/<module_name>"
,
methods
=
[
"POST"
])
def
predict_image
(
module_name
):
# 稍后保存的文件名用id+源文件名的形式以避免冲突
req_id
=
request
.
data
.
get
(
"id"
)
global
use_gpu
# 这里是一个base64的列表
img_base64
=
request
.
form
.
getlist
(
"input_img"
)
file_name_list
=
[]
if
img_base64
!=
""
:
...
...
@@ -256,11 +224,7 @@ def create_app():
file_name
=
req_id
+
"_"
+
item
.
filename
item
.
save
(
file_name
)
file_name_list
.
append
(
file_name
)
# 到这里就把所有原始文件和文件名列表都保存了
# 文件名列表可用于预测
# 获取模型
module
=
ImageModelService
.
get_module
(
module_name
)
# 根据模型种类寻找具体预测方法,即根据名字定函数
module_type
=
module
.
type
.
split
(
"/"
)[
-
1
].
replace
(
"-"
,
"_"
).
lower
()
predict_func
=
eval
(
"predict_"
+
module_type
)
results
=
predict_func
(
module
,
file_name_list
)
...
...
@@ -269,13 +233,19 @@ def create_app():
@
app_instance
.
route
(
"/predict/text/<module_name>"
,
methods
=
[
"POST"
])
def
predict_text
(
module_name
):
req_id
=
request
.
data
.
get
(
"id"
)
global
use_gpu
# 应该是一个列表
data
=
request
.
form
.
getlist
(
"input_text"
)
file
=
request
.
files
.
getlist
(
"user_dict"
)
module
=
TextModelService
.
get_module
(
module_name
)
module_type
=
module
.
type
.
split
(
"/"
)[
-
1
].
replace
(
"-"
,
"_"
).
lower
()
predict_func
=
eval
(
"predict_"
+
module_type
)
results
=
predict_func
(
module
,
data
)
file_list
=
[]
for
item
in
file
:
file_path
=
req_id
+
"_"
+
item
.
filename
file_list
.
append
(
file_path
)
item
.
save
(
file_path
)
results
=
predict_func
(
module
,
data
,
file_list
)
return
{
"results"
:
results
}
return
app_instance
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录