Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleHub
提交
61fe956c
P
PaddleHub
项目概览
PaddlePaddle
/
PaddleHub
大约 1 年 前同步成功
通知
282
Star
12117
Fork
2091
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
200
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleHub
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
200
Issue
200
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
61fe956c
编写于
11月 04, 2019
作者:
走神的阿圆
提交者:
wuzewu
11月 04, 2019
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
1.add config batch_size; 2.delete req_id_ for every file (#213)
* 1.add config batch_size; 2.delete req_id_ for every file
上级
66fb66c7
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
40 additions
and
19 deletions
+40
-19
paddlehub/serving/app_single.py
paddlehub/serving/app_single.py
+40
-19
未找到文件。
paddlehub/serving/app_single.py
浏览文件 @
61fe956c
...
...
@@ -65,14 +65,15 @@ cv_module_method = {
}
def
predict_sentiment_analysis
(
module
,
input_text
,
extra
=
None
):
def
predict_sentiment_analysis
(
module
,
input_text
,
batch_size
,
extra
=
None
):
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
try
:
data
=
input_text
[
0
]
data
.
update
(
input_text
[
1
])
results
=
predict_method
(
data
=
data
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
data
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
except
Exception
as
err
:
curr
=
time
.
strftime
(
"%Y-%m-%d %H:%M:%S"
,
time
.
localtime
(
time
.
time
()))
print
(
curr
,
" - "
,
err
)
...
...
@@ -80,13 +81,14 @@ def predict_sentiment_analysis(module, input_text, extra=None):
return
results
def
predict_pretrained_model
(
module
,
input_text
,
extra
=
None
):
def
predict_pretrained_model
(
module
,
input_text
,
batch_size
,
extra
=
None
):
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
try
:
data
=
{
"text"
:
input_text
}
results
=
predict_method
(
data
=
data
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
data
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
except
Exception
as
err
:
curr
=
time
.
strftime
(
"%Y-%m-%d %H:%M:%S"
,
time
.
localtime
(
time
.
time
()))
print
(
curr
,
" - "
,
err
)
...
...
@@ -94,18 +96,22 @@ def predict_pretrained_model(module, input_text, extra=None):
return
results
def
predict_lexical_analysis
(
module
,
input_text
,
extra
=
[]):
def
predict_lexical_analysis
(
module
,
input_text
,
batch_size
,
extra
=
[]):
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
data
=
{
"text"
:
input_text
}
try
:
if
extra
==
[]:
results
=
predict_method
(
data
=
data
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
data
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
else
:
user_dict
=
extra
[
0
]
results
=
predict_method
(
data
=
data
,
user_dict
=
user_dict
,
use_gpu
=
use_gpu
)
data
=
data
,
user_dict
=
user_dict
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
for
path
in
extra
:
os
.
remove
(
path
)
except
Exception
as
err
:
...
...
@@ -115,13 +121,14 @@ def predict_lexical_analysis(module, input_text, extra=[]):
return
results
def
predict_classification
(
module
,
input_img
):
def
predict_classification
(
module
,
input_img
,
batch_size
):
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
try
:
input_img
=
{
"image"
:
input_img
}
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
except
Exception
as
err
:
curr
=
time
.
strftime
(
"%Y-%m-%d %H:%M:%S"
,
time
.
localtime
(
time
.
time
()))
print
(
curr
,
" - "
,
err
)
...
...
@@ -129,7 +136,7 @@ def predict_classification(module, input_img):
return
results
def
predict_gan
(
module
,
input_img
,
extra
=
{}):
def
predict_gan
(
module
,
input_img
,
id
,
batch_size
,
extra
=
{}):
# special
output_folder
=
module
.
name
.
split
(
"_"
)[
0
]
+
"_"
+
"output"
global
use_gpu
...
...
@@ -137,7 +144,8 @@ def predict_gan(module, input_img, extra={}):
predict_method
=
getattr
(
module
,
method_name
)
try
:
input_img
=
{
"image"
:
input_img
}
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
except
Exception
as
err
:
curr
=
time
.
strftime
(
"%Y-%m-%d %H:%M:%S"
,
time
.
localtime
(
time
.
time
()))
print
(
curr
,
" - "
,
err
)
...
...
@@ -155,6 +163,7 @@ def predict_gan(module, input_img, extra={}):
b_body
=
str
(
b_body
).
replace
(
"b'"
,
""
).
replace
(
"'"
,
""
)
b_img
=
b_head
+
","
+
b_body
base64_list
.
append
(
b_img
)
results
[
index
]
=
results
[
index
].
replace
(
id
+
"_"
,
""
)
results
[
index
]
=
{
"path"
:
results
[
index
]}
results
[
index
].
update
({
"base64"
:
b_img
})
results_pack
.
append
(
results
[
index
])
...
...
@@ -163,14 +172,15 @@ def predict_gan(module, input_img, extra={}):
return
results_pack
def
predict_object_detection
(
module
,
input_img
):
def
predict_object_detection
(
module
,
input_img
,
id
,
batch_size
):
output_folder
=
"output"
global
use_gpu
method_name
=
module
.
desc
.
attr
.
map
.
data
[
'default_signature'
].
s
predict_method
=
getattr
(
module
,
method_name
)
try
:
input_img
=
{
"image"
:
input_img
}
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
except
Exception
as
err
:
curr
=
time
.
strftime
(
"%Y-%m-%d %H:%M:%S"
,
time
.
localtime
(
time
.
time
()))
print
(
curr
,
" - "
,
err
)
...
...
@@ -186,6 +196,8 @@ def predict_object_detection(module, input_img):
b_body
=
str
(
b_body
).
replace
(
"b'"
,
""
).
replace
(
"'"
,
""
)
b_img
=
b_head
+
","
+
b_body
base64_list
.
append
(
b_img
)
results
[
index
][
"path"
]
=
results
[
index
][
"path"
].
replace
(
id
+
"_"
,
""
)
results
[
index
].
update
({
"base64"
:
b_img
})
results_pack
.
append
(
results
[
index
])
os
.
remove
(
item
)
...
...
@@ -193,7 +205,7 @@ def predict_object_detection(module, input_img):
return
results_pack
def
predict_semantic_segmentation
(
module
,
input_img
):
def
predict_semantic_segmentation
(
module
,
input_img
,
id
,
batch_size
):
# special
output_folder
=
module
.
name
.
split
(
"_"
)[
-
1
]
+
"_"
+
"output"
global
use_gpu
...
...
@@ -201,7 +213,8 @@ def predict_semantic_segmentation(module, input_img):
predict_method
=
getattr
(
module
,
method_name
)
try
:
input_img
=
{
"image"
:
input_img
}
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
)
results
=
predict_method
(
data
=
input_img
,
use_gpu
=
use_gpu
,
batch_size
=
batch_size
)
except
Exception
as
err
:
curr
=
time
.
strftime
(
"%Y-%m-%d %H:%M:%S"
,
time
.
localtime
(
time
.
time
()))
print
(
curr
,
" - "
,
err
)
...
...
@@ -219,6 +232,10 @@ def predict_semantic_segmentation(module, input_img):
b_body
=
str
(
b_body
).
replace
(
"b'"
,
""
).
replace
(
"'"
,
""
)
b_img
=
b_head
+
","
+
b_body
base64_list
.
append
(
b_img
)
results
[
index
][
"origin"
]
=
results
[
index
][
"origin"
].
replace
(
id
+
"_"
,
""
)
results
[
index
][
"processed"
]
=
results
[
index
][
"processed"
].
replace
(
id
+
"_"
,
""
)
results
[
index
].
update
({
"base64"
:
b_img
})
results_pack
.
append
(
results
[
index
])
os
.
remove
(
item
)
...
...
@@ -260,7 +277,7 @@ def create_app():
@
app_instance
.
route
(
"/predict/image/<module_name>"
,
methods
=
[
"POST"
])
def
predict_image
(
module_name
):
req_id
=
request
.
data
.
get
(
"id"
)
global
use_gpu
global
use_gpu
,
batch_size_dict
img_base64
=
request
.
form
.
getlist
(
"image"
)
file_name_list
=
[]
if
img_base64
!=
[]:
...
...
@@ -289,7 +306,8 @@ def create_app():
else
:
module_type
=
module
.
type
.
split
(
"/"
)[
-
1
].
replace
(
"-"
,
"_"
).
lower
()
predict_func
=
eval
(
"predict_"
+
module_type
)
results
=
predict_func
(
module
,
file_name_list
)
batch_size
=
batch_size_dict
.
get
(
module_name
,
1
)
results
=
predict_func
(
module
,
file_name_list
,
req_id
,
batch_size
)
r
=
{
"results"
:
str
(
results
)}
return
r
...
...
@@ -316,22 +334,25 @@ def create_app():
file_path
=
req_id
+
"_"
+
item
.
filename
file_list
.
append
(
file_path
)
item
.
save
(
file_path
)
results
=
predict_func
(
module
,
data
,
file_list
)
batch_size
=
batch_size_dict
.
get
(
module_name
,
1
)
results
=
predict_func
(
module
,
data
,
batch_size
,
file_list
)
return
{
"results"
:
results
}
return
app_instance
def config_with_file(configs):
    """Populate the module registries from a parsed serving config.

    Splits the configured modules into CV and NLP registries (consumed
    elsewhere via module-level globals) and records each module's
    configured batch size.

    Args:
        configs: iterable of dicts, each with keys "category" ("CV" or
            "NLP"), "module" (module name), and "batch_size" (int).

    Side effects:
        Rebinds the module-level globals ``nlp_module``, ``cv_module``
        and ``batch_size_dict``; prints each config item for logging.
    """
    global nlp_module, cv_module, batch_size_dict
    nlp_module = []
    cv_module = []
    batch_size_dict = {}
    for item in configs:
        print(item)
        if item["category"] == "CV":
            cv_module.append(item["module"])
        elif item["category"] == "NLP":
            nlp_module.append(item["module"])
        # Every module gets a batch size recorded, even if its category
        # is neither CV nor NLP (matching the original behavior).
        batch_size_dict[item["module"]] = item["batch_size"]
def
run
(
is_use_gpu
=
False
,
configs
=
None
,
port
=
8866
,
timeout
=
60
):
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录