PaddlePaddle / PaddleHub, commit 3ed9bd13

Authored on Nov 04, 2019 by zhangxuefei

    Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleHub into develop

Parents: 5c8a6679, 61fe956c

7 changed files with 564 additions and 44 deletions (+564 -44)
Changed files (7):

  paddlehub/commands/serving.py                     +68  -11
  paddlehub/module/module.py                         +7   -5
  paddlehub/serving/app.py                          +53  -17
  paddlehub/serving/app_single.py (new file)       +401   -0
  paddlehub/serving/templates/main.html             +28  -10
  paddlehub/serving/templates/serving_config.json    +2   -1
  setup.py                                           +5   -0
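In brief, this merge lands the first cut of PaddleHub-Serving's single-process mode: the hub serving command gains a positional start sub-command (replacing the old --start flag), a --use_multiprocess switch, and a port-occupancy check; paddlehub/serving/app_single.py is a new Flask app used on Windows or whenever multiprocess mode is off; the result-polling loops gain a configurable timeout; and the default port moves from 8888 to 8866. Hedged usage sketches follow each reconstructed diff below.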
paddlehub/commands/serving.py (view file @ 3ed9bd13)

@@ -18,13 +18,12 @@ from __future__ import division
 from __future__ import print_function

 import argparse
-import subprocess
+import shlex
 import os
+import platform
+import socket
 import json
 import paddlehub as hub
 from paddlehub.commands.base_command import BaseCommand, ENTRY
-from paddlehub.serving import app


 class ServingCommand(BaseCommand):
@@ -41,33 +40,56 @@ class ServingCommand(BaseCommand):
             usage='%(prog)s',
             add_help=True)
         self.parser.add_argument("command")
+        self.parser.add_argument("sub_command")
         self.sub_parse = self.parser.add_mutually_exclusive_group(
             required=False)
-        self.sub_parse.add_argument("--start", action="store_true")
         self.parser.add_argument(
             "--use_gpu", action="store_true", default=False)
+        self.parser.add_argument(
+            "--use_multiprocess", action="store_true", default=False)
         self.parser.add_argument("--modules", "-m", nargs="+")
         self.parser.add_argument("--config", "-c", nargs="+")
-        self.parser.add_argument("--port", "-p", nargs="+", default=[8888])
+        self.parser.add_argument("--port", "-p", nargs="+", default=[8866])
+
+    @staticmethod
+    def is_port_occupied(ip, port):
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            s.connect((ip, int(port)))
+            s.shutdown(2)
+            return True
+        except:
+            return False

     @staticmethod
     def preinstall_modules(modules):
         configs = []
+        module_exist = {}
         if modules is not None:
             for module in modules:
                 module_name = module if "==" not in module else \
                     module.split("==")[0]
                 module_version = None if "==" not in module else \
                     module.split("==")[1]
+                if module_exist.get(module_name, "") != "":
+                    print(module_name, "==", module_exist.get(module_name),
+                          " will be ignored cause new version is specified.")
+                    configs.pop()
+                module_exist.update({module_name: module_version})
                 try:
                     m = hub.Module(name=module_name, version=module_version)
+                    method_name = m.desc.attr.map.data['default_signature'].s
+                    if method_name == "":
+                        raise RuntimeError("{} cannot be use for "
+                                           "predicting".format(module_name))
                     configs.append({
                         "module": module_name,
                         "version": m.version,
                         "category": str(m.type).split("/")[0].upper()
                     })
                 except Exception as err:
-                    pass
+                    print(err, ", start Hub-Serving unsuccessfully.")
+                    exit(1)
         return configs

     @staticmethod
@@ -78,8 +100,24 @@ class ServingCommand(BaseCommand):
             if os.path.exists(config_file):
                 with open(config_file, "r") as fp:
                     configs = json.load(fp)
+                    use_multiprocess = configs.get("use_multiprocess", False)
+                    if use_multiprocess is True:
+                        if platform.system() == "Windows":
+                            print(
+                                "Warning: Windows cannot use multiprocess working "
+                                "mode, Hub-Serving will switch to single process mode")
+                            from paddlehub.serving import app_single as app
+                        else:
+                            from paddlehub.serving import app
+                    else:
+                        from paddlehub.serving import app_single as app
                     use_gpu = configs.get("use_gpu", False)
-                    port = configs.get("port", 8888)
+                    port = configs.get("port", 8866)
+                    if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
+                        print("Port %s is occupied, please change it." % (port))
+                        return False
                     configs = configs.get("modules_info")
                     module = [
                         str(i["module"]) + "==" + str(i["version"])
@@ -92,10 +130,23 @@ class ServingCommand(BaseCommand):
             else:
                 print("config_file ", config_file, "not exists.")
         else:
+            if args.use_multiprocess is True:
+                if platform.system() == "Windows":
+                    print(
+                        "Warning: Windows cannot use multiprocess working "
+                        "mode, Hub-Serving will switch to single process mode")
+                    from paddlehub.serving import app_single as app
+                else:
+                    from paddlehub.serving import app
+            else:
+                from paddlehub.serving import app_single as app
             module = args.modules
             if module is not None:
                 use_gpu = args.use_gpu
                 port = args.port[0]
+                if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
+                    print("Port %s is occupied, please change it." % (port))
+                    return False
                 module_info = ServingCommand.preinstall_modules(module)
                 [
                     item.update({
@@ -111,9 +162,10 @@ class ServingCommand(BaseCommand):
     def show_help():
         str = "serving <option>\n"
         str += "\tManage PaddleHub-Serving.\n"
-        str += "option:\n"
-        str += "--start\n"
+        str += "sub command:\n"
+        str += "start\n"
         str += "\tStart PaddleHub-Serving if specifies this parameter.\n"
+        str += "option:\n"
         str += "--modules/-m [module1==version, module2==version...]\n"
         str += "\tPre-install modules via this parameter list.\n"
         str += "--port/-p XXXX\n"
@@ -126,8 +178,13 @@ class ServingCommand(BaseCommand):
         print(str)

     def execute(self, argv):
-        args = self.parser.parse_args()
-        if args.start is True:
+        try:
+            args = self.parser.parse_args()
+        except:
+            print("Please refer to the instructions below.")
+            ServingCommand.show_help()
+            return False
+        if args.sub_command == "start":
             ServingCommand.start_serving(args)
         else:
             ServingCommand.show_help()
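With the parser changes above, the server is now launched with a positional sub-command, e.g. hub serving start -m lac -p 8866 (the module name is illustrative). The new port check can also be exercised on its own; a minimal sketch, assuming this build of PaddleHub is importable:

from paddlehub.commands.serving import ServingCommand

# True means something is already listening on 127.0.0.1:8866 (the TCP
# connect succeeds), in which case start_serving() prints a warning and
# bails out instead of trying to bind the port.
if ServingCommand.is_port_occupied("127.0.0.1", 8866):
    print("Port 8866 is occupied, please change it.")
else:
    print("Port 8866 is free.")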
paddlehub/module/module.py (view file @ 3ed9bd13)

@@ -155,9 +155,10 @@ class Module(object):
             module_name=name, module_version=version, extra=extra)
         if not result:
             logger.error(tips)
-            exit(1)
-        logger.info(tips)
-        self._init_with_module_file(module_dir[0])
+            raise RuntimeError(tips)
+        else:
+            logger.info(tips)
+            self._init_with_module_file(module_dir[0])

     def _init_with_url(self, url):
         utils.check_url(url)
@@ -165,8 +166,9 @@ class Module(object):
             url, save_path=".")
         if not result:
             logger.error(tips)
-            exit(1)
-        self._init_with_module_file(module_dir)
+            raise RuntimeError(tips)
+        else:
+            self._init_with_module_file(module_dir)

     def _dump_processor(self):
         import inspect
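The visible effect of replacing exit(1) with raise RuntimeError(tips) is that a failed module lookup no longer terminates the host process: callers can now recover. A hedged sketch (the module name is deliberately bogus):

import paddlehub as hub

try:
    m = hub.Module(name="module_that_does_not_exist")
except RuntimeError as err:
    # Before this change, paddlehub would have called exit(1) here and the
    # calling application would simply have died.
    print("Module initialization failed:", err)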
paddlehub/serving/app.py (view file @ 3ed9bd13)

@@ -185,24 +185,51 @@ def create_app():
     @app_instance.before_request
     def before_request():
-        request.data = {"id": str(time.time())}
-        print(request.remote_addr)
+        request.data = {"id": utils.md5(request.remote_addr + str(time.time()))}
         pass

+    @app_instance.route("/get/modules", methods=["GET", "POST"])
+    def get_modules_info():
+        global nlp_module, cv_module
+        module_info = {}
+        if len(nlp_module) > 0:
+            module_info.update({"nlp_module": [{"Choose...": "Choose..."}]})
+            for item in nlp_module:
+                module_info["nlp_module"].append({item: item})
+        if len(cv_module) > 0:
+            module_info.update({"cv_module": [{"Choose...": "Choose..."}]})
+            for item in cv_module:
+                module_info["cv_module"].append({item: item})
+        module_info.update({"Choose...": [{"请先选择分类": "Choose..."}]})
+        return {"module_info": module_info}
+
     @app_instance.route("/predict/image/<module_name>", methods=["POST"])
     def predict_iamge(module_name):
         global results_dict
         req_id = request.data.get("id")
-        img_base64 = request.form.get("input_img", "")
-        received_file_name = request.form.get("input_file", "")
-        ext = received_file_name.split(".")[-1]
-        if ext == "":
-            return {"result": "Unrecognized file type"}
+        img_base64 = request.form.get("image", "")
+        if img_base64 != "":
+            ext = img_base64.split(";")[0].split("/")[-1]
+            if ext not in ["jpeg", "jpg", "png"]:
+                return {"result": "Unrecognized file type"}
+            filename = utils.md5(str(time.time()) + str(img_base64)) + "." + ext
+            base64_head = img_base64.split(',')[0]
+            img_data = base64.b64decode(img_base64.split(',')[-1])
+            with open(filename, "wb") as fp:
+                fp.write(img_data)
+        else:
+            file = request.files["image"]
+            filename = file.filename
+            ext = file.filename.split(".")[-1]
+            if ext not in ["jpeg", "jpg", "png"]:
+                return {"result": "Unrecognized file type"}
+            base64_head = "data:image/" + ext + ";base64"
+            filename = utils.md5(filename) + '.' + ext
+            file.save(filename)
         score = time.time()
-        filename = utils.md5(str(time.time()) + str(img_base64)) + "." + ext
-        base64_head = img_base64.split(',')[0]
-        img_data = base64.b64decode(img_base64.split(',')[-1])
-        with open(filename, "wb") as fp:
-            fp.write(img_data)
         file_list = [filename]
         if queues_dict[module_name].qsize(
         ) + 1 > queues_dict[module_name].get_attribute("maxsize"):
@@ -211,9 +238,14 @@ def create_app():
         data_num = len(file_list)
         results = []
         result_len = 0
+        start_time = time.time()
         while result_len != data_num:
             result_len = len(results_dict.get(req_id, []))
+            if time.time() - start_time > time_out:
+                results_dict.pop(req_id, None)
+                return {"result": "Request time out."}
         results = results_dict.get(req_id)
+        results_dict.pop(req_id, None)
         results = [i[1] for i in sorted(results, key=lambda k: k[0])]
         filename = results[0].get("path")
         ext = filename.split(".")[-1]
@@ -225,7 +257,7 @@ def create_app():
         os.remove(filename)
         os.remove(output_file)
         results = {
-            "border":
+            "desc":
                 str(results[0]["data"]),
             "output_img":
                 base64_head + "," + str(output_img_base64).replace(
@@ -244,7 +276,7 @@ def create_app():
     def predict_text(module_name):
         global results_dict, queues_dict
         req_id = request.data.get("id")
-        data_list = request.form.get("input_text")
+        data_list = request.form.get("text")
         score = time.time()
         data_list = data_list.splitlines()
         data_temp = []
@@ -261,14 +293,17 @@ def create_app():
         if data_num + queues_dict[module_name].qsize(
         ) > queues_dict[module_name].get_attribute("maxsize"):
             return {"result": "Too many visitors now, please come back later."}
-        start = time.time()
         data_2_item(data_list, req_id, score, module_name)
         results = []
         result_len = 0
+        start_time = time.time()
         while result_len != data_num:
             result_len = len(results_dict.get(req_id, []))
+            if time.time() - start_time > time_out:
+                results_dict.pop(req_id, None)
+                return {"result": "Request time out."}
         results = results_dict.get(req_id)
+        results_dict.pop(req_id, None)
         results = [i[1] for i in sorted(results, key=lambda k: k[0])]
         return {"result": results}
@@ -302,8 +337,9 @@ def config_with_file(configs):
         queue_name_list.append(item["module"])


-def run(is_use_gpu=False, configs=None, port=8888):
-    global use_gpu
+def run(is_use_gpu=False, configs=None, port=8866, timeout=60):
+    global use_gpu, time_out
+    time_out = timeout
     use_gpu = is_use_gpu
     if configs is not None:
         config_with_file(configs)
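Two of these changes are client-visible: the text endpoint now reads the form field text rather than input_text, and the result-polling loops are bounded by the new time_out global (60 seconds by default, set through run()), answering "Request time out." instead of spinning forever. A hedged client sketch against a locally running server (the address and module are illustrative, and the third-party requests library is assumed to be installed):

import requests

resp = requests.post(
    "http://127.0.0.1:8866/predict/text/lac",
    data={"text": "今天天气真好"})  # this field was named "input_text" before
print(resp.json())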
paddlehub/serving/app_single.py (new file, mode 100644, view file @ 3ed9bd13)

# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from flask import Flask, request, render_template
from paddlehub.serving.model_service.text_model_service import TextModelService
from paddlehub.serving.model_service.image_model_service import ImageModelService
from paddlehub.common import utils
# from model_service.text_model_service import TextModelService
# from model_service.image_model_service import ImageModelService
import time
import os
import base64
import logging

nlp_module_method = {
    "lac": "predict_lexical_analysis",
    "simnet_bow": "predict_sentiment_analysis",
    "lm_lstm": "predict_pretrained_model",
    "senta_lstm": "predict_pretrained_model",
    "senta_gru": "predict_pretrained_model",
    "senta_cnn": "predict_pretrained_model",
    "senta_bow": "predict_pretrained_model",
    "senta_bilstm": "predict_pretrained_model",
    "emotion_detection_textcnn": "predict_pretrained_model"
}
cv_module_method = {
    "vgg19_imagenet": "predict_classification",
    "vgg16_imagenet": "predict_classification",
    "vgg13_imagenet": "predict_classification",
    "vgg11_imagenet": "predict_classification",
    "shufflenet_v2_imagenet": "predict_classification",
    "se_resnext50_32x4d_imagenet": "predict_classification",
    "se_resnext101_32x4d_imagenet": "predict_classification",
    "resnet_v2_50_imagenet": "predict_classification",
    "resnet_v2_34_imagenet": "predict_classification",
    "resnet_v2_18_imagenet": "predict_classification",
    "resnet_v2_152_imagenet": "predict_classification",
    "resnet_v2_101_imagenet": "predict_classification",
    "pnasnet_imagenet": "predict_classification",
    "nasnet_imagenet": "predict_classification",
    "mobilenet_v2_imagenet": "predict_classification",
    "googlenet_imagenet": "predict_classification",
    "alexnet_imagenet": "predict_classification",
    "yolov3_coco2017": "predict_object_detection",
    "ultra_light_fast_generic_face_detector_1mb_640": "predict_object_detection",
    "ultra_light_fast_generic_face_detector_1mb_320": "predict_object_detection",
    "ssd_mobilenet_v1_pascal": "predict_object_detection",
    "pyramidbox_face_detection": "predict_object_detection",
    "faster_rcnn_coco2017": "predict_object_detection",
    "cyclegan_cityscapes": "predict_gan",
    "deeplabv3p_xception65_humanseg": "predict_semantic_segmentation",
    "ace2p": "predict_semantic_segmentation"
}


def predict_sentiment_analysis(module, input_text, batch_size, extra=None):
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    try:
        data = input_text[0]
        data.update(input_text[1])
        results = predict_method(
            data=data, use_gpu=use_gpu, batch_size=batch_size)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    return results


def predict_pretrained_model(module, input_text, batch_size, extra=None):
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    try:
        data = {"text": input_text}
        results = predict_method(
            data=data, use_gpu=use_gpu, batch_size=batch_size)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    return results


def predict_lexical_analysis(module, input_text, batch_size, extra=[]):
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    data = {"text": input_text}
    try:
        if extra == []:
            results = predict_method(
                data=data, use_gpu=use_gpu, batch_size=batch_size)
        else:
            user_dict = extra[0]
            results = predict_method(
                data=data,
                user_dict=user_dict,
                use_gpu=use_gpu,
                batch_size=batch_size)
            for path in extra:
                os.remove(path)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    return results


def predict_classification(module, input_img, batch_size):
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    try:
        input_img = {"image": input_img}
        results = predict_method(
            data=input_img, use_gpu=use_gpu, batch_size=batch_size)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    return results


def predict_gan(module, input_img, id, batch_size, extra={}):
    # special
    output_folder = module.name.split("_")[0] + "_" + "output"
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    try:
        input_img = {"image": input_img}
        results = predict_method(
            data=input_img, use_gpu=use_gpu, batch_size=batch_size)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    base64_list = []
    results_pack = []
    input_img = input_img.get("image", [])
    for index in range(len(input_img)):
        # special
        item = input_img[index]
        with open(os.path.join(output_folder, item), "rb") as fp:
            # special
            b_head = "data:image/" + item.split(".")[-1] + ";base64"
            b_body = base64.b64encode(fp.read())
            b_body = str(b_body).replace("b'", "").replace("'", "")
            b_img = b_head + "," + b_body
            base64_list.append(b_img)
            results[index] = results[index].replace(id + "_", "")
            results[index] = {"path": results[index]}
            results[index].update({"base64": b_img})
            results_pack.append(results[index])
        os.remove(item)
        os.remove(os.path.join(output_folder, item))
    return results_pack


def predict_object_detection(module, input_img, id, batch_size):
    output_folder = "output"
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    try:
        input_img = {"image": input_img}
        results = predict_method(
            data=input_img, use_gpu=use_gpu, batch_size=batch_size)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    base64_list = []
    results_pack = []
    input_img = input_img.get("image", [])
    for index in range(len(input_img)):
        item = input_img[index]
        with open(os.path.join(output_folder, item), "rb") as fp:
            b_head = "data:image/" + item.split(".")[-1] + ";base64"
            b_body = base64.b64encode(fp.read())
            b_body = str(b_body).replace("b'", "").replace("'", "")
            b_img = b_head + "," + b_body
            base64_list.append(b_img)
            results[index]["path"] = results[index]["path"].replace(
                id + "_", "")
            results[index].update({"base64": b_img})
            results_pack.append(results[index])
        os.remove(item)
        os.remove(os.path.join(output_folder, item))
    return results_pack


def predict_semantic_segmentation(module, input_img, id, batch_size):
    # special
    output_folder = module.name.split("_")[-1] + "_" + "output"
    global use_gpu
    method_name = module.desc.attr.map.data['default_signature'].s
    predict_method = getattr(module, method_name)
    try:
        input_img = {"image": input_img}
        results = predict_method(
            data=input_img, use_gpu=use_gpu, batch_size=batch_size)
    except Exception as err:
        curr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print(curr, " - ", err)
        return {"result": "Please check data format!"}
    base64_list = []
    results_pack = []
    input_img = input_img.get("image", [])
    for index in range(len(input_img)):
        # special
        item = input_img[index]
        with open(results[index]["processed"], "rb") as fp:
            # special
            b_head = "data:image/png;base64"
            b_body = base64.b64encode(fp.read())
            b_body = str(b_body).replace("b'", "").replace("'", "")
            b_img = b_head + "," + b_body
            base64_list.append(b_img)
            results[index]["origin"] = results[index]["origin"].replace(
                id + "_", "")
            results[index]["processed"] = results[index]["processed"].replace(
                id + "_", "")
            results[index].update({"base64": b_img})
            results_pack.append(results[index])
        os.remove(item)
        os.remove(results[index]["processed"])
    return results_pack


def create_app():
    app_instance = Flask(__name__)
    app_instance.config["JSON_AS_ASCII"] = False
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app_instance.logger.handlers = gunicorn_logger.handlers
    app_instance.logger.setLevel(gunicorn_logger.level)

    @app_instance.route("/", methods=["GET", "POST"])
    def index():
        return render_template("main.html")

    @app_instance.before_request
    def before_request():
        request.data = {"id": utils.md5(request.remote_addr + str(time.time()))}
        pass

    @app_instance.route("/get/modules", methods=["GET", "POST"])
    def get_modules_info():
        global nlp_module, cv_module
        module_info = {}
        if len(nlp_module) > 0:
            module_info.update({"nlp_module": [{"Choose...": "Choose..."}]})
            for item in nlp_module:
                module_info["nlp_module"].append({item: item})
        if len(cv_module) > 0:
            module_info.update({"cv_module": [{"Choose...": "Choose..."}]})
            for item in cv_module:
                module_info["cv_module"].append({item: item})
        module_info.update({"Choose...": [{"请先选择分类": "Choose..."}]})
        return {"module_info": module_info}

    @app_instance.route("/predict/image/<module_name>", methods=["POST"])
    def predict_image(module_name):
        req_id = request.data.get("id")
        global use_gpu, batch_size_dict
        img_base64 = request.form.getlist("image")
        file_name_list = []
        if img_base64 != []:
            for item in img_base64:
                ext = item.split(";")[0].split("/")[-1]
                if ext not in ["jpeg", "jpg", "png"]:
                    return {"result": "Unrecognized file type"}
                filename = req_id + "_" \
                           + utils.md5(str(time.time()) + item[0:20]) \
                           + "." \
                           + ext
                img_data = base64.b64decode(item.split(',')[-1])
                file_name_list.append(filename)
                with open(filename, "wb") as fp:
                    fp.write(img_data)
        else:
            file = request.files.getlist("image")
            for item in file:
                file_name = req_id + "_" + item.filename
                item.save(file_name)
                file_name_list.append(file_name)
        module = ImageModelService.get_module(module_name)
        predict_func_name = cv_module_method.get(module_name, "")
        if predict_func_name != "":
            predict_func = eval(predict_func_name)
        else:
            module_type = module.type.split("/")[-1].replace("-", "_").lower()
            predict_func = eval("predict_" + module_type)
        batch_size = batch_size_dict.get(module_name, 1)
        results = predict_func(module, file_name_list, req_id, batch_size)
        r = {"results": str(results)}
        return r

    @app_instance.route("/predict/text/<module_name>", methods=["POST"])
    def predict_text(module_name):
        req_id = request.data.get("id")
        global use_gpu
        if module_name == "simnet_bow":
            text_1 = request.form.getlist("text_1")
            text_2 = request.form.getlist("text_2")
            data = [{"text_1": text_1}, {"text_2": text_2}]
        else:
            data = request.form.getlist("text")
        file = request.files.getlist("user_dict")
        module = TextModelService.get_module(module_name)
        predict_func_name = nlp_module_method.get(module_name, "")
        if predict_func_name != "":
            predict_func = eval(predict_func_name)
        else:
            module_type = module.type.split("/")[-1].replace("-", "_").lower()
            predict_func = eval("predict_" + module_type)
        file_list = []
        for item in file:
            file_path = req_id + "_" + item.filename
            file_list.append(file_path)
            item.save(file_path)
        batch_size = batch_size_dict.get(module_name, 1)
        results = predict_func(module, data, batch_size, file_list)
        return {"results": results}

    return app_instance


def config_with_file(configs):
    global nlp_module, cv_module, batch_size_dict
    nlp_module = []
    cv_module = []
    batch_size_dict = {}
    for item in configs:
        print(item)
        if item["category"] == "CV":
            cv_module.append(item["module"])
        elif item["category"] == "NLP":
            nlp_module.append(item["module"])
        batch_size_dict.update({item["module"]: item["batch_size"]})


def run(is_use_gpu=False, configs=None, port=8866, timeout=60):
    global use_gpu, time_out
    time_out = timeout
    use_gpu = is_use_gpu
    if configs is not None:
        config_with_file(configs)
    else:
        print("Start failed cause of missing configuration.")
        return
    my_app = create_app()
    my_app.run(host="0.0.0.0", port=port, debug=False)
    print("PaddleHub-Serving has been stopped.")


if __name__ == "__main__":
    configs = [{
        'category': 'NLP',
        u'queue_size': 20,
        u'version': u'1.0.0',
        u'module': 'lac',
        u'batch_size': 20
    }, {
        'category': 'NLP',
        u'queue_size': 20,
        u'version': u'1.0.0',
        u'module': 'senta_lstm',
        u'batch_size': 20
    }, {
        'category': 'CV',
        u'queue_size': 20,
        u'version': u'1.0.0',
        u'module': 'yolov3_coco2017',
        u'batch_size': 20
    }, {
        'category': 'CV',
        u'queue_size': 20,
        u'version': u'1.0.0',
        u'module': 'faster_rcnn_coco2017',
        u'batch_size': 20
    }]
    run(is_use_gpu=False, configs=configs)
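The __main__ block above doubles as a usage example for run(). For the image route, predict_image() accepts either multipart file uploads or data-URI base64 strings in the image form field; a hedged client sketch (the file name, module, and address are illustrative, requests assumed installed):

import base64
import requests

with open("test.jpg", "rb") as f:
    data_uri = "data:image/jpg;base64," + base64.b64encode(f.read()).decode()

# The handler derives the extension from the "data:image/<ext>" head and
# rejects anything outside jpeg/jpg/png before decoding the payload.
resp = requests.post(
    "http://127.0.0.1:8866/predict/image/yolov3_coco2017",
    data={"image": data_uri})
print(resp.json())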
paddlehub/serving/templates/main.html (view file @ 3ed9bd13)

@@ -16,7 +16,8 @@
 <html lang="en">
 <head>
     <meta charset="UTF-8">
-    <title>Title</title>
+    <title>Hub-Serving</title>
+    <link rel="shortcut icon" href="https://paddlepaddle-org-cn.bj.bcebos.com/paddle-site-front/favicon.ico"/>
     <link href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
     <script src="https://code.jquery.com/jquery-3.4.1.min.js" integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>
     <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script>
@@ -35,8 +36,6 @@
     <select class="custom-select" id="inputGroupSelect01"
             onchange="select_category(this.options[this.options.selectedIndex].value)">
         <option selected>Choose...</option>
-        <option value="nlp_module">nlp</option>
-        <option value="cv_module">cv</option>
     </select>
 </td>
 <td style="width: 6%"></td>
@@ -61,12 +60,31 @@
     </div>
 </form>
 <script>
-    module_info = {
-        "nlp_module":[{"Choose...": "Choose..."},{"lac": "lac"}, {"senta_lstm": "senta_lstm"}],
-        "cv_module":[{"Choose...": "Choose..."},{"yolov3": "yolov3_coco2017"},{"faster_rcnn": "faster_rcnn_coco2017"}],
-        "Choose...":[{"请先选择分类": "Choose..."}]
-    };
+    var module_info = {};
+    $.ajax({
+        type: "POST",
+        url: "/get/modules",
+        data: "",
+        dataType: "json",
+        async: false,
+        success: function(res) {
+            module_info = res.module_info;
+            console.log(res);
+            console.log("mo=", module_info);
+            if (module_info.hasOwnProperty("nlp_module")) {
+                s = document.getElementById("inputGroupSelect01");
+                s.options.add(new Option("nlp", "nlp_module"));
+            }
+            if (module_info.hasOwnProperty("cv_module")) {
+                s = document.getElementById("inputGroupSelect01");
+                s.options.add(new Option("cv", "cv_module"));
+            }
+        }
+    });
     function get_module_option(module_categoty) {
         options = module_info[module_categoty];
         html = "";
@@ -241,7 +259,7 @@
         },
         success: function(data) {
             data = data["result"];
-            document.getElementById("result_text").value = data["border"];
+            document.getElementById("result_text").value = data["desc"];
             document.getElementById("result_img").src = data["output_img"];
         }
     });
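The hard-coded module list is gone: the page now asks the server which modules are actually loaded. The same /get/modules endpoint the AJAX call uses can be queried outside the browser; a hedged sketch (address illustrative, requests assumed installed):

import requests

info = requests.post("http://127.0.0.1:8866/get/modules").json()
# Expect keys such as "nlp_module" and "cv_module", mirroring the dropdown.
print(info["module_info"])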
paddlehub/serving/templates/serving_config.json (view file @ 3ed9bd13)

@@ -26,5 +26,6 @@
         }
     ],
     "use_gpu": false,
-    "port": 8888
+    "port": 8866,
+    "use_multiprocess": false
 }
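For reference, start_serving() consumes this template through hub serving start -c <file>: it json-loads the file and reads exactly the keys changed here. A hedged inspection sketch (the path assumes a source checkout):

import json

with open("paddlehub/serving/templates/serving_config.json") as fp:
    cfg = json.load(fp)
print(cfg["port"])              # now 8866 rather than 8888
print(cfg["use_multiprocess"])  # new key; False selects the single-process app
print([m["module"] for m in cfg["modules_info"]])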
setup.py (view file @ 3ed9bd13)

@@ -59,6 +59,11 @@ setup(
         ]
     },
     include_package_data=True,
+    data_files=[('paddlehub/serving/templates', [
+        'paddlehub/serving/templates/serving_config.json',
+        'paddlehub/serving/templates/main.html'
+    ])],
+    include_data_files=True,
     # PyPI package information.
     classifiers=[
         'Development Status :: 4 - Beta',