PaddlePaddle / Serving

Commit e9278e8a
Authored on Nov 26, 2021 by felixhjh

check running environment feature

Parent: b2df4cc7

Showing 48 changed files with 22,018 additions and 2 deletions (+22018 -2)
python/paddle_serving_server/env_check/lac/lac_client/serving_client_conf.prototxt  +14 -0
python/paddle_serving_server/env_check/lac/lac_client/serving_client_conf.stream.prototxt  +0 -0
python/paddle_serving_server/env_check/lac/lac_dict/foobar.txt  +1 -0
python/paddle_serving_server/env_check/lac/lac_dict/lyric.txt  +44 -0
python/paddle_serving_server/env_check/lac/lac_dict/q2b.dic  +172 -0
python/paddle_serving_server/env_check/lac/lac_dict/tag.dic  +57 -0
python/paddle_serving_server/env_check/lac/lac_dict/userdict.txt  +10 -0
python/paddle_serving_server/env_check/lac/lac_dict/word.dic  +20940 -0
python/paddle_serving_server/env_check/lac/lac_model/__model__  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/crfw  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_0.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_0.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_1.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_1.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_2.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_2.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_3.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_3.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_4.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fc_4.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/fluid_time_file  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_0.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_0.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_1.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_1.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_2.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_2.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_3.b_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/gru_3.w_0  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/serving_server_conf.prototxt  +14 -0
python/paddle_serving_server/env_check/lac/lac_model/serving_server_conf.stream.prototxt  +0 -0
python/paddle_serving_server/env_check/lac/lac_model/word_emb  +0 -0
python/paddle_serving_server/env_check/run.py  +31 -0
python/paddle_serving_server/env_check/simple_web_service/config_cpu.yml  +48 -0
python/paddle_serving_server/env_check/simple_web_service/config_gpu.yml  +48 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_client/serving_client_conf.prototxt  +14 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_client/serving_client_conf.stream.prototxt  +0 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/__model__  +0 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/fc_0.b_0  +0 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/fc_0.w_0  +0 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/serving_server_conf.prototxt  +14 -0
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/serving_server_conf.stream.prototxt  +0 -0
python/paddle_serving_server/env_check/simple_web_service/web_service.py  +59 -0
python/paddle_serving_server/env_check/test_lac.py  +195 -0
python/paddle_serving_server/env_check/test_uci_pipeline.py  +148 -0
python/paddle_serving_server/env_check/util.py  +202 -0
python/paddle_serving_server/serve.py  +5 -2
python/requirements.txt  +2 -0
python/paddle_serving_server/env_check/lac/lac_client/serving_client_conf.prototxt
new file mode 100644
feed_var {
name: "word"
alias_name: "words"
is_lod_tensor: true
feed_type: 0
shape: -1
}
fetch_var {
name: "crf_decoding_0.tmp_0"
alias_name: "crf_decode"
is_lod_tensor: true
fetch_type: 0
shape: -1
}
python/paddle_serving_server/env_check/lac/lac_client/serving_client_conf.stream.prototxt (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_dict/foobar.txt
new file mode 100644
好人 12 n
\ No newline at end of file
python/paddle_serving_server/env_check/lac/lac_dict/lyric.txt
new file mode 100644
我沒有心
我沒有真實的自我
我只有消瘦的臉孔
所謂軟弱
所謂的順從一向是我
的座右銘
而我
沒有那海洋的寬闊
我只要熱情的撫摸
所謂空洞
所謂不安全感是我
的墓誌銘
而你
是否和我一般怯懦
是否和我一般矯作
和我一般囉唆
而你
是否和我一般退縮
是否和我一般肌迫
一般地困惑
我沒有力
我沒有滿腔的熱火
我只有滿肚的如果
所謂勇氣
所謂的認同感是我
隨便說說
而你
是否和我一般怯懦
是否和我一般矯作
是否對你來說
只是一場遊戲
雖然沒有把握
而你
是否和我一般退縮
是否和我一般肌迫
是否對你來說
只是逼不得已
雖然沒有藉口
\ No newline at end of file
python/paddle_serving_server/env_check/lac/lac_dict/q2b.dic
new file mode 100644
、 ,
。 .
— -
~ ~
‖ |
… .
‘ '
’ '
“ "
” "
〔 (
〕 )
〈 <
〉 >
「 '
」 '
『 "
』 "
〖 [
〗 ]
【 [
】 ]
∶ :
$ $
! !
" "
# #
% %
& &
' '
( (
) )
* *
+ +
, ,
- -
. .
/ /
0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
: :
; ;
< <
= =
> >
? ?
@ @
A a
B b
C c
D d
E e
F f
G g
H h
I i
J j
K k
L l
M m
N n
O o
P p
Q q
R r
S s
T t
U u
V v
W w
X x
Y y
Z z
[ [
\ \
] ]
^ ^
_ _
` `
a a
b b
c c
d d
e e
f f
g g
h h
i i
j j
k k
l l
m m
n n
o o
p p
q q
r r
s s
t t
u u
v v
w w
x x
y y
z z
{ {
| |
} }
 ̄ ~
〝 "
〞 "
﹐ ,
﹑ ,
﹒ .
﹔ ;
﹕ :
﹖ ?
﹗ !
﹙ (
﹚ )
﹛ {
﹜ {
﹝ [
﹞ ]
﹟ #
﹠ &
﹡ *
﹢ +
﹣ -
﹤ <
﹥ >
﹦ =
﹨ \
﹩ $
﹪ %
﹫ @
,
A a
B b
C c
D d
E e
F f
G g
H h
I i
J j
K k
L l
M m
N n
O o
P p
Q q
R r
S s
T t
U u
V v
W w
X x
Y y
Z z
python/paddle_serving_server/env_check/lac/lac_dict/tag.dic
new file mode 100644
0 a-B
1 a-I
2 ad-B
3 ad-I
4 an-B
5 an-I
6 c-B
7 c-I
8 d-B
9 d-I
10 f-B
11 f-I
12 m-B
13 m-I
14 n-B
15 n-I
16 nr-B
17 nr-I
18 ns-B
19 ns-I
20 nt-B
21 nt-I
22 nw-B
23 nw-I
24 nz-B
25 nz-I
26 p-B
27 p-I
28 q-B
29 q-I
30 r-B
31 r-I
32 s-B
33 s-I
34 t-B
35 t-I
36 u-B
37 u-I
38 v-B
39 v-I
40 vd-B
41 vd-I
42 vn-B
43 vn-I
44 w-B
45 w-I
46 xc-B
47 xc-I
48 PER-B
49 PER-I
50 LOC-B
51 LOC-I
52 ORG-B
53 ORG-I
54 TIME-B
55 TIME-I
56 O
python/paddle_serving_server/env_check/lac/lac_dict/userdict.txt
new file mode 100644
云计算 5
李小福 2 nr
创新办 3 i
easy_install 3 eng
好用 300
韩玉赏鉴 3 nz
八一双鹿 3 nz
台中
凱特琳 nz
Edu Trust认证 2000
python/paddle_serving_server/env_check/lac/lac_dict/word.dic
new file mode 100644
Diff collapsed (20,940 lines added, not shown here).
python/paddle_serving_server/env_check/lac/lac_model/__model__ (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/crfw (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_0.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_0.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_1.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_1.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_2.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_2.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_3.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_3.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_4.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fc_4.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/fluid_time_file (new file mode 100644)
python/paddle_serving_server/env_check/lac/lac_model/gru_0.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_0.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_1.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_1.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_2.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_2.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_3.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/gru_3.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/serving_server_conf.prototxt
new file mode 100644
feed_var {
name: "word"
alias_name: "words"
is_lod_tensor: true
feed_type: 0
shape: -1
}
fetch_var {
name: "crf_decoding_0.tmp_0"
alias_name: "crf_decode"
is_lod_tensor: true
fetch_type: 0
shape: -1
}
python/paddle_serving_server/env_check/lac/lac_model/serving_server_conf.stream.prototxt (new file mode 100644, file added)
python/paddle_serving_server/env_check/lac/lac_model/word_emb (new file mode 100644, file added)
python/paddle_serving_server/env_check/run.py
new file mode 100644

import pytest
import sys
import os

cpp_test_cases = ["test_lac.py::TestLAC::test_cpu", "test_lac.py::TestLAC::test_gpu"]
pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]


def run_test_cases(cases_list, case_type):
    old_stdout, old_stderr = sys.stdout, sys.stderr
    real_path = os.path.dirname(os.path.realpath(__file__))
    for case in cases_list:
        sys.stdout = open('/dev/null', 'w')
        sys.stderr = open('/dev/null', 'w')
        args_str = "--disable-warnings " + str(real_path) + "/" + case
        args = args_str.split(" ")
        res = pytest.main(args)
        sys.stdout, sys.stderr = old_stdout, old_stderr
        if res == 0:
            print("{} {} environment running success".format(case_type, case[-3:]))
        else:
            print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Install_CN.md to configure environment".format(case_type, case[-3:]))


def unset_proxy(key):
    os.unsetenv(key)


def check_env():
    if 'https_proxy' in os.environ or 'http_proxy' in os.environ:
        unset_proxy("https_proxy")
        unset_proxy("http_proxy")
    run_test_cases(cpp_test_cases, "C++")
    run_test_cases(pipeline_test_cases, "Pipeline")
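run.py above is the module that serve.py (at the end of this diff) imports and calls into. A minimal sketch of driving it directly, assuming the installed paddle_serving_server package ships this env_check module together with its bundled test data:

# Minimal sketch, not part of the commit: run the environment check by hand.
# Assumes paddle_serving_server is installed and exposes this env_check package.
from paddle_serving_server.env_check.run import check_env

if __name__ == "__main__":
    check_env()  # runs the C++ (test_lac.py) and Pipeline (test_uci_pipeline.py) cases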
python/paddle_serving_server/env_check/simple_web_service/config_cpu.yml
new file mode 100644

#worker_num, the maximum concurrency.
#When build_dag_each_worker=True, the framework creates worker_num processes, each building its own gRPC server and DAG.
##When build_dag_each_worker=False, the framework sets max_workers=worker_num for the gRPC thread pool of the main thread.
worker_num: 1

#HTTP port. rpc_port and http_port must not both be empty; when rpc_port is available and http_port is empty, no http_port is generated automatically.
rpc_port: 9998
http_port: 18082

dag:
  #op resource type: True for the thread model, False for the process model
  is_thread_op: False

  #tracer
  tracer:
    interval_s: 10

op:
  uci:
    #concurrency: thread-level when is_thread_op=True, otherwise process-level
    concurrency: 1

    #when the op config has no server_endpoints, the local service config is read from local_service_conf
    local_service_conf:
      #path of the uci model
      model_config: uci_housing_model

      #compute device type: when empty, decided by devices (CPU/GPU); 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
      device_type: 0

      #compute device IDs; device_type takes priority in deciding the device type.
      #devices "" or empty means CPU inference; "0" or "0,1,2" means GPU inference on the listed cards
      devices: "" # "0,1"

      #client type: brpc, grpc or local_predictor. local_predictor does not start a Serving service and predicts in-process
      client_type: local_predictor

      #fetch list, keyed by the alias_name of fetch_var in client_config
      fetch_list: ["price"]

      #precision of inference; lowering the precision can speed up prediction
      #GPU supports: "fp32"(default), "fp16", "int8";
      #CPU supports: "fp32"(default), "fp16", "bf16"(mkldnn); not supported: "int8"
      precision: "fp32"

      #ir_optim switch, default False
      ir_optim: True

      #use_mkldnn switch, default False; use_mkldnn improves performance only together with ir_optim
      use_mkldnn: True
python/paddle_serving_server/env_check/simple_web_service/config_gpu.yml
new file mode 100644

#worker_num, the maximum concurrency.
#When build_dag_each_worker=True, the framework creates worker_num processes, each building its own gRPC server and DAG.
##When build_dag_each_worker=False, the framework sets max_workers=worker_num for the gRPC thread pool of the main thread.
worker_num: 1

#HTTP port. rpc_port and http_port must not both be empty; when rpc_port is available and http_port is empty, no http_port is generated automatically.
rpc_port: 9998
http_port: 18082

dag:
  #op resource type: True for the thread model, False for the process model
  is_thread_op: False

  #tracer
  tracer:
    interval_s: 10

op:
  uci:
    #concurrency: thread-level when is_thread_op=True, otherwise process-level
    concurrency: 1

    #when the op config has no server_endpoints, the local service config is read from local_service_conf
    local_service_conf:
      #path of the uci model
      model_config: uci_housing_model

      #compute device type: when empty, decided by devices (CPU/GPU); 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
      device_type: 1

      #compute device IDs; device_type takes priority in deciding the device type.
      #devices "" or empty means CPU inference; "0" or "0,1,2" means GPU inference on the listed cards
      devices: "0" # "0,1"

      #client type: brpc, grpc or local_predictor. local_predictor does not start a Serving service and predicts in-process
      client_type: local_predictor

      #fetch list, keyed by the alias_name of fetch_var in client_config
      fetch_list: ["price"]

      #precision of inference; lowering the precision can speed up prediction
      #GPU supports: "fp32"(default), "fp16", "int8";
      #CPU supports: "fp32"(default), "fp16", "bf16"(mkldnn); not supported: "int8"
      precision: "fp32"

      #ir_optim switch, default False
      ir_optim: True

      #use_mkldnn switch, default False; use_mkldnn improves performance only together with ir_optim
      use_mkldnn: True
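Both configs above are consumed by the pipeline test further down, which starts the web service from a shell command. A hedged launch sketch (the interpreter name is an assumption; the tests take it from the PYTHON_EXECUTABLE environment variable):

# Sketch of how test_uci_pipeline.py launches the pipeline service with one of these configs.
# "python3" is an assumption; the tests use the interpreter named in PYTHON_EXECUTABLE.
import subprocess

server = subprocess.Popen("python3 web_service.py config_cpu.yml", shell=True)
# The service then listens on rpc_port 9998 and http_port 18082 from the config.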
python/paddle_serving_server/env_check/simple_web_service/uci_housing_client/serving_client_conf.prototxt
new file mode 100644
feed_var {
name: "x"
alias_name: "x"
is_lod_tensor: false
feed_type: 1
shape: 13
}
fetch_var {
name: "fc_0.tmp_1"
alias_name: "price"
is_lod_tensor: false
fetch_type: 1
shape: 1
}
python/paddle_serving_server/env_check/simple_web_service/uci_housing_client/serving_client_conf.stream.prototxt (new file mode 100644, file added)
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/__model__ (new file mode 100644, file added)
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/fc_0.b_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/fc_0.w_0 (new file mode 100644, file added)
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/serving_server_conf.prototxt
new file mode 100644
feed_var {
name: "x"
alias_name: "x"
is_lod_tensor: false
feed_type: 1
shape: 13
}
fetch_var {
name: "fc_0.tmp_1"
alias_name: "price"
is_lod_tensor: false
fetch_type: 1
shape: 1
}
python/paddle_serving_server/env_check/simple_web_service/uci_housing_model/serving_server_conf.stream.prototxt (new file mode 100644, file added)
python/paddle_serving_server/env_check/simple_web_service/web_service.py
new file mode 100644
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server.web_service import WebService, Op
import logging
import numpy as np
import sys

_LOGGER = logging.getLogger()


class UciOp(Op):
    def init_op(self):
        self.separator = ","
        self.batch_separator = ";"

    def preprocess(self, input_dicts, data_id, log_id):
        (_, input_dict), = input_dicts.items()
        _LOGGER.error("UciOp::preprocess >>> log_id:{}, input:{}".format(log_id, input_dict))
        x_value = input_dict["x"].split(self.batch_separator)
        x_lst = []
        for x_val in x_value:
            x_lst.append(
                np.array([
                    float(x.strip()) for x in x_val.split(self.separator)
                ]).reshape(1, 13))
        input_dict["x"] = np.concatenate(x_lst, axis=0)
        proc_dict = {}
        return input_dict, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
        _LOGGER.info("UciOp::postprocess >>> data_id:{}, log_id:{}, fetch_dict:{}".format(
            data_id, log_id, fetch_dict))
        fetch_dict["price"] = str(fetch_dict["price"])
        return fetch_dict, None, ""


class UciService(WebService):
    def get_pipeline_response(self, read_op):
        uci_op = UciOp(name="uci", input_ops=[read_op])
        return uci_op


uci_service = UciService(name="uci")
uci_service.prepare_pipeline_config("config.yml")
uci_service.run_service()
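With the service above running, the uci op can be queried over HTTP on the http_port from the config. The request below mirrors predict_pipeline_http() in test_uci_pipeline.py (URL and payload shape come from that test; everything else is illustrative):

# Sketch of an HTTP request to the pipeline service above (port 18082 from config_*.yml).
import json
import requests

feed = {"key": ["x"],
        "value": ["0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, "
                  "-0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332"]}
r = requests.post("http://127.0.0.1:18082/uci/prediction", data=json.dumps(feed))
print(r.json())  # the predicted "price" is returned in the "value" field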
python/paddle_serving_server/env_check/test_lac.py
new file mode 100644

import os
import subprocess
import numpy as np
import copy
import cv2
import sys

from paddle_serving_client import Client, HttpClient
from paddle_serving_app.reader import LACReader
import paddle.inference as paddle_infer

from util import *


class TestLAC(object):
    def setup_class(self):
        serving_util = ServingTest(data_path="lac", example_path="lac", model_dir="lac_model",
                                   client_dir="lac_client")
        serving_util.check_model_data_exist()
        self.get_truth_val_by_inference(self)
        self.serving_util = serving_util

    def teardown_method(self):
        print_log(["stderr.log", "stdout.log",
                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
        kill_process(9293)
        self.serving_util.release()

    def get_truth_val_by_inference(self):
        reader = LACReader()
        line = "我爱北京天安门"
        feed_data = reader.process(line)
        input_dict = {
            "word": np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1),
            "word.lod": [0, len(feed_data), 2 * len(feed_data)]
        }

        pd_config = paddle_infer.Config("lac_model")
        pd_config.disable_gpu()
        pd_config.switch_ir_optim(False)

        predictor = paddle_infer.create_predictor(pd_config)

        input_names = predictor.get_input_names()
        for i, input_name in enumerate(input_names):
            input_handle = predictor.get_input_handle(input_name)
            # set the variable-length (LoD) tensor
            input_handle.set_lod([input_dict[f"{input_name}.lod"]])
            input_handle.copy_from_cpu(input_dict[input_name])

        predictor.run()

        output_data_dict = {}
        output_names = predictor.get_output_names()
        for _, output_data_name in enumerate(output_names):
            output_handle = predictor.get_output_handle(output_data_name)
            output_data = output_handle.copy_to_cpu()
            output_data_dict[output_data_name] = output_data
        # align with the Serving output
        output_data_dict["crf_decode"] = output_data_dict["save_infer_model/scale_0"]
        del output_data_dict["save_infer_model/scale_0"]
        self.truth_val = output_data_dict
        print(self.truth_val, self.truth_val["crf_decode"].shape)

    def predict_brpc(self, batch_size=2):
        reader = LACReader()
        line = "我爱北京天安门"
        feed_data = reader.process(line)
        feed_dict = {
            "words": np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1),
            "words.lod": [0, len(feed_data), 2 * len(feed_data)]
        }
        fetch = ["crf_decode"]
        endpoint_list = ['127.0.0.1:9293']

        client = Client()
        client.load_client_config(self.serving_util.client_config)
        client.connect(endpoint_list)

        fetch_map = client.predict(feed=feed_dict, fetch=fetch, batch=True)
        print(fetch_map)
        return fetch_map

    def predict_http(self, mode="proto", compress=False, batch_size=2):
        reader = LACReader()
        line = "我爱北京天安门"
        feed_data = reader.process(line)
        feed_dict = {
            "words": np.array(feed_data + feed_data).reshape(len(feed_data) * 2, 1),
            "words.lod": [0, len(feed_data), 2 * len(feed_data)]
        }
        fetch = ["crf_decode"]

        client = HttpClient()
        client.load_client_config(self.serving_util.client_config)
        if mode == "proto":
            client.set_http_proto(True)
        elif mode == "json":
            client.set_http_proto(False)
        elif mode == "grpc":
            client.set_use_grpc_client(True)
        else:
            exit(-1)
        if compress:
            client.set_response_compress(True)
            client.set_request_compress(True)
        client.connect(["127.0.0.1:9293"])

        fetch_map = client.predict(feed=feed_dict, fetch=fetch, batch=True)
        result_dict = {}
        print(fetch_map)
        if isinstance(fetch_map, dict):
            for tensor in fetch_map["outputs"][0]["tensor"]:
                result_dict[tensor["alias_name"]] = np.array(tensor["int64_data"]).reshape(tensor["shape"])
        else:
            for tensor in fetch_map.outputs[0].tensor:
                result_dict[tensor.alias_name] = np.array(tensor.int64_data).reshape(tensor.shape)
        print(result_dict)
        return result_dict

    def test_cpu(self):
        # 1. start server
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} -m paddle_serving_server.serve --model lac_model --port 9293",
            sleep=5,
        )

        # 2. resource check
        #assert count_process_num_on_port(9293) == 1
        #assert check_gpu_memory(2) is False

        # 3. keywords check

        # 4. predict by brpc
        # batch_size 2
        result_data = self.predict_brpc(batch_size=2)
        # drop the lod info
        del result_data["crf_decode.lod"]
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # predict by http
        # batch_size 2
        result_data = self.predict_http(mode="proto", batch_size=2)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="grpc", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # # compress
        result_data = self.predict_http(mode="proto", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)

        # 5. release
        kill_process(9293)

    def test_gpu(self):
        # 1. start server
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} -m paddle_serving_server.serve --model lac_model --port 9293 --gpu_ids 0",
            sleep=8,
        )

        # 2. resource check
        assert count_process_num_on_port(9293) == 1
        #assert check_gpu_memory(3) is True
        #assert check_gpu_memory(1) is False

        # 3. keywords check
        check_keywords_in_server_log("Sync params from CPU to GPU", filename="stderr.log")

        # 4. predict by brpc
        # batch_size 2
        result_data = self.predict_brpc(batch_size=2)
        # drop the lod info
        del result_data["crf_decode.lod"]
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # predict by http
        # batch_size 2
        result_data = self.predict_http(mode="proto", batch_size=2)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="grpc", batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        # # compress
        result_data = self.predict_http(mode="proto", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)
        result_data = self.predict_http(mode="json", compress=True, batch_size=1)
        self.serving_util.check_result(result_data=result_data, truth_data=self.truth_val, batch_size=1, delta=1)

        # 5. release
        kill_process(9293, 2)
python/paddle_serving_server/env_check/test_uci_pipeline.py
new file mode 100644

import os
import subprocess
import numpy as np
import copy
import cv2
import requests
import json
import sys

from paddle_serving_server.pipeline import PipelineClient
from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize, RCNNPostprocess
from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
import paddle.inference as paddle_infer

from util import *


class TestUCIPipeline(object):
    def setup_class(self):
        serving_util = ServingTest(data_path="fit_a_line", example_path="simple_web_service",
                                   model_dir="uci_housing_model",
                                   client_dir="uci_housing_client")
        serving_util.check_model_data_exist()
        self.get_truth_val_by_inference(self)
        self.serving_util = serving_util

    def teardown_method(self):
        print_log(["stderr.log", "stdout.log",
                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
        kill_process(9998)
        self.serving_util.release()

    def get_truth_val_by_inference(self):
        data = np.array([0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583,
                         -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]).astype("float32")[np.newaxis, :]
        input_dict = {"x": data}

        pd_config = paddle_infer.Config("uci_housing_model/")
        pd_config.disable_gpu()
        pd_config.switch_ir_optim(False)

        predictor = paddle_infer.create_predictor(pd_config)

        input_names = predictor.get_input_names()
        for i, input_name in enumerate(input_names):
            input_handle = predictor.get_input_handle(input_name)
            input_handle.copy_from_cpu(input_dict[input_name])

        predictor.run()

        output_data_dict = {}
        output_names = predictor.get_output_names()
        for _, output_data_name in enumerate(output_names):
            output_handle = predictor.get_output_handle(output_data_name)
            output_data = output_handle.copy_to_cpu()
            output_data_dict[output_data_name] = output_data
        # align with the Serving output
        output_data_dict["prob"] = output_data_dict["fc_0.tmp_1"]
        del output_data_dict["fc_0.tmp_1"]
        self.truth_val = output_data_dict
        print(self.truth_val, self.truth_val["prob"].shape)

    def predict_pipeline_rpc(self, batch_size=1):
        # 1. prepare feed_data
        feed_dict = {'x': '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332'}
        # TODO: the original example does not support batching

        # 2. init client
        # fetch = ["label", "prob"]
        client = PipelineClient()
        client.connect(['127.0.0.1:9998'])

        # 3. predict for fetch_map
        ret = client.predict(feed_dict=feed_dict)
        print(ret)
        # convert to a dict
        result = {"prob": np.array(eval(ret.value[0]))}
        print(result)
        return result

    def predict_pipeline_http(self, batch_size=1):
        # 1. prepare feed_data
        data = '0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, ' \
               '-0.0332'
        feed_dict = {"key": [], "value": []}
        # TODO: the original example does not support batching
        feed_dict["key"].append("x")
        feed_dict["value"].append(data)

        # 2. predict for fetch_map
        url = "http://127.0.0.1:18082/uci/prediction"
        r = requests.post(url=url, data=json.dumps(feed_dict))
        print(r.json())
        # convert to a dict of numpy arrays
        result = {"prob": np.array(eval(r.json()["value"][0]))}
        return result

    def test_cpu(self):
        # 1. start server
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} web_service.py config_cpu.yml",
            sleep=5,
        )

        # 2. resource check
        assert count_process_num_on_port(9998) == 1  # gRPC Server
        assert count_process_num_on_port(18082) == 1  # gRPC gateway (proxy/forwarding)
        #assert check_gpu_memory(0) is False

        # 3. keywords check
        check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log")

        # 4. predict by rpc
        # batch_size=1
        result = self.predict_pipeline_rpc(batch_size=1)
        self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
        # # predict by http
        result = self.predict_pipeline_http(batch_size=1)  # batch_size=1
        self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)

        # 5. release
        kill_process(9998)
        kill_process(18082)

    def test_gpu(self):
        # 1. start server
        self.serving_util.start_server_by_shell(
            cmd=f"{self.serving_util.py_version} web_service.py config_gpu.yml",
            sleep=5,
        )

        # 2. resource check
        assert count_process_num_on_port(9998) == 1  # gRPC Server
        assert count_process_num_on_port(18082) == 1  # gRPC gateway (proxy/forwarding)
        #assert check_gpu_memory(0) is False

        # 4. predict by rpc
        # batch_size=1
        result = self.predict_pipeline_rpc(batch_size=1)
        self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
        # # predict by http
        result = self.predict_pipeline_http(batch_size=1)  # batch_size=1
        self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)

        # 5. release
        kill_process(9998)
        kill_process(18082)
python/paddle_serving_server/env_check/util.py
new file mode 100644

import os
import pynvml
import argparse
import base64
import subprocess
import numpy as np


class ServingTest(object):
    def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir: str):
        """
        The following environment variables are expected:
        CODE_PATH: parent directory of the repo
        DATA_PATH: root directory of the datasets
        py_version: python version, python3.6~3.8
        """
        code_path = os.path.dirname(os.path.realpath(__file__))
        self.data_path = f"{code_path}/{data_path}/"
        self.example_path = f"{code_path}/{example_path}/"
        self.py_version = os.environ.get("PYTHON_EXECUTABLE")
        self.model_dir = model_dir
        self.client_config = f"{client_dir}/serving_client_conf.prototxt"

        os.chdir(self.example_path)
        print("======================cur path======================")
        print(os.getcwd())
        self.check_model_data_exist()

    def check_model_data_exist(self):
        if not os.path.exists(f"./{self.model_dir}"):
            # symlink the model data
            dir_path, dir_names, file_names = next(os.walk(self.data_path))
            for dir_ in dir_names:
                abs_path = os.path.join(dir_path, dir_)
                os.system(f"ln -s {abs_path} {dir_}")
            for file in file_names:
                abs_path = os.path.join(dir_path, file)
                os.system(f"ln -s {abs_path} {file}")

    def start_server_by_shell(self, cmd: str, sleep: int = 5, err="stderr.log", out="stdout.log", wait=False):
        self.err = open(err, "w")
        self.out = open(out, "w")
        p = subprocess.Popen(cmd, shell=True, stdout=self.out, stderr=self.err)
        os.system(f"sleep {sleep}")

        if wait:
            p.wait()

        print_log([err, out])

    @staticmethod
    def check_result(result_data: dict, truth_data: dict, batch_size=1, delta=1e-3):
        # flatten
        predict_result = {}
        truth_result = {}
        for key, value in result_data.items():
            predict_result[key] = value.flatten()
        for key, value in truth_data.items():
            truth_result[key] = np.repeat(value, repeats=batch_size, axis=0).flatten()
        # print("predicted:", predict_result)
        # print("ground truth:", truth_result)

        # compare
        for key in predict_result.keys():
            diff_array = diff_compare(predict_result[key], truth_result[key])
            diff_count = np.sum(diff_array > delta)
            assert diff_count == 0, f"total: {np.size(diff_array)} diff count: {diff_count} max: {np.max(diff_array)}"
        # for key in predict_result.keys():
        #     for i, data in enumerate(predict_result[key]):
        #         diff = sig_fig_compare(data, truth_result[key][i])
        #         assert diff < delta, f"data:{data} truth:{truth_result[key][i]} diff is {diff} > {delta}, index:{i}"

    @staticmethod
    def parse_http_result(output):
        # convert the proto-format data returned by the http client into a dict of numpy arrays
        # todo: only float_data is supported
        result_dict = {}
        if isinstance(output, dict):
            for tensor in output["outputs"][0]["tensor"]:
                result_dict[tensor["alias_name"]] = np.array(tensor["float_data"]).reshape(tensor["shape"])
        else:
            for tensor in output.outputs[0].tensor:
                result_dict[tensor.alias_name] = np.array(tensor.float_data).reshape(tensor.shape)
        return result_dict

    @staticmethod
    def release(keywords="web_service.py"):
        #os.system("kill -9 $(ps -ef | grep serving | awk '{print $2}') > /dev/null 2>&1")
        os.system("kill -9 $(ps -ef | grep " + keywords + " | awk '{print $2}') > /dev/null 2>&1")


def kill_process(port, sleep_time=0):
    command = "kill -9 $(netstat -nlp | grep :" + str(port) + " | awk '{print $7}' | awk -F'/' '{{ print $1 }}') > /dev/null 2>&1"
    os.system(command)
    # avoid the port staying occupied
    os.system(f"sleep {sleep_time}")


def check_gpu_memory(gpu_id):
    pynvml.nvmlInit()
    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
    mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
    mem_used = mem_info.used / 1024 ** 2
    print(f"GPU-{gpu_id} memory used:", mem_used)
    return mem_used > 100


def count_process_num_on_port(port):
    command = "netstat -nlp | grep :" + str(port) + " | wc -l"
    count = eval(os.popen(command).read())
    print(f"port-{port} processes num:", count)
    return count


def check_keywords_in_server_log(words: str, filename="stderr.log"):
    p = subprocess.Popen(f"grep '{words}' {filename} > grep.log && head grep.log", shell=True)
    p.wait()
    assert p.returncode == 0, "keywords not found"


def cv2_to_base64(image):
    return base64.b64encode(image).decode('utf8')


def sig_fig_compare(num0, num1, delta=5):
    difference = num0 - num1
    num0_int_length = len(str(int(num0)))
    num1_int_length = len(str(int(num1)))
    num0_int = int(num0)
    num1_int = int(num1)
    if num0 < 1 and num1 < 1 and difference < 1:
        return difference
    elif num0_int_length == num1_int_length:
        if num0_int_length >= 5:
            return abs(num0_int - num1_int)
        else:
            scale = 5 - num1_int_length
            num0_padding = num0 * scale
            num1_padding = num1 * scale
            return abs(num0_padding - num1_padding) / (10 * scale)
    elif num0_int_length != num1_int_length:
        return difference


def diff_compare(array1, array2):
    diff = np.abs(array1 - array2)
    return diff


def print_log(file_list, iden=""):
    for file in file_list:
        print(f"======================{file} {iden}=====================")
        if os.path.exists(file):
            with open(file, "r") as f:
                print(f.read())
            if file.startswith("log") or file.startswith("PipelineServingLogs"):
                os.remove(file)
        else:
            print(f"{file} not exist")
    print("======================================================")


def parse_prototxt(file):
    with open(file, "r") as f:
        lines = [i.strip().split(":") for i in f.readlines()]
    engines = {}
    for i in lines:
        if len(i) > 1:
            if i[0] in engines:
                engines[i[0]].append(i[1].strip())
            else:
                engines[i[0]] = [i[1].strip()]
    return engines


def default_args():
    parser = argparse.ArgumentParser()
    args = parser.parse_args([])
    args.thread = 2
    args.port = 9292
    args.device = "cpu"
    args.gpu_ids = [""]
    args.op_num = 0
    args.op_max_batch = 32
    args.model = [""]
    args.workdir = "workdir"
    args.use_mkl = False
    args.precision = "fp32"
    args.use_calib = False
    args.mem_optim_off = False
    args.ir_optim = False
    args.max_body_size = 512 * 1024 * 1024
    args.use_encryption_model = False
    args.use_multilang = False
    args.use_trt = False
    args.use_lite = False
    args.use_xpu = False
    args.product_name = None
    args.container_id = None
    args.gpu_multi_stream = False
    return args
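One practical note on the helpers above: ServingTest builds its server launch commands from the PYTHON_EXECUTABLE environment variable, so the environment check only works when it is set. A small sketch (the value shown is only a placeholder):

# Sketch, not part of the commit: ensure PYTHON_EXECUTABLE is set before running
# the checks; "python3" here is only a placeholder value.
import os

os.environ.setdefault("PYTHON_EXECUTABLE", "python3")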
python/paddle_serving_server/serve.py
...
@@ -34,6 +34,7 @@ import socket
 from paddle_serving_server.env import CONF_HOME
 import signal
 from paddle_serving_server.util import *
+from paddle_serving_server.env_check.run import *
 # web_service.py is still used by Pipeline.
...
@@ -114,7 +115,7 @@ def serve_args():
         type=str,
         default="start",
         nargs="?",
-        help="stop or start PaddleServing")
+        help="stop or start PaddleServing, check running environemnt")
     parser.add_argument(
         "--thread",
         type=int,
...
@@ -446,7 +447,9 @@ if __name__ == "__main__":
             os._exit(0)
         else:
             os._exit(-1)
+    elif args.server == "check":
+        check_env()
+        os._exit(0)
     for single_model_config in args.model:
         if os.path.isdir(single_model_config):
             pass
...
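The serve.py change above wires check_env() into the command line: besides start and stop, the positional server argument now accepts check. A hedged usage sketch (not part of the commit; the interpreter name is an assumption):

# Hedged sketch, not part of the commit: invoke the new environment check.
# Shell equivalent (interpreter name "python3" is an assumption):
#   python3 -m paddle_serving_server.serve check
import subprocess

subprocess.run(["python3", "-m", "paddle_serving_server.serve", "check"])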
python/requirements.txt
...
@@ -20,3 +20,5 @@ sentencepiece==0.1.92; platform_machine != "aarch64"
 sentencepiece; platform_machine == "aarch64"
 opencv-python==4.2.0.32; platform_machine != "aarch64"
 opencv-python; platform_machine == "aarch64"
+pytest
+pynvml