PaddlePaddle / Serving
Commit c1910f91
Authored May 28, 2021 by bjjwwang
add first example
Parent: af8c9ffc

Showing 4 changed files with 72 additions and 60 deletions (+72 -60)
python/examples/pipeline/PaddleClas/DarkNet53/benchmark.sh  +4 -4
python/paddle_serving_client/utils/__init__.py  +3 -3
python/paddle_serving_server/benchmark_utils.py  +16 -14
python/paddle_serving_server/parse_profile.py  +49 -39

python/examples/pipeline/PaddleClas/DarkNet53/benchmark.sh
@@ -15,7 +15,7 @@ for thread_num in 1 2 4 8 12 16
 do
     for batch_size in 1
     do
-        echo "---- ${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >> profile_log_$modelname
+        echo "#---- ${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >> profile_log_$modelname
         # Start one web service, If you start the service yourself, you can ignore it here.
         #python3 web_service.py >web.log 2>&1 &
         #sleep 3
@@ -23,15 +23,15 @@ do
         # --id is the serial number of the GPU card, Must be the same as the gpu id used by the server.
         nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
         nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
-        echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
+        echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTIL:', cpu_utilization)\n" > cpu_utilization.py
         # Start http client
         python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
         # Collect CPU metrics, Filter data that is zero momentarily, Record the maximum value of GPU memory and the average value of GPU utilization
         python3 cpu_utilization.py >> profile_log_$modelname
         grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
-        awk -F ' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
+        awk -F ' ' '{sum+=$1} END {print "GPU_UTIL:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
         # Show profiles
         python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
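
The two awk one-liners reduce the raw nvidia-smi CSV logs to two scalars per run: the peak of memory.used and the mean of utilization.gpu, after grep drops the momentary `0 %` samples. A minimal Python sketch of the same aggregation (ours, not part of the commit), assuming nvidia-smi's usual CSV shape of one header row followed by samples such as `1024 MiB` or `35 %`:

```python
# Sketch only: mirrors what the awk pipeline above computes.
def summarize_gpu_logs(use_log="gpu_use.log", util_log="gpu_utilization.log"):
    with open(use_log) as f:
        # skip the CSV header; keep the numeric field of "1024 MiB"
        mem = [float(l.split()[0]) for l in f.readlines()[1:] if l.strip()]
    with open(util_log) as f:
        # equivalent of grep -v '^0 %': drop momentary zero readings
        util = [float(l.split()[0]) for l in f.readlines()[1:]
                if l.strip() and not l.startswith("0 %")]
    return {
        "GPU_MEM": max(mem, default=0),                    # peak memory.used
        "GPU_UTIL": sum(util) / len(util) if util else 0,  # mean utilization.gpu
    }
```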

python/paddle_serving_client/utils/__init__.py
@@ -41,9 +41,9 @@ def show_latency(latency_list):
     info = "latency:\n"
     info += "mean: {}ms\n".format(np.mean(latency_array))
     info += "median: {}ms\n".format(np.median(latency_array))
-    info += "80 percent: {}ms\n".format(np.percentile(latency_array, 80))
-    info += "90 percent: {}ms\n".format(np.percentile(latency_array, 90))
-    info += "99 percent: {}ms\n".format(np.percentile(latency_array, 99))
+    info += "80_percent: {}ms\n".format(np.percentile(latency_array, 80))
+    info += "90_percent: {}ms\n".format(np.percentile(latency_array, 90))
+    info += "99_percent: {}ms\n".format(np.percentile(latency_array, 99))
     sys.stderr.write(info)
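
The only change here is `80 percent` becoming `80_percent` (likewise 90/99). The rename matters downstream: parse_profile.py in this same commit reads the parsed log as `benchmark_raw["90_percent"]`, so the key written by show_latency has to match. A small round-trip sketch with made-up latencies:

```python
# Made-up latencies; shows a show_latency-style line parsing back into
# the key handle_benchmark looks up.
import numpy as np
import yaml

latency_array = np.array([12.1, 15.3, 9.8, 30.2, 11.0])
info = "median: {}ms\n".format(np.median(latency_array))
info += "90_percent: {}ms\n".format(np.percentile(latency_array, 90))
parsed = yaml.safe_load(info)
print(parsed["90_percent"])  # roughly "24.24ms"; the trailing "ms" is stripped later
```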

python/paddle_serving_server/benchmark_utils.py
@@ -23,8 +23,8 @@ import paddle.inference as paddle_infer
 from pathlib import Path
 
 CUR_DIR = os.path.dirname(os.path.abspath(__file__))
-LOG_PATH_ROOT = f"{CUR_DIR}/../../tools/output"
+#LOG_PATH_ROOT = f"{CUR_DIR}/../../tools/output"
+LOG_PATH_ROOT = f"."
 
 class PaddleInferBenchmark(object):
     def __init__(self,
@@ -73,7 +73,6 @@ class PaddleInferBenchmark(object):
         # perf info
         self.perf_info = perf_info
-
         try:
             # required value
             self.model_name = model_info['model_name']
@@ -93,8 +92,8 @@ class PaddleInferBenchmark(object):
         self.postprocess_time_s = perf_info.get('postprocess_time_s', 0)
         self.total_time_s = perf_info.get('total_time_s', 0)
-        self.inference_time_s_90 = perf_info.get("inference_time_s_90", "")
-        self.inference_time_s_99 = perf_info.get("inference_time_s_99", "")
+        self.inference_time_s_90 = perf_info.get("inference_time_s_90", 0)
+        self.inference_time_s_99 = perf_info.get("inference_time_s_99", 0)
         self.succ_rate = perf_info.get("succ_rate", "")
         self.qps = perf_info.get("qps", "")
@@ -103,15 +102,17 @@ class PaddleInferBenchmark(object):
         # mem info
         if isinstance(resource_info, dict):
-            self.cpu_rss_mb = int(resource_info.get('cpu_rss_mb', 0))
-            self.cpu_vms_mb = int(resource_info.get('cpu_vms_mb', 0))
-            self.cpu_shared_mb = int(resource_info.get('cpu_shared_mb', 0))
-            self.cpu_dirty_mb = int(resource_info.get('cpu_dirty_mb', 0))
+            self.cpu_rss_mb = int("-1" if 'cpu_rss_mb' not in resource_info or resource_info.get('cpu_rss_mb').strip() == "" else resource_info.get('cpu_rss_mb', 0))
+            self.cpu_vms_mb = int("-1" if 'cpu_vms_mb' not in resource_info or resource_info.get('cpu_vms_mb').strip() == "" else resource_info.get('cpu_vms_mb', 0))
+            self.cpu_shared_mb = int("-1" if 'cpu_shared_mb' not in resource_info or resource_info.get('cpu_shared_mb').strip() == "" else resource_info.get('cpu_shared_mb', 0))
+            self.cpu_dirty_mb = int("-1" if 'cpu_dirty_mb' not in resource_info or resource_info.get('cpu_dirty_mb').strip() == "" else resource_info.get('cpu_dirty_mb', 0))
             self.cpu_util = round(resource_info.get('cpu_util', 0), 2)
 
-            self.gpu_rss_mb = int(resource_info.get('gpu_rss_mb', 0))
-            self.gpu_util = round(resource_info.get('gpu_util', 0), 2)
-            self.gpu_mem_util = round(resource_info.get('gpu_mem_util', 0), 2)
+            self.gpu_rss_mb = int("-1" if 'gpu_rss_mb' not in resource_info or resource_info.get('gpu_rss_mb').strip() == "" else resource_info.get('gpu_rss_mb', 0))
+            #self.gpu_util = round(resource_info.get('gpu_util', 0), 2)
+            self.gpu_util = resource_info.get('gpu_util', 0)
+            #self.gpu_mem_util = round(resource_info.get('gpu_mem_util', 0), 2)
+            self.gpu_mem_util = resource_info.get('gpu_mem_util', 0)
         else:
             self.cpu_rss_mb = 0
             self.cpu_vms_mb = 0
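
The four rewritten cpu_* assignments all encode one rule: a key that is absent, or present but blank, becomes -1; anything else goes through int(). A hypothetical helper (not in the commit) stating that rule once:

```python
# Hypothetical refactor with the same semantics as the inline conditionals.
def to_int_or_missing(info, key):
    val = info.get(key, "")
    if isinstance(val, str) and val.strip() == "":
        return -1  # absent or blank string: metric was not collected
    return int(val)

# e.g. self.cpu_rss_mb = to_int_or_missing(resource_info, 'cpu_rss_mb')
```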
@@ -170,6 +171,7 @@ class PaddleInferBenchmark(object):
             config_status['cpu_math_library_num_threads'] = config.cpu_math_library_num_threads()
         elif isinstance(config, dict):
+            config_status = {}
             config_status['runtime_device'] = config.get('runtime_device', "")
             config_status['ir_optim'] = config.get('ir_optim', "")
             config_status['enable_tensorrt'] = config.get('enable_tensorrt', "")
@@ -238,10 +240,10 @@ class PaddleInferBenchmark(object):
         self.logger.info(f"{identifier} total time spent(s): {self.total_time_s}")
         self.logger.info(
-            f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s * 1000, 1)}, inference_time(ms): {round(self.inference_time_s * 1000, 1)}, postprocess_time(ms): {round(self.postprocess_time_s * 1000, 1)}"
+            f"{identifier} preprocess_time(ms): {self.preprocess_time_s}, inference_time(ms): {self.inference_time_s}, postprocess_time(ms): {self.postprocess_time_s}"
         )
         if self.inference_time_s_90:
-            self.looger.info(
+            self.logger.info(
                 f"{identifier} 90%_cost: {self.inference_time_s_90}, 99%_cost: {self.inference_time_s_99}, succ_rate: {self.succ_rate}"
             )
         if self.qps:
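
Besides fixing the `looger` typo, this hunk drops the `round(... * 1000, 1)` conversion, so values are printed exactly as the caller supplies them. The `if self.inference_time_s_90:` guard still skips absent metrics after the default changed from "" to 0 earlier in this file, since both are falsy:

```python
# Sketch: the guard behaves the same for the old and new defaults.
for missing_default in ("", 0):
    inference_time_s_90 = missing_default
    if inference_time_s_90:
        print("90%_cost:", inference_time_s_90)  # not reached for "" or 0
```
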
python/paddle_serving_server/parse_profile.py
@@ -17,7 +17,7 @@ import os
 import yaml
 import argparse
-import .benchmark_utils
+from .benchmark_utils import PaddleInferBenchmark
 """
 {'CPU_UTILIZATION': 0.8, 'MAX_GPU_MEMORY': 0, 'GPU_UTILIZATION': '0 %', 'DAG': {'50': 670.256, '60': 670.256, '70': 670.765, '80': 671.23, '90': 687.546, '95': 687.546, '99': 687.546, 'avg': 670.755625, 'qps': 0.8, 'query_count': 8, 'succ': 1.0}, 'demo': {'midp': 669.484375, 'postp': 0.184875, 'prep': 1.001875}}
 """
@@ -37,6 +37,39 @@ class LogHandler(object):
     def append(self, new_str):
         self.fstr += new_str + "\n"
 
+def handle_benchmark(benchmark_config, benchmark_raw, indentifier):
+    model_info = {
+        'model_name': benchmark_config["model_name"],
+        'precision': benchmark_config["precision"]
+    }
+    data_info = {
+        'batch_size': benchmark_config["batch_size"],
+        'shape': benchmark_config["input_shape"],
+        'data_num': benchmark_config["num_of_samples"]
+    }
+    perf_info = {
+        'preprocess_time_s': "",
+        'inference_time_s': float(benchmark_raw["median"][0:-2]) / 1000,  # *** ms
+        'postprocess_time_s': "",
+        'total_time_s': "",
+        'inference_time_s_90': float(benchmark_raw["90_percent"][0:-2]) / 1000,  # *** ms
+        'inference_time_s_99': float(benchmark_raw["99_percent"][0:-2]) / 1000,  # *** ms
+        'qps': benchmark_raw["AVG_QPS"]
+    }
+    resource_info = {
+        'cpu_rss_mb': "",
+        'cpu_vms_mb': "",
+        'cpu_shared_mb': "",
+        'cpu_dirty_mb': "",
+        'cpu_util': benchmark_raw["CPU_UTIL"],
+        'gpu_rss_mb': "",
+        'gpu_util': benchmark_raw["GPU_UTIL"],
+        'gpu_mem_util': benchmark_raw["GPU_MEM"]
+    }
+    server_log = PaddleInferBenchmark(benchmark_config, model_info, data_info,
+                                      perf_info, resource_info)
+    server_log(indentifier)
 
 def parse_args():  # pylint: disable=doc-string-missing
     parser = argparse.ArgumentParser("serve")
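
handle_benchmark assumes the latency fields arrive as strings with a trailing "ms" (hence the `[0:-2]` slice) and converts them to seconds. A sketch of the dict one parsed log section might yield; the values are illustrative, the keys are the ones handle_benchmark reads:

```python
# Illustrative only: values are made up.
sample_raw = {
    "median": "670.25ms",     # [0:-2] strips "ms"; / 1000 gives seconds
    "90_percent": "687.54ms",
    "99_percent": "687.54ms",
    "AVG_QPS": 0.8,
    "CPU_UTIL": 0.8,
    "GPU_UTIL": 35.0,
    "GPU_MEM": 1024,
}
print(float(sample_raw["median"][0:-2]) / 1000)  # 0.67025
```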
@@ -65,41 +98,18 @@ if __name__ == "__main__":
     f = open(benchmark_cfg_filename, 'r')
     benchmark_config = yaml.load(f)
     f.close()
-    benchmark_raw_filename = args.benchmark_log
-    f = open(benchmark_raw_filename, 'r')
-    benchmark_raw = yaml.load(f)
-    f.close()
-
-    model_info = {
-        'model_name': benchmark_config["model_name"],
-        'precision': benchmark_config["precision"]
-    }
-    data_info = {
-        'batch_size': benchmark_config["batch_size"],
-        'shape': benchmark_config["input_shape"],
-        'data_num': benchmark_config["num_of_samples"]
-    }
-    perf_info = {
-        'preprocess_time_s': "",
-        'inference_time_s': benchmark_raw["DAG"]["avg"],
-        'postprocess_time_s': "",
-        'total_time_s': "",
-        'inference_time_s_90': benchmark_raw["DAG"]["90"],
-        'inference_time_s_99': benchmark_raw["DAG"]["99"],
-        'succ_rate': benchmark_raw["DAG"]["succ"],
-        'qps': benchmark_raw["DAG"]["qps"]
-    }
-    resource_info = {
-        'cpu_rss_mb': "",
-        'cpu_vms_mb': "",
-        'cpu_shared_mb': "",
-        'cpu_dirty_mb': "",
-        'cpu_util': benchmark_raw["CPU_MEM"],
-        'gpu_rss_mb': "",
-        'gpu_util': benchmark_raw["GPU_UTIL"],
-        'gpu_mem_util': benchmark_raw["GPU_MEM"]
-    }
-    server_log = benchmark_utils.PaddleInferBenchmark(
-        benchmark_config, model_info, data_info, perf_info, resource_info)
-    server_log('Serving')
+    benchmark_log_filename = args.benchmark_log
+    f = open(benchmark_log_filename, 'r')
+    lines = f.readlines()
+    line_no = 0
+    while line_no < len(lines):
+        if len(lines[line_no]) > 5 and lines[line_no].startswith("#---"):
+            iden = lines[line_no][5:-5]
+            line_no += 1
+            sub_log = lines[line_no:line_no + 13]
+            sub_dict = yaml.safe_load("".join(sub_log))
+            handle_benchmark(benchmark_config, sub_dict, iden)
+            line_no += 13
+        else:
+            line_no += 1
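
The rewritten main block no longer YAML-loads the whole log in one go: it scans for the `#----` banners that benchmark.sh now writes, treats the 13 lines after each banner as one YAML section, and hands each section to handle_benchmark. An illustration of the banner slicing, using the banner shape benchmark.sh emits for the DarkNet53 example:

```python
# Banner shape written by benchmark.sh's echo "#---- ... ----" line.
header = "#---- DarkNet53 thread num: 1 batch size: 1 mode:http ----\n"
iden = header[5:-5]  # drop the leading "#----" and the trailing "----\n"
print(repr(iden))    # ' DarkNet53 thread num: 1 batch size: 1 mode:http '
```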