PaddlePaddle / Serving
Commit 0e406beb
Authored May 17, 2021 by bjjwwang
Parent: a4d6e1aa

fix pddet benchmark
Showing 15 changed files with 187 additions and 103 deletions (+187 −103)
python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.sh                +1 −1
python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.sh                +1 −1
python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.sh     +1 −1
python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.sh        +1 −1
python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.sh                +1 −1
python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.sh           +1 −1
python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.sh             +1 −1
python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.sh           +1 −1
python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.sh          +1 −1
python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py           +24 −4
python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.sh           +35 −27
python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py           +25 −5
python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.sh           +34 −26
python/examples/pipeline/PaddleDetection/yolov3/benchmark.py                +26 −6
python/examples/pipeline/PaddleDetection/yolov3/benchmark.sh                +34 −26
python/examples/pipeline/PaddleClas/HRNet_W18_C/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-HRNet_W18_C"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/MobileNetV1/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-MobileNetV1"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/MobileNetV3_large_x1_0/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-MobileNetV3_large_x1_0"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/ResNeXt101_vd_64x4d/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-ResNeXt101_vd_64x4d"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/ResNet50_vd/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-ResNet50_vd"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/ResNet50_vd_FPGM/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-ResNet50_vd_FPGM"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/ResNet50_vd_KL/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-ResNet50_vd_KL"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/ResNet50_vd_PACT/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-ResNet50_vd_PACT"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleClas/ShuffleNetV2_x1_0/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.6"
-modelname="clas-DarkNet53"
+modelname="clas-ShuffleNetV2_x1_0"
 # HTTP
 #ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
...
python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.py

...
@@ -46,23 +46,43 @@ def run_http(idx, batch_size):
     with open(os.path.join(".", "000000570688.jpg"), 'rb') as file:
         image_data1 = file.read()
     image = cv2_to_base64(image_data1)
+    latency_list = []
     start = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         data = {"key": [], "value": []}
         for j in range(batch_size):
             data["key"].append("image_" + str(j))
             data["value"].append(image)
         r = requests.post(url=url, data=json.dumps(data))
+        l_end = time.time()
+        total_num += 1
         end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
         if end - start > 70:
-            print("70s end")
+            #print("70s end")
             break
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]


 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http, thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number / total_cost))
+    show_latency(result[1])


 def run_rpc(thread, batch_size):
     pass
...
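Note: the extended run_http above returns [[end - start], latency_list, [total_num]] per client thread, and multithread_http passes the merged per-request latencies (result[1], in milliseconds) to show_latency. The real show_latency comes from the shared benchmark utilities; a minimal sketch of the kind of summary such a helper could print, assuming a flat list of millisecond latencies, is:

    import numpy as np

    # Hypothetical stand-in for show_latency(); the real helper lives in the
    # shared benchmark utilities and may print a different set of statistics.
    def show_latency_sketch(latency_list):
        latency = np.array(latency_list)  # per-request latencies in milliseconds
        print("mean   : {:.3f} ms".format(latency.mean()))
        print("median : {:.3f} ms".format(np.median(latency)))
        print("90 %ile: {:.3f} ms".format(np.percentile(latency, 90)))
        print("99 %ile: {:.3f} ms".format(np.percentile(latency, 99)))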
python/examples/pipeline/PaddleDetection/faster_rcnn/benchmark.sh

 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="faster_rcnn_r50_fpn_1x_coco"
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="det-FasterRCNN"
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
+# Create yaml,If you already have the config.yaml, ignore it.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+echo "Starting HTTP Clients..."
+# Start a client in each thread, tesing the case of multiple threads.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-echo "#----FasterRCNN thread num: $thread_num batch size: $batch_size mode:http ----" >> profile_log_$modelname
-rm -rf PipelineServingLogs
-rm -rf cpu_utilization.py
-python3 web_service.py > web.log 2>&1 &
-sleep 3
-nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >> profile_log_$modelname
+# Start one web service, If you start the service yourself, you can ignore it here.
+#python3 web_service.py >web.log 2>&1 &
+#sleep 3
+# --id is the serial number of the GPU card, Must be the same as the gpu id used by the server.
+nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
 echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-python3 benchmark.py run http $thread_num $batch_size
-python3 cpu_utilization.py >> profile_log_$modelname
-python3 -m paddle_serving_server_gpu.profiler >> profile_log_$modelname
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
-python3 benchmark.py dump benchmark.log benchmark.tmp
-mv benchmark.tmp benchmark.log
-awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-cat benchmark.log >> profile_log_$modelname
-python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-#rm -rf gpu_use.log gpu_utilization.log
+# Start http client
+python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+# Collect CPU metrics, Filter data that is zero momentarily, Record the maximum value of GPU memory and the average value of GPU utilization
+python3 cpu_utilization.py >> profile_log_$modelname
+grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+awk -F ' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+# Show profiles
+python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+tail -n 8 profile >> profile_log_$modelname
+echo '' >> profile_log_$modelname
 done
 done
+# Kill all nvidia-smi background task.
+pkill nvidia-smi
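Note: both the old and the new script write a small cpu_utilization.py helper via the echo line kept above. Spelled out, the helper is just the following (whether the \n escapes are expanded depends on the shell; bash's plain echo needs -e, or printf, for the file to come out on three lines):

    # Intended contents of the generated cpu_utilization.py
    # (assuming the shell expands the \n escapes in the echo line).
    import psutil

    cpu_utilization = psutil.cpu_percent(1, False)  # CPU usage (%) sampled over a 1-second window
    print('CPU_UTILIZATION:', cpu_utilization)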
python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.py

...
@@ -36,7 +36,7 @@ def gen_yml(device, gpu_id):
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
         config["op"]["ppyolo_mbv3"]["local_service_conf"]["device_type"] = 1
-        config["op"]["ppyolo_mbv3"]["local_service_conf"]["devices"] = gpu_id
+        config["op"]["ppyolo_mbv3"]["local_service_conf"]["devices"] = gpu_id
     with open("config2.yml", "w") as fout:
         yaml.dump(config, fout, default_flow_style=False)
...
@@ -46,23 +46,43 @@ def run_http(idx, batch_size):
     with open(os.path.join(".", "000000570688.jpg"), 'rb') as file:
         image_data1 = file.read()
     image = cv2_to_base64(image_data1)
+    latency_list = []
     start = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         data = {"key": [], "value": []}
         for j in range(batch_size):
             data["key"].append("image_" + str(j))
             data["value"].append(image)
         r = requests.post(url=url, data=json.dumps(data))
+        l_end = time.time()
+        total_num += 1
         end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
         if end - start > 70:
-            print("70s end")
+            #print("70s end")
            break
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]


 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http, thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number / total_cost))
+    show_latency(result[1])


 def run_rpc(thread, batch_size):
     pass
...
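Note: the first hunk above touches gen_yml, which rewrites the example's pipeline config before the run. A self-contained sketch of that pattern, with the key names taken from the hunk and the template file name config.yml assumed:

    import yaml

    # Sketch of the gen_yml() pattern used by these benchmarks; key names follow
    # the hunk above, the "config.yml" template name is an assumption.
    def gen_yml(device, gpu_id, op_name="ppyolo_mbv3"):
        with open("config.yml") as fin:
            config = yaml.safe_load(fin)
        config["dag"]["tracer"] = {"interval_s": 30}
        if device == "gpu":
            config["op"][op_name]["local_service_conf"]["device_type"] = 1
            config["op"][op_name]["local_service_conf"]["devices"] = gpu_id
        with open("config2.yml", "w") as fout:
            yaml.dump(config, fout, default_flow_style=False)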
python/examples/pipeline/PaddleDetection/ppyolo_mbv3/benchmark.sh

 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="ppyolo_mbv3_large"
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="det-PPYoloMbv3"
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
+# Create yaml,If you already have the config.yaml, ignore it.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1
+echo "Starting HTTP Clients..."
+# Start a client in each thread, tesing the case of multiple threads.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-echo "#----PPyolo thread num: $thread_num batch size: $batch_size mode:http ----" >> profile_log_$modelname
-rm -rf PipelineServingLogs
-rm -rf cpu_utilization.py
-python3 web_service.py > web.log 2>&1 &
-sleep 3
-nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >> profile_log_$modelname
+# Start one web service, If you start the service yourself, you can ignore it here.
+#python3 web_service.py >web.log 2>&1 &
+#sleep 3
+# --id is the serial number of the GPU card, Must be the same as the gpu id used by the server.
+nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
 echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-python3 benchmark.py run http $thread_num $batch_size
-python3 cpu_utilization.py >> profile_log_$modelname
-python3 -m paddle_serving_server_gpu.profiler >> profile_log_$modelname
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-python3 benchmark.py dump benchmark.log benchmark.tmp
-mv benchmark.tmp benchmark.log
-awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-cat benchmark.log >> profile_log_$modelname
-python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-#rm -rf gpu_use.log gpu_utilization.log
+# Start http client
+python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+# Collect CPU metrics, Filter data that is zero momentarily, Record the maximum value of GPU memory and the average value of GPU utilization
+python3 cpu_utilization.py >> profile_log_$modelname
+grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+awk -F ' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+# Show profiles
+python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+tail -n 8 profile >> profile_log_$modelname
+echo '' >> profile_log_$modelname
 done
 done
+# Kill all nvidia-smi background task.
+pkill nvidia-smi
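Note: the new script post-processes the nvidia-smi logs with grep and awk, dropping momentary "0 %" samples and then reporting the maximum GPU memory and the average GPU utilization. A rough Python equivalent of that intent, assuming nvidia-smi's CSV output of one header row followed by values such as "1024 MiB" or "53 %":

    # Rough Python equivalent of the grep/awk post-processing; file names match
    # the script, the parsing of nvidia-smi's CSV output is an assumption.
    def max_gpu_memory(path="gpu_use.log"):
        values = [float(line.split()[0]) for i, line in enumerate(open(path))
                  if i > 0 and line.strip()]
        return max(values) if values else 0.0

    def avg_gpu_utilization(path="gpu_utilization.log"):
        values = [float(line.split()[0]) for i, line in enumerate(open(path))
                  if i > 0 and line.strip() and not line.startswith("0 %")]
        return sum(values) / len(values) if values else 0.0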
python/examples/pipeline/PaddleDetection/yolov3/benchmark.py

...
@@ -35,8 +35,8 @@ def gen_yml(device, gpu_id):
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 30}
     if device == "gpu":
-        config["op"]["faster_rcnn"]["local_service_conf"]["device_type"] = 1
-        config["op"]["faster_rcnn"]["local_service_conf"]["devices"] = gpu_id
+        config["op"]["yolov3"]["local_service_conf"]["device_type"] = 1
+        config["op"]["yolov3"]["local_service_conf"]["devices"] = gpu_id
     with open("config2.yml", "w") as fout:
         yaml.dump(config, fout, default_flow_style=False)
...
@@ -46,23 +46,43 @@ def run_http(idx, batch_size):
     with open(os.path.join(".", "000000570688.jpg"), 'rb') as file:
         image_data1 = file.read()
     image = cv2_to_base64(image_data1)
+    latency_list = []
     start = time.time()
+    total_num = 0
     while True:
+        l_start = time.time()
         data = {"key": [], "value": []}
         for j in range(batch_size):
             data["key"].append("image_" + str(j))
             data["value"].append(image)
         r = requests.post(url=url, data=json.dumps(data))
+        l_end = time.time()
+        total_num += 1
         end = time.time()
+        latency_list.append(l_end * 1000 - l_start * 1000)
         if end - start > 70:
-            print("70s end")
+            #print("70s end")
            break
-    return [[end - start]]
+    return [[end - start], latency_list, [total_num]]


 def multithread_http(thread, batch_size):
     multi_thread_runner = MultiThreadRunner()
-    result = multi_thread_runner.run(run_http, thread, batch_size)
+    start = time.time()
+    result = multi_thread_runner.run(run_http, thread, batch_size)
+    end = time.time()
+    total_cost = end - start
+    avg_cost = 0
+    total_number = 0
+    for i in range(thread):
+        avg_cost += result[0][i]
+        total_number += result[2][i]
+    avg_cost = avg_cost / thread
+    print("Total cost: {}s".format(total_cost))
+    print("Each thread cost: {}s. ".format(avg_cost))
+    print("Total count: {}. ".format(total_number))
+    print("AVG QPS: {} samples/s".format(batch_size * total_number / total_cost))
+    show_latency(result[1])


 def run_rpc(thread, batch_size):
     pass
...
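Note: a quick sanity check of the AVG QPS line printed by multithread_http, with illustrative numbers only:

    # Illustrative numbers only: 16 client threads, batch_size 1, roughly 700
    # requests per thread before the 70 s cut-off.
    batch_size = 1
    total_number = 16 * 700     # sum of result[2] across threads
    total_cost = 70.0           # wall-clock seconds measured around MultiThreadRunner.run
    print("AVG QPS:", batch_size * total_number / total_cost)  # -> 160.0 samples/s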
python/examples/pipeline/PaddleDetection/yolov3/benchmark.sh

 export FLAGS_profile_pipeline=1
-alias python3="python3.7"
-modelname="yolov3_darknet53_270e_coco"
-gpu_id="0"
-benchmark_config_filename="benchmark_config.yaml"
+alias python3="python3.6"
+modelname="det-yolov3"
 # HTTP
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+#ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-python3 benchmark.py yaml local_predictor 1 cpu
+# Create yaml,If you already have the config.yaml, ignore it.
+#python3 benchmark.py yaml local_predictor 1 gpu
 rm -rf profile_log_$modelname
-for thread_num in 1 8 16
+echo "Starting HTTP Clients..."
+# Start a client in each thread, tesing the case of multiple threads.
+for thread_num in 1 2 4 8 12 16
 do
 for batch_size in 1
 do
-echo "#----Yolov3 thread num: $thread_num batch size: $batch_size mode:http ----" >> profile_log_$modelname
-rm -rf PipelineServingLogs
-rm -rf cpu_utilization.py
-python3 web_service.py > web.log 2>&1 &
-sleep 3
-nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+echo "----${modelname} thread num: ${thread_num} batch size: ${batch_size} mode:http ----" >> profile_log_$modelname
+# Start one web service, If you start the service yourself, you can ignore it here.
+#python3 web_service.py >web.log 2>&1 &
+#sleep 3
+# --id is the serial number of the GPU card, Must be the same as the gpu id used by the server.
+nvidia-smi --id=3 --query-gpu=memory.used --format=csv -lms 1000 > gpu_use.log 2>&1 &
+nvidia-smi --id=3 --query-gpu=utilization.gpu --format=csv -lms 1000 > gpu_utilization.log 2>&1 &
 echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-python3 benchmark.py run http $thread_num $batch_size
-python3 cpu_utilization.py >> profile_log_$modelname
-python3 -m paddle_serving_server_gpu.profiler >> profile_log_$modelname
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-python3 benchmark.py dump benchmark.log benchmark.tmp
-mv benchmark.tmp benchmark.log
-awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
-awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
-cat benchmark.log >> profile_log_$modelname
-python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
-#rm -rf gpu_use.log gpu_utilization.log
+# Start http client
+python3 benchmark.py run http $thread_num $batch_size > profile 2>&1
+# Collect CPU metrics, Filter data that is zero momentarily, Record the maximum value of GPU memory and the average value of GPU utilization
+python3 cpu_utilization.py >> profile_log_$modelname
+grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp
+awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
+awk -F ' ' '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$modelname
+# Show profiles
+python3 ../../../util/show_profile.py profile $thread_num >> profile_log_$modelname
+tail -n 8 profile >> profile_log_$modelname
+echo '' >> profile_log_$modelname
 done
 done
+# Kill all nvidia-smi background task.
+pkill nvidia-smi