PaddlePaddle / Serving

Commit 18ec3818
Authored on April 25, 2021 by LDOUBLEV

support std log for ocr

Parent: fb430d25
Showing 4 changed files with 70 additions and 39 deletions (+70 -39):
python/examples/pipeline/ocr/benchmark.py                    +11  -4
python/examples/pipeline/ocr/benchmark.sh                    +18 -35
python/examples/pipeline/ocr/benchmark_config.yaml.template  +32  -0
python/examples/pipeline/ocr/eval_top.sh                      +9  -0
python/examples/pipeline/ocr/benchmark.py

@@ -24,16 +24,19 @@ def parse_benchmark(filein, fileout):
     with open(fileout, "w") as fout:
         yaml.dump(res, fout, default_flow_style=False)
 
-def gen_yml(device):
+def gen_yml(device, gpu_id):
     fin = open("config.yml", "r")
     config = yaml.load(fin)
     fin.close()
     config["dag"]["tracer"] = {"interval_s": 10}
     if device == "gpu":
         config["op"]["det"]["local_service_conf"]["device_type"] = 1
-        config["op"]["det"]["local_service_conf"]["devices"] = "2"
+        config["op"]["det"]["local_service_conf"]["devices"] = gpu_id
         config["op"]["rec"]["local_service_conf"]["device_type"] = 1
-        config["op"]["rec"]["local_service_conf"]["devices"] = "2"
+        config["op"]["rec"]["local_service_conf"]["devices"] = gpu_id
+    else:
+        config["op"]["rec"]["local_service_conf"]["device_type"] = 0
+        config["op"]["det"]["local_service_conf"]["device_type"] = 0
     with open("config2.yml", "w") as fout:
         yaml.dump(config, fout, default_flow_style=False)
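A side note on the unchanged context above: calling yaml.load(fin) without an explicit Loader has been deprecated since PyYAML 5.1 and is unsafe on untrusted input. A minimal sketch of the drop-in replacement for a plain config file like config.yml (not part of this commit):

    import yaml

    # safe_load avoids the YAMLLoadWarning and arbitrary object construction
    with open("config.yml", "r") as fin:
        config = yaml.safe_load(fin)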
@@ -85,7 +88,11 @@ if __name__ == "__main__":
         mode = sys.argv[2]  # brpc/ local predictor
         thread = int(sys.argv[3])
         device = sys.argv[4]
-        gen_yml(device)
+        if device == "gpu":
+            gpu_id = sys.argv[5]
+            gen_yml(device, gpu_id)
+        else:
+            gen_yml(device, "0")
     elif sys.argv[1] == "run":
         mode = sys.argv[2]  # http/ rpc
         thread = int(sys.argv[3])
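Taken together, the two hunks extend the "yaml" mode of benchmark.py with a fifth positional argument, the CUDA device id, which gen_yml now writes into both the det and rec ops. A hypothetical invocation sketch (the thread count and device id are placeholder values, not from the commit):

    import subprocess

    # GPU mode: config2.yml gets device_type=1 and devices="3" for det and rec
    subprocess.run(
        ["python3", "benchmark.py", "yaml", "local_predictor", "1", "gpu", "3"],
        check=True)

    # CPU mode: no id is needed; gen_yml receives the "0" fallback
    subprocess.run(
        ["python3", "benchmark.py", "yaml", "local_predictor", "1", "cpu"],
        check=True)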
python/examples/pipeline/ocr/benchmark.sh

 export FLAGS_profile_pipeline=1
 alias python3="python3.7"
 modelname="ocr"
+use_gpu=0
+gpu_id="0"
+benchmark_config_filename="benchmark_config.yaml"
 # HTTP
 ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
 sleep 3
-python3 benchmark.py yaml local_predictor 1 gpu
+if [ $use_gpu -eq 1 ]; then
+    python3 benchmark.py yaml local_predictor 1 gpu $gpu_id
+else
+    python3 benchmark.py yaml local_predictor 1 cpu
+fi
 rm -rf profile_log_$modelname
-for thread_num in 1 8 16
+for thread_num in 1
 do
     for batch_size in 1
     do
-        echo "----Bert thread num: $thread_num batch size: $batch_size mode:http ----" >> profile_log_$modelname
+        echo "#----OCR thread num: $thread_num batch size: $batch_size mode:http use_gpu: $use_gpu ----" >> profile_log_$modelname
         rm -rf PipelineServingLogs
         rm -rf cpu_utilization.py
         python3 web_service.py > web.log 2>&1 &
         sleep 3
-        nvidia-smi --id=2 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-        nvidia-smi --id=2 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
+        nvidia-smi --id=${gpu_id} --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
+        nvidia-smi --id=${gpu_id} --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
         echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
         python3 benchmark.py run http $thread_num $batch_size
         python3 cpu_utilization.py >> profile_log_$modelname
+        python3 -m paddle_serving_server_gpu.profiler >> profile_log_$modelname
         ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
+        ps -ef | grep nvidia-smi | awk '{print $2}' | xargs kill -9
         python3 benchmark.py dump benchmark.log benchmark.tmp
         mv benchmark.tmp benchmark.log
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$modelname
+        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_MEM:", max}' gpu_use.log >> profile_log_$modelname
+        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTIL:", max}' gpu_utilization.log >> profile_log_$modelname
         cat benchmark.log >> profile_log_$modelname
+        python3 -m paddle_serving_server_gpu.parse_profile --benchmark_cfg $benchmark_config_filename --benchmark_log profile_log_$modelname
         #rm -rf gpu_use.log gpu_utilization.log
     done
 done
-# RPC
-ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-sleep 3
-python3 benchmark.py yaml local_predictor 1 gpu
-for thread_num in 1 8 16
-do
-    for batch_size in 1
-    do
-        echo "----Bert thread num: $thread_num batch size: $batch_size mode:rpc ----" >> profile_log_$modelname
-        rm -rf PipelineServingLogs
-        rm -rf cpu_utilization.py
-        python3 web_service.py > web.log 2>&1 &
-        sleep 3
-        nvidia-smi --id=2 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 &
-        nvidia-smi --id=2 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
-        echo "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-        python3 benchmark.py run rpc $thread_num $batch_size
-        python3 cpu_utilization.py >> profile_log_$modelname
-        ps -ef | grep web_service | awk '{print $2}' | xargs kill -9
-        python3 benchmark.py dump benchmark.log benchmark.tmp
-        mv benchmark.tmp benchmark.log
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$modelname
-        awk 'BEGIN {max = 0} {if(NR>1){if ($modelname > max) max=$modelname}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$modelname
-        #rm -rf gpu_use.log gpu_utilization.log
-        cat benchmark.log >> profile_log_$modelname
-    done
-done
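One subtlety in the awk one-liners: because the program is single-quoted, $modelname is never expanded by the shell; awk reads modelname as an uninitialized variable, so $modelname resolves to $0, the whole record. The lines therefore compare each full log line against the running maximum rather than a specific column. A minimal Python sketch of what they appear to intend, the peak value of the first CSV column after the header (the helper name is ours; the log names match the script):

    def max_from_log(path):
        """Peak numeric value in the first column, skipping the CSV header."""
        peak = 0.0
        with open(path) as f:
            for i, line in enumerate(f):
                fields = line.strip().split()
                if i == 0 or not fields:
                    continue  # header row or blank line
                try:
                    peak = max(peak, float(fields[0]))  # "1234 MiB" -> 1234.0
                except ValueError:
                    pass  # repeated header or non-numeric field
        return peak

    print("GPU_MEM:", max_from_log("gpu_use.log"))
    print("GPU_UTIL:", max_from_log("gpu_utilization.log"))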
python/examples/pipeline/ocr/benchmark_config.yaml.template (new file, mode 100644)
cuda_version: "10.1"
cudnn_version: "7.6"
trt_version: "6.0"
python_version: "3.7"
gcc_version: "8.2"
paddle_version: "2.0.1"
cpu: "Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz X12"
gpu: "T4"
xpu: "None"
api: ""
owner: "yingxiaoting"
model_name: "ocr"
model_type: "static"
model_source: "PaddleOCR"
model_url: ""
batch_size: 1
num_of_samples: 1000
input_shape: "3,32,X"
runtime_device: "cpu"
ir_optim: true
enable_memory_optim: true
enable_tensorrt: false
precision: "fp32"
enable_mkldnn: false
cpu_math_library_num_threads: ""
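The template records the benchmark environment and model metadata that parse_profile receives via --benchmark_cfg. A quick sanity check one might run on the rendered file (hypothetical, not part of the commit; the key list is ours):

    import yaml

    with open("benchmark_config.yaml") as f:
        cfg = yaml.safe_load(f)
    for key in ("model_name", "runtime_device", "batch_size", "precision"):
        assert key in cfg, f"missing key: {key}"
    print(cfg["model_name"], cfg["runtime_device"])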
python/examples/pipeline/ocr/eval_top.sh (new file, mode 100644)

sed -e "s/imagenet/${modelname}/g" benchmark_config.yaml.template > benchmark_config.yaml
sh benchmark.sh
mv std_benchmark.log std_benchmark.log.cpu
sed -e "s/use_gpu=0/use_gpu=1/g" benchmark.sh > benchmark_gpu.sh
sed -e "s/imagenet/${modelname}/g" -e "s/runtime_device: \"cpu\"/runtime_device: \"gpu\"/g" benchmark_config.yaml.template > benchmark_config.yaml
sh benchmark_gpu.sh
mv std_benchmark.log std_benchmark.log.gpu
cp std_benchmark.log.cpu /paddle/Cls/tools/serving_log/${modelname}.log.cpu
cp std_benchmark.log.gpu /paddle/Cls/tools/serving_log/${modelname}.log.gpu
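Note that eval_top.sh never sets modelname itself; it must be exported by the caller, otherwise the substitution degenerates to "s/imagenet//g". A hedged Python equivalent of the CPU-pass template rendering (the "ocr" fallback is ours):

    import os
    import pathlib

    # modelname is expected from the caller's environment, as in eval_top.sh
    modelname = os.environ.get("modelname", "ocr")
    template = pathlib.Path("benchmark_config.yaml.template").read_text()
    pathlib.Path("benchmark_config.yaml").write_text(
        template.replace("imagenet", modelname))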