Commit 9c0a4d9d (unverified), authored by Double_V on Feb 22, 2022 and committed via GitHub on Feb 22, 2022.

Merge pull request #5536 from LDOUBLEV/dygraph

[benchmark] fix pretrain model download

Parents: 89480580, 253c7d82
Showing 5 changed files with 132 additions and 49 deletions (+132 -49):

benchmark/analysis.py            +119  -46
benchmark/run_benchmark_det.sh     +1   -0
benchmark/run_det.sh               +1   -0
test_tipc/benchmark_train.sh       +0   -2
test_tipc/prepare.sh              +11   -1

benchmark/analysis.py
@@ -26,35 +26,57 @@ def parse_args():
    parser.add_argument(
        "--filename", type=str, help="The name of log which need to analysis.")
    parser.add_argument(
        "--log_with_profiler", type=str, help="The path of train log with profiler")
    parser.add_argument(
        "--profiler_path", type=str, help="The path of profiler timeline log.")
    parser.add_argument(
        "--keyword", type=str, help="Keyword to specify analysis data")
    parser.add_argument(
        "--separator", type=str, default=None, help="Separator of different field in log")
    parser.add_argument(
        '--position', type=int, default=None, help='The position of data field')
    parser.add_argument(
        '--range', type=str, default="", help='The range of data field to intercept')
    parser.add_argument(
        '--base_batch_size', type=int, help='base_batch size on gpu')
    parser.add_argument(
        '--skip_steps', type=int, default=0, help='The number of steps to be skipped')
    parser.add_argument(
        '--model_mode', type=int, default=-1, help='Analysis mode, default value is -1')
    parser.add_argument('--ips_unit', type=str, default=None, help='IPS unit')
    parser.add_argument(
        '--model_name', type=str, default=0, help='training model_name, transformer_base')
    parser.add_argument(
        '--mission_name', type=str, default=0, help='training mission name')
    parser.add_argument(
        '--direction_id', type=int, default=0, help='training direction_id')
    parser.add_argument(
        '--run_mode', type=str, default="sp", help='multi process or single process')
    parser.add_argument(
        '--index', type=int, default=1, help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
    parser.add_argument(
        '--gpu_num', type=int, default=1, help='nums of training gpus')
    args = parser.parse_args()
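The flags above define how the log analyzer is driven. Below is a minimal sketch (not part of the repository) that re-declares a subset of these flags and parses a hypothetical command line; the sample values such as "train.log" and the keyword "ips:" are assumptions for illustration.

import argparse

# Re-declare a few of the flags defined above, with the same names and defaults.
parser = argparse.ArgumentParser()
parser.add_argument("--filename", type=str)
parser.add_argument("--keyword", type=str)
parser.add_argument("--base_batch_size", type=int)
parser.add_argument("--skip_steps", type=int, default=0)
parser.add_argument("--run_mode", type=str, default="sp")
parser.add_argument("--index", type=int, default=1)
parser.add_argument("--gpu_num", type=int, default=1)
parser.add_argument("--ips_unit", type=str, default=None)

# Hypothetical invocation: analyze the value that follows "ips:" in train.log.
args = parser.parse_args([
    "--filename", "train.log",
    "--keyword", "ips:",
    "--base_batch_size", "8",
    "--skip_steps", "2",
    "--gpu_num", "1",
    "--ips_unit", "samples/s",
])
print(args.filename, args.keyword, args.skip_steps, args.gpu_num)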
@@ -72,7 +94,12 @@ def _is_number(num):

class TimeAnalyzer(object):
    def __init__(self,
                 filename,
                 keyword=None,
                 separator=None,
                 position=None,
                 range="-1"):
        if filename is None:
            raise Exception("Please specify the filename!")
@@ -99,7 +126,8 @@ class TimeAnalyzer(object):
                # Distil the string from a line.
                line = line.strip()
                line_words = line.split(self.separator) if self.separator else line.split()
                if args.position:
                    result = line_words[self.position]
                else:
@@ -108,27 +136,36 @@ class TimeAnalyzer(object):
                        if line_words[i] == self.keyword:
                            result = line_words[i + 1]
                            break

                # Distil the result from the picked string.
                if not self.range:
                    result = result[0:]
                elif _is_number(self.range):
                    result = result[0:int(self.range)]
                else:
                    result = result[int(self.range.split(":")[0]):int(self.range.split(":")[1])]
                self.records.append(float(result))
            except Exception as exc:
                print("line is: {}; separator={}; position={}".format(line, self.separator, self.position))
        print("Extract {} records: separator={}; position={}".format(len(self.records), self.separator, self.position))

    def _get_fps(self, mode, batch_size, gpu_num, avg_of_records, run_mode, unit=None):
        if mode == -1 and run_mode == 'sp':
            assert unit, "Please set the unit when mode is -1."
            fps = gpu_num * avg_of_records
        elif mode == -1 and run_mode == 'mp':
            assert unit, "Please set the unit when mode is -1."
            fps = gpu_num * avg_of_records  # temporarily, not used now
            print("------------this is mp")
        elif mode == 0:
            # s/step -> samples/s
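The loop above picks the token immediately after the keyword, then slices it according to --range: an empty range keeps everything, a single number keeps a prefix, and "start:end" takes a slice. A self-contained sketch of that rule follows, using a made-up log line; the helper name `extract` does not exist in the script.

def extract(line, keyword, rng=""):
    # Find the token that follows the keyword, as in the loop above.
    words = line.split()
    result = None
    for i, w in enumerate(words):
        if w == keyword:
            result = words[i + 1]
            break
    if result is None:
        return None
    # Apply the --range rule: "", "N", or "start:end".
    if not rng:
        return float(result)
    if ":" not in rng:
        return float(result[:int(rng)])
    lo, hi = (int(x) for x in rng.split(":"))
    return float(result[lo:hi])

print(extract("step 10 ips: 123.456 samples/s", "ips:"))  # 123.456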
@@ -155,12 +192,20 @@ class TimeAnalyzer(object):
        return fps, unit

    def analysis(self, batch_size, gpu_num=1, skip_steps=0, mode=-1, run_mode='sp', unit=None):
        if batch_size <= 0:
            print("base_batch_size should larger than 0.")
            return 0, ''

        if len(self.records) <= skip_steps:  # to address the condition which item of log equals to skip_steps
            print("no records")
            return 0, ''
@@ -180,16 +225,20 @@ class TimeAnalyzer(object):
                    skip_max = self.records[i]

        avg_of_records = sum_of_records / float(count)
        avg_of_records_skipped = sum_of_records_skipped / float(count - skip_steps)

        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records, run_mode, unit)
        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num, avg_of_records_skipped, run_mode, unit)
        if mode == -1:
            print("average ips of %d steps, skip 0 step:" % count)
            print("\tAvg: %.3f %s" % (avg_of_records, fps_unit))
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average ips of %d steps, skip %d steps:" % (count, skip_steps))
                print("\tAvg: %.3f %s" % (avg_of_records_skipped, fps_unit))
                print("\tMin: %.3f %s" % (skip_min, fps_unit))
                print("\tMax: %.3f %s" % (skip_max, fps_unit))
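A worked example of the averaging above, with made-up readings: average all records, average again with the first skip_steps records dropped, then scale by gpu_num as the mode == -1, run_mode == 'sp' branch of _get_fps does.

records = [10.0, 98.0, 101.0, 99.0, 102.0]   # hypothetical per-step ips readings
skip_steps, gpu_num = 1, 1
count = len(records)

avg_of_records = sum(records) / float(count)                                    # 82.0
avg_of_records_skipped = sum(records[skip_steps:]) / float(count - skip_steps)  # 100.0

fps = gpu_num * avg_of_records                   # 82.0  (all steps)
fps_skipped = gpu_num * avg_of_records_skipped   # 100.0 (warm-up step skipped)
print(fps, fps_skipped)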
@@ -199,7 +248,8 @@ class TimeAnalyzer(object):
            print("\tAvg: %.3f steps/s" % avg_of_records)
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average latency of %d steps, skip %d steps:" % (count, skip_steps))
                print("\tAvg: %.3f steps/s" % avg_of_records_skipped)
                print("\tMin: %.3f steps/s" % skip_min)
                print("\tMax: %.3f steps/s" % skip_max)
@@ -209,7 +259,8 @@ class TimeAnalyzer(object):
            print("\tAvg: %.3f s/step" % avg_of_records)
            print("\tFPS: %.3f %s" % (fps, fps_unit))
            if skip_steps > 0:
                print("average latency of %d steps, skip %d steps:" % (count, skip_steps))
                print("\tAvg: %.3f s/step" % avg_of_records_skipped)
                print("\tMin: %.3f s/step" % skip_min)
                print("\tMax: %.3f s/step" % skip_max)
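The s/step prints above pair with the "# s/step -> samples/s" branch shown earlier. The body of that branch is elided in this diff, so the snippet below shows the standard conversion it refers to, with assumed numbers, rather than quoting the file.

avg_s_per_step = 0.25          # assumed average latency per step
batch_size, gpu_num = 8, 1     # assumed configuration
fps = batch_size * gpu_num / avg_s_per_step
print("%.3f samples/s" % fps)  # 32.000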
@@ -236,7 +287,8 @@ if __name__ == "__main__":
        if args.index == 1:
            if args.gpu_num == 1:
                run_info["log_with_profiler"] = args.log_with_profiler
                run_info["profiler_path"] = args.profiler_path
            analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator, args.position, args.range)
            run_info["FINAL_RESULT"], run_info["UNIT"] = analyzer.analysis(
                batch_size=args.base_batch_size,
                gpu_num=args.gpu_num,
@@ -245,29 +297,50 @@ if __name__ == "__main__":
                run_mode=args.run_mode,
                unit=args.ips_unit)
            try:
                if int(os.getenv('job_fail_flag')) == 1 or int(run_info["FINAL_RESULT"]) == 0:
                    run_info["JOB_FAIL_FLAG"] = 1
            except:
                pass
        elif args.index == 3:
            run_info["FINAL_RESULT"] = {}
            records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead', None, 3, '').records
            records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead', None, 5).records
            records_ct_total = TimeAnalyzer(args.filename, 'Computation time', None, 3, '').records
            records_gm_total = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 4, '').records
            records_gm_ratio = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 6).records
            records_gmas_total = TimeAnalyzer(args.filename, 'GpuMemcpyAsync Calls', None, 4, '').records
            records_gms_total = TimeAnalyzer(args.filename, 'GpuMemcpySync Calls', None, 4, '').records
            run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[0] if records_fo_total else 0
            run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[0] if records_fo_ratio else 0
            run_info["FINAL_RESULT"]["ComputationTime_Total"] = records_ct_total[0] if records_ct_total else 0
            run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[0] if records_gm_total else 0
            run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[0] if records_gm_ratio else 0
            run_info["FINAL_RESULT"]["GpuMemcpyAsync_Total"] = records_gmas_total[0] if records_gmas_total else 0
            run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[0] if records_gms_total else 0
        else:
            print("Not support!")
    except Exception:
        traceback.print_exc()

    print("{}".format(json.dumps(run_info)))  # it's required, for the log file path insert to the database
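The final print emits run_info as a single JSON object on stdout, which is what the benchmark pipeline consumes. A hedged sketch of how a downstream collector might parse it; the sample line and the failure check are illustrative and not taken from the pipeline code.

import json

line = '{"FINAL_RESULT": 100.0, "UNIT": "samples/s", "JOB_FAIL_FLAG": 0}'  # sample output line
run_info = json.loads(line)
if run_info.get("JOB_FAIL_FLAG") == 1 or run_info.get("FINAL_RESULT") in (0, None):
    print("benchmark job failed")
else:
    print("ips:", run_info["FINAL_RESULT"], run_info["UNIT"])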
benchmark/run_benchmark_det.sh
@@ -58,3 +58,4 @@ source ${BENCHMARK_ROOT}/scripts/run_model.sh  # this script will, for runs that match ...
_set_params $@
#_train     # uncomment this line if you only want to produce the training log without parsing it
_run        # defined in run_model.sh; it calls _train when executed. You may comment this line out to get only the training log when not running the full integration, but it must be enabled before submitting.
benchmark/run_det.sh
@@ -36,3 +36,4 @@ for model_mode in ${model_mode_list[@]}; do
done
test_tipc/benchmark_train.sh
@@ -3,8 +3,6 @@ source test_tipc/common_func.sh
 # set env
 python=python
-export model_branch=`git symbolic-ref HEAD 2>/dev/null | cut -d "/" -f 3`
-export model_commit=$(git log|head -n1|awk '{print $2}')
 export str_tmp=$(echo `pip list|grep paddlepaddle-gpu|awk -F ' ' '{print $2}'`)
 export frame_version=${str_tmp%%.post*}
 export frame_commit=$(echo `${python} -c "import paddle;print(paddle.version.commit)"`)
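The ${str_tmp%%.post*} expansion above strips any ".post..." suffix from the installed paddlepaddle-gpu version before it is exported for the benchmark log. The same trimming in Python, with a hypothetical version string:

# Hypothetical version string as produced by `pip list | grep paddlepaddle-gpu`.
str_tmp = "2.2.2.post112"
frame_version = str_tmp.split(".post")[0]  # mirrors ${str_tmp%%.post*}
print(frame_version)                       # 2.2.2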
test_tipc/prepare.sh
@@ -24,7 +24,17 @@ if [ ${MODE} = "benchmark_train" ];then
     pip install -r requirements.txt
     if [[ ${model_name} =~ "det_mv3_db_v2_0" || ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" || ${model_name} =~ "det_r18_db_v2_0" ]];then
         rm -rf ./train_data/icdar2015
-        wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
         wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
         cd ./train_data/ && tar xf icdar2015.tar && cd ../
     fi
+    if [[ ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
+        cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    fi
+    if [[ ${model_name} =~ "det_r18_db_v2_0" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet18_vd_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
+        cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    fi
 fi
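The wget -nc -P lines above fetch the pretrained weights and dataset only when they are not already present. Below is a rough Python equivalent of the pretrained-weights download; the URL comes from the diff, but the directory layout check and the use of urllib are assumptions for illustration, not how the TIPC scripts actually work.

import os
import urllib.request

# URL from the updated prepare.sh; destination mirrors its ./pretrain_models/ layout.
url = "https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams"
dest_dir = "./pretrain_models"
dest = os.path.join(dest_dir, os.path.basename(url))

os.makedirs(dest_dir, exist_ok=True)
if not os.path.exists(dest):               # like wget -nc: skip the download if the file exists
    urllib.request.urlretrieve(url, dest)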