Commit fecd1a3e (unverified)
Authored by cifar10 on Oct 14, 2022; committed via GitHub on Oct 14, 2022.
Repository: s920243400/PaddleDetection (fork of PaddlePaddle/PaddleDetection)
[MLU] add mlu detection config for develop branch (#7115)
Parent commit: 88e75af9

Showing 8 changed files with 67 additions and 11 deletions (+67, −11):
configs/runtime.yml        +1   −0
ppdet/engine/trainer.py    +6   −6
ppdet/utils/check.py       +21  −1
tools/eval.py              +8   −1
tools/eval_mot.py          +8   −1
tools/infer.py             +8   −1
tools/infer_mot.py         +8   −1
tools/train.py             +7   −0
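Taken together, the commit threads a new use_mlu flag through the config defaults (configs/runtime.yml), the environment checks (ppdet/utils/check.py), the SyncBatchNorm and AMP gating in the trainer, and the device-selection code in each tools/*.py entry point. As orientation before the per-file diffs, here is a minimal, hedged sketch of how the flag is meant to be honoured; the flag value is illustrative, and paddle.is_compiled_with_mlu() / paddle.set_device('mlu') are the existing Paddle APIs this commit relies on.

# Illustrative sketch only: the flag takes effect only on a paddlepaddle-mlu build.
import paddle

use_mlu = True                            # mirrors the new use_mlu key in configs/runtime.yml
if use_mlu and paddle.is_compiled_with_mlu():
    place = paddle.set_device('mlu')      # same call the tools/*.py scripts now make
else:
    place = paddle.set_device('cpu')      # safe fallback on CPU/GPU-only builds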
configs/runtime.yml
 use_gpu: true
 use_xpu: false
+use_mlu: false
 log_iter: 20
 save_dir: output
 snapshot_epoch: 1
 ...
ppdet/engine/trainer.py
@@ -411,14 +411,14 @@ class Trainer(object):
         model = self.model
         sync_bn = (getattr(self.cfg, 'norm_type', None) == 'sync_bn' and
-                   self.cfg.use_gpu and self._nranks > 1)
+                   (self.cfg.use_gpu or self.cfg.use_mlu) and self._nranks > 1)
         if sync_bn:
             model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)

         # enabel auto mixed precision mode
         if self.use_amp:
             scaler = paddle.amp.GradScaler(
-                enable=self.cfg.use_gpu or self.cfg.use_npu,
+                enable=self.cfg.use_gpu or self.cfg.use_npu or self.cfg.use_mlu,
                 init_loss_scaling=self.cfg.get('init_loss_scaling', 1024))

         # get distributed model
         if self.cfg.get('fleet', False):
@@ -474,7 +474,7 @@ class Trainer(object):
                     DataParallel) and use_fused_allreduce_gradients:
                 with model.no_sync():
                     with paddle.amp.auto_cast(
-                            enable=self.cfg.use_gpu,
+                            enable=self.cfg.use_gpu or self.cfg.use_mlu,
                             custom_white_list=self.custom_white_list,
                             custom_black_list=self.custom_black_list,
                             level=self.amp_level):
@@ -488,7 +488,7 @@ class Trainer(object):
                         list(model.parameters()), None)
             else:
                 with paddle.amp.auto_cast(
-                        enable=self.cfg.use_gpu,
+                        enable=self.cfg.use_gpu or self.cfg.use_mlu,
                         custom_white_list=self.custom_white_list,
                         custom_black_list=self.custom_black_list,
                         level=self.amp_level):
@@ -602,7 +602,7 @@ class Trainer(object):
             # forward
             if self.use_amp:
                 with paddle.amp.auto_cast(
-                        enable=self.cfg.use_gpu,
+                        enable=self.cfg.use_gpu or self.cfg.use_mlu,
                         custom_white_list=self.custom_white_list,
                         custom_black_list=self.custom_black_list,
                         level=self.amp_level):
@@ -669,7 +669,7 @@ class Trainer(object):
             # forward
             if self.use_amp:
                 with paddle.amp.auto_cast(
-                        enable=self.cfg.use_gpu,
+                        enable=self.cfg.use_gpu or self.cfg.use_mlu,
                         custom_white_list=self.custom_white_list,
                         custom_black_list=self.custom_black_list,
                         level=self.amp_level):
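In the trainer, the patch widens every device gate that previously tested only use_gpu (or use_gpu/use_npu for the gradient scaler) so that it also accepts use_mlu; SyncBatchNorm conversion and automatic mixed precision therefore engage on MLU the same way they do on GPU. A minimal, self-contained sketch of that gating pattern follows; the flag values are illustrative stand-ins for self.cfg.use_gpu / self.cfg.use_mlu, and with both set to False the AMP context is a no-op, so the snippet runs on any Paddle build.

# Sketch of the device-gated AMP pattern, not the Trainer code itself.
import paddle

use_gpu, use_mlu = False, False   # illustrative; the trainer reads these from its config

scaler = paddle.amp.GradScaler(
    enable=use_gpu or use_mlu,    # mirrors the enable=... edits above
    init_loss_scaling=1024)

x = paddle.rand([2, 3])
with paddle.amp.auto_cast(enable=use_gpu or use_mlu, level='O1'):
    y = (x * 2.0).mean()          # a forward pass would run here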
ppdet/utils/check.py
@@ -26,10 +26,30 @@ from .logger import setup_logger
 logger = setup_logger(__name__)

 __all__ = [
-    'check_gpu', 'check_npu', 'check_xpu', 'check_version', 'check_config'
+    'check_gpu', 'check_npu', 'check_xpu', 'check_mlu', 'check_version',
+    'check_config'
 ]


+def check_mlu(use_mlu):
+    """
+    Log error and exit when set use_mlu=true in paddlepaddle
+    cpu/gpu/xpu/npu version.
+    """
+    err = "Config use_mlu cannot be set as true while you are " \
+          "using paddlepaddle cpu/gpu/xpu/npu version ! \nPlease try: \n" \
+          "\t1. Install paddlepaddle-mlu to run model on MLU \n" \
+          "\t2. Set use_mlu as false in config file to run " \
+          "model on CPU/GPU/XPU/NPU"
+
+    try:
+        if use_mlu and not paddle.is_compiled_with_mlu():
+            logger.error(err)
+            sys.exit(1)
+    except Exception as e:
+        pass
+
+
 def check_npu(use_npu):
     """
     Log error and exit when set use_npu=true in paddlepaddle
 ...
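The new check_mlu guard mirrors the existing check_gpu/check_npu/check_xpu helpers: it aborts only when use_mlu is requested on a Paddle wheel that was not compiled with MLU support, and the broad try/except means any error in the probe itself is silently ignored, presumably so older Paddle builds without paddle.is_compiled_with_mlu() keep working. A hedged usage sketch, assuming this commit is applied and PaddleDetection is importable:

import paddle
from ppdet.utils.check import check_mlu

use_mlu = paddle.is_compiled_with_mlu()      # only request MLU when the wheel supports it
check_mlu(use_mlu)                           # logs the error above and exits if misconfigured
paddle.set_device('mlu' if use_mlu else 'cpu')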
tools/eval.py
@@ -30,7 +30,7 @@ warnings.filterwarnings('ignore')
 import paddle
 from ppdet.core.workspace import load_config, merge_config
-from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_version, check_config
+from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_mlu, check_version, check_config
 from ppdet.utils.cli import ArgsParser, merge_args
 from ppdet.engine import Trainer, init_parallel_env
 from ppdet.metrics.coco_utils import json_eval_results
@@ -171,12 +171,18 @@ def main():
     if 'use_gpu' not in cfg:
         cfg.use_gpu = False

+    # disable mlu in config by default
+    if 'use_mlu' not in cfg:
+        cfg.use_mlu = False
+
     if cfg.use_gpu:
         place = paddle.set_device('gpu')
     elif cfg.use_npu:
         place = paddle.set_device('npu')
     elif cfg.use_xpu:
         place = paddle.set_device('xpu')
+    elif cfg.use_mlu:
+        place = paddle.set_device('mlu')
     else:
         place = paddle.set_device('cpu')
@@ -187,6 +193,7 @@ def main():
     check_gpu(cfg.use_gpu)
     check_npu(cfg.use_npu)
     check_xpu(cfg.use_xpu)
+    check_mlu(cfg.use_mlu)
     check_version()

     run(FLAGS, cfg)
tools/eval_mot.py
@@ -30,7 +30,7 @@ warnings.filterwarnings('ignore')
 import paddle
 from ppdet.core.workspace import load_config, merge_config
-from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_version, check_config
+from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_mlu, check_version, check_config
 from ppdet.utils.cli import ArgsParser
 from ppdet.engine import Tracker
@@ -115,12 +115,18 @@ def main():
     if 'use_gpu' not in cfg:
         cfg.use_gpu = False

+    # disable mlu in config by default
+    if 'use_mlu' not in cfg:
+        cfg.use_mlu = False
+
     if cfg.use_gpu:
         place = paddle.set_device('gpu')
     elif cfg.use_npu:
         place = paddle.set_device('npu')
     elif cfg.use_xpu:
         place = paddle.set_device('xpu')
+    elif cfg.use_mlu:
+        place = paddle.set_device('mlu')
     else:
         place = paddle.set_device('cpu')
@@ -128,6 +134,7 @@ def main():
     check_gpu(cfg.use_gpu)
     check_npu(cfg.use_npu)
     check_xpu(cfg.use_xpu)
+    check_mlu(cfg.use_mlu)
     check_version()

     run(FLAGS, cfg)
tools/infer.py
@@ -32,7 +32,7 @@ import ast
 import paddle
 from ppdet.core.workspace import load_config, merge_config
 from ppdet.engine import Trainer
-from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_version, check_config
+from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_mlu, check_version, check_config
 from ppdet.utils.cli import ArgsParser, merge_args
 from ppdet.slim import build_slim_model
@@ -204,12 +204,18 @@ def main():
     if 'use_gpu' not in cfg:
         cfg.use_gpu = False

+    # disable mlu in config by default
+    if 'use_mlu' not in cfg:
+        cfg.use_mlu = False
+
     if cfg.use_gpu:
         place = paddle.set_device('gpu')
     elif cfg.use_npu:
         place = paddle.set_device('npu')
     elif cfg.use_xpu:
         place = paddle.set_device('xpu')
+    elif cfg.use_mlu:
+        place = paddle.set_device('mlu')
     else:
         place = paddle.set_device('cpu')
@@ -220,6 +226,7 @@ def main():
     check_gpu(cfg.use_gpu)
     check_npu(cfg.use_npu)
     check_xpu(cfg.use_xpu)
+    check_mlu(cfg.use_mlu)
     check_version()

     run(FLAGS, cfg)
tools/infer_mot.py
@@ -30,7 +30,7 @@ warnings.filterwarnings('ignore')
 import paddle
 from ppdet.core.workspace import load_config, merge_config
 from ppdet.engine import Tracker
-from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_version, check_config
+from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_mlu, check_version, check_config
 from ppdet.utils.cli import ArgsParser
@@ -127,12 +127,18 @@ def main():
     if 'use_gpu' not in cfg:
         cfg.use_gpu = False

+    # disable mlu in config by default
+    if 'use_mlu' not in cfg:
+        cfg.use_mlu = False
+
     if cfg.use_gpu:
         place = paddle.set_device('gpu')
     elif cfg.use_npu:
         place = paddle.set_device('npu')
     elif cfg.use_xpu:
         place = paddle.set_device('xpu')
+    elif cfg.use_mlu:
+        place = paddle.set_device('mlu')
     else:
         place = paddle.set_device('cpu')
@@ -140,6 +146,7 @@ def main():
     check_gpu(cfg.use_gpu)
     check_npu(cfg.use_npu)
     check_xpu(cfg.use_xpu)
+    check_mlu(cfg.use_mlu)
     check_version()

     run(FLAGS, cfg)
tools/train.py
@@ -149,12 +149,18 @@ def main():
     if 'use_gpu' not in cfg:
         cfg.use_gpu = False

+    # disable mlu in config by default
+    if 'use_mlu' not in cfg:
+        cfg.use_mlu = False
+
     if cfg.use_gpu:
         place = paddle.set_device('gpu')
     elif cfg.use_npu:
         place = paddle.set_device('npu')
     elif cfg.use_xpu:
         place = paddle.set_device('xpu')
+    elif cfg.use_mlu:
+        place = paddle.set_device('mlu')
     else:
         place = paddle.set_device('cpu')
@@ -167,6 +173,7 @@ def main():
     check.check_gpu(cfg.use_gpu)
     check.check_npu(cfg.use_npu)
     check.check_xpu(cfg.use_xpu)
+    check.check_mlu(cfg.use_mlu)
     check.check_version()

     run(FLAGS, cfg)
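Every tools/*.py entry point receives the same three additions: cfg.use_mlu defaults to False when the key is absent, an elif cfg.use_mlu branch is appended to the paddle.set_device chain, and check_mlu(cfg.use_mlu) runs alongside the existing device checks. The compact sketch below consolidates that selection chain; the flags are illustrative stand-ins for the cfg attributes, and with everything False it falls through to CPU, so it runs on any Paddle build.

import paddle

# Illustrative stand-ins for cfg.use_gpu / use_npu / use_xpu / use_mlu.
use_gpu = use_npu = use_xpu = use_mlu = False

if use_gpu:
    place = paddle.set_device('gpu')
elif use_npu:
    place = paddle.set_device('npu')
elif use_xpu:
    place = paddle.set_device('xpu')
elif use_mlu:
    place = paddle.set_device('mlu')
else:
    place = paddle.set_device('cpu')

print(place)   # shows the selected device, e.g. Place(cpu) when no accelerator flag is set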