weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)
Commit 78519771
Authored on Oct 08, 2022 by huangqipeng
Parent: 077196f3

[MLU]adapt mlu device for running dbnet network

Showing 3 changed files with 14 additions and 6 deletions (+14 -6)
configs/det/det_mv3_db.yml    +1  -0
tools/program.py              +8  -2
tools/train.py                +5  -4
configs/det/det_mv3_db.yml

 Global:
   use_gpu: true
   use_xpu: false
+  use_mlu: false
   epoch_num: 1200
   log_smooth_window: 20
   print_batch_step: 10
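The new use_mlu switch sits in the Global section next to use_gpu and use_xpu, and the code in tools/program.py below reads it with config['Global'].get('use_mlu', False), so configs that predate this commit simply fall back to False. A minimal sketch of that lookup, assuming PyYAML and a local checkout of the config file (the snippet is illustrative, not part of the patch):

import yaml

# Load the detection config and read the device switches the same way
# preprocess() does, with use_mlu defaulting to False when the key is absent.
with open('configs/det/det_mv3_db.yml', 'r', encoding='utf-8') as f:
    config = yaml.safe_load(f)

global_cfg = config['Global']
use_gpu = global_cfg.get('use_gpu', False)
use_xpu = global_cfg.get('use_xpu', False)
use_mlu = global_cfg.get('use_mlu', False)
print(f"use_gpu={use_gpu}, use_xpu={use_xpu}, use_mlu={use_mlu}")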
tools/program.py

@@ -114,7 +114,7 @@ def merge_config(config, opts):
     return config
 
 
-def check_device(use_gpu, use_xpu=False, use_npu=False):
+def check_device(use_gpu, use_xpu=False, use_npu=False, use_mlu=False):
     """
     Log error and exit when set use_gpu=true in paddlepaddle
     cpu version.
@@ -137,6 +137,9 @@ def check_device(use_gpu, use_xpu=False, use_npu=False):
         if use_npu and not paddle.device.is_compiled_with_npu():
             print(err.format("use_npu", "npu", "npu", "use_npu"))
             sys.exit(1)
+        if use_mlu and not paddle.device.is_compiled_with_mlu():
+            print(err.format("use_mlu", "mlu", "mlu", "use_mlu"))
+            sys.exit(1)
     except Exception as e:
         pass
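The added guard mirrors the existing NPU branch: if the config requests MLU but the installed Paddle wheel was not compiled with MLU support, training exits with the same templated error. A standalone sketch of that guard follows; the helper name require_mlu is illustrative, and it assumes a Paddle build that exposes paddle.device.is_compiled_with_mlu(), as the hunk above does.

import sys

import paddle

def require_mlu(use_mlu):
    # Mirrors the check added above: abort when use_mlu is requested but the
    # installed Paddle build has no MLU support compiled in.
    if use_mlu and not paddle.device.is_compiled_with_mlu():
        print("use_mlu is true, but this paddlepaddle build was not compiled "
              "with MLU support; install an MLU-enabled build or set "
              "use_mlu: false in the config.")
        sys.exit(1)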
@@ -618,6 +621,7 @@ def preprocess(is_train=False):
     use_gpu = config['Global'].get('use_gpu', False)
     use_xpu = config['Global'].get('use_xpu', False)
     use_npu = config['Global'].get('use_npu', False)
+    use_mlu = config['Global'].get('use_mlu', False)
 
     alg = config['Architecture']['algorithm']
     assert alg in [
@@ -632,10 +636,12 @@ def preprocess(is_train=False):
         device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
     elif use_npu:
         device = 'npu:{0}'.format(os.getenv('FLAGS_selected_npus', 0))
+    elif use_mlu:
+        device = 'mlu:{0}'.format(os.getenv('FLAGS_selected_mlus', 0))
     else:
         device = 'gpu:{}'.format(dist.ParallelEnv().dev_id) if use_gpu else 'cpu'
 
-    check_device(use_gpu, use_xpu, use_npu)
+    check_device(use_gpu, use_xpu, use_npu, use_mlu)
 
     device = paddle.set_device(device)
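With these hunks, preprocess() resolves the device string through an xpu / npu / mlu / gpu-or-cpu chain before calling paddle.set_device(), and the MLU card index comes from the FLAGS_selected_mlus environment variable, following the existing FLAGS_selected_xpus / FLAGS_selected_npus convention. A condensed sketch of that selection order; the standalone function resolve_device is illustrative, and the real code also uses dist.ParallelEnv().dev_id for the GPU index:

import os

def resolve_device(use_gpu=False, use_xpu=False, use_npu=False, use_mlu=False):
    # Condensed form of the selection chain in preprocess() after this patch.
    if use_xpu:
        return 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
    if use_npu:
        return 'npu:{0}'.format(os.getenv('FLAGS_selected_npus', 0))
    if use_mlu:
        return 'mlu:{0}'.format(os.getenv('FLAGS_selected_mlus', 0))
    # The real code picks the GPU index via dist.ParallelEnv().dev_id.
    return 'gpu:0' if use_gpu else 'cpu'

# For example, resolve_device(use_mlu=True) yields 'mlu:0' unless
# FLAGS_selected_mlus selects another card; paddle.set_device() then
# receives that string, exactly as in the hunk above.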
tools/train.py

@@ -149,10 +149,11 @@ def main(config, device, logger, vdl_writer):
     amp_level = config["Global"].get("amp_level", 'O2')
     amp_custom_black_list = config['Global'].get('amp_custom_black_list', [])
     if use_amp:
-        AMP_RELATED_FLAGS_SETTING = {
-            'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
-            'FLAGS_max_inplace_grad_add': 8,
-        }
+        AMP_RELATED_FLAGS_SETTING = {'FLAGS_max_inplace_grad_add': 8, }
+        if paddle.is_compiled_with_cuda():
+            AMP_RELATED_FLAGS_SETTING.update({
+                'FLAGS_cudnn_batchnorm_spatial_persistent': 1
+            })
         paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)
         scale_loss = config["Global"].get("scale_loss", 1.0)
         use_dynamic_loss_scaling = config["Global"].get(
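In tools/train.py the cuDNN-specific AMP flag is now applied only when Paddle was compiled with CUDA, while FLAGS_max_inplace_grad_add stays unconditional, so enabling use_amp no longer sets a GPU-only flag on MLU or CPU builds. A minimal standalone sketch of that guarded setup (the wrapper function set_amp_flags is illustrative, not part of the patch):

import paddle

def set_amp_flags():
    # Always-safe flag first; the cuDNN batch-norm flag is CUDA-only, so it
    # is added only when this Paddle build was compiled with CUDA.
    amp_flags = {'FLAGS_max_inplace_grad_add': 8}
    if paddle.is_compiled_with_cuda():
        amp_flags['FLAGS_cudnn_batchnorm_spatial_persistent'] = 1
    paddle.fluid.set_flags(amp_flags)

set_amp_flags()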