Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleOCR
提交
149e1184
P
PaddleOCR
项目概览
PaddlePaddle
/
PaddleOCR
大约 1 年 前同步成功
通知
1528
Star
32962
Fork
6643
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
108
列表
看板
标记
里程碑
合并请求
7
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleOCR
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
108
Issue
108
列表
看板
标记
里程碑
合并请求
7
合并请求
7
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
149e1184
编写于
9月 13, 2022
作者:
Q
qili93
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[TIPC] add scripts for NPU and XPU, test=develop
上级
2cfb23cf
变更
4
显示空白变更内容
内联
并排
Showing
4 changed file
with
137 addition
and
38 deletion
+137
-38
test_tipc/test_train_inference_python_npu.sh
test_tipc/test_train_inference_python_npu.sh
+52
-0
test_tipc/test_train_inference_python_xpu.sh
test_tipc/test_train_inference_python_xpu.sh
+52
-0
tools/infer/utility.py
tools/infer/utility.py
+18
-8
tools/program.py
tools/program.py
+15
-30
未找到文件。
test_tipc/test_train_inference_python_npu.sh
0 → 100644
浏览文件 @
149e1184
#!/bin/bash
source test_tipc/common_func.sh

# Resolve a path to its absolute form (portable replacement for readlink -f,
# which is unavailable on some platforms).
function readlinkf() {
    perl -MCwd -e 'print Cwd::abs_path shift' "$1";
}

# Extract the config file path (the third whitespace-separated field) from a
# "python tools/train.py -c <config>"-style command line.
function func_parser_config() {
    strs=$1
    IFS=" "
    array=(${strs})
    tmp=${array[2]}
    echo ${tmp}
}

BASEDIR=$(dirname "$0")
REPO_ROOT_PATH=$(readlinkf ${BASEDIR}/../)

# $1: tipc config txt; $2: mode passed through to test_train_inference_python.sh
FILENAME=$1

# disable mkldnn on non x86_64 env (mkldnn is x86-only)
arch=$(uname -i)
if [ "$arch" != 'x86_64' ]; then
    sed -i 's/--enable_mkldnn:True|False/--enable_mkldnn:False/g' "$FILENAME"
    sed -i 's/--enable_mkldnn:True/--enable_mkldnn:False/g' "$FILENAME"
fi

# change gpu to npu in tipc txt configs
sed -i 's/use_gpu/use_npu/g' "$FILENAME"

# disable benchmark as AutoLog requires the nvidia-smi command
sed -i 's/--benchmark:True/--benchmark:False/g' "$FILENAME"

dataline=$(cat "$FILENAME")
# parser params
IFS=$'\n'
lines=(${dataline})

# replace use_gpu with use_npu in every training yml referenced by the tipc config
grep -n 'tools/.*yml' "$FILENAME" | cut -d ":" -f 1 \
| while read line_num ; do
    train_cmd=$(func_parser_value "${lines[line_num-1]}")
    trainer_config=$(func_parser_config ${train_cmd})
    sed -i 's/use_gpu/use_npu/g' "$REPO_ROOT_PATH/$trainer_config"
done

# change gpu to npu in execution script
sed -i 's/\"gpu\"/\"npu\"/g' test_tipc/test_train_inference_python.sh

# pass parameters to test_train_inference_python.sh
# NOTE: double quotes (not the original single quotes) so ${cmd} is actually
# interpolated into the log line below instead of printing literally.
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} $2"
echo -e "\033[1;32m Started to run command: ${cmd}! \033[0m"
eval $cmd
test_tipc/test_train_inference_python_xpu.sh
0 → 100644
浏览文件 @
149e1184
#!/bin/bash
source test_tipc/common_func.sh

# Resolve a path to its absolute form (portable replacement for readlink -f,
# which is unavailable on some platforms).
function readlinkf() {
    perl -MCwd -e 'print Cwd::abs_path shift' "$1";
}

# Extract the config file path (the third whitespace-separated field) from a
# "python tools/train.py -c <config>"-style command line.
function func_parser_config() {
    strs=$1
    IFS=" "
    array=(${strs})
    tmp=${array[2]}
    echo ${tmp}
}

BASEDIR=$(dirname "$0")
REPO_ROOT_PATH=$(readlinkf ${BASEDIR}/../)

# $1: tipc config txt; $2: mode passed through to test_train_inference_python.sh
FILENAME=$1

# disable mkldnn on non x86_64 env (mkldnn is x86-only)
arch=$(uname -i)
if [ "$arch" != 'x86_64' ]; then
    sed -i 's/--enable_mkldnn:True|False/--enable_mkldnn:False/g' "$FILENAME"
    sed -i 's/--enable_mkldnn:True/--enable_mkldnn:False/g' "$FILENAME"
fi

# change gpu to xpu in tipc txt configs
sed -i 's/use_gpu/use_xpu/g' "$FILENAME"

# disable benchmark as AutoLog requires the nvidia-smi command
sed -i 's/--benchmark:True/--benchmark:False/g' "$FILENAME"

dataline=$(cat "$FILENAME")
# parser params
IFS=$'\n'
lines=(${dataline})

# replace use_gpu with use_xpu in every training yml referenced by the tipc config
grep -n 'tools/.*yml' "$FILENAME" | cut -d ":" -f 1 \
| while read line_num ; do
    train_cmd=$(func_parser_value "${lines[line_num-1]}")
    trainer_config=$(func_parser_config ${train_cmd})
    sed -i 's/use_gpu/use_xpu/g' "$REPO_ROOT_PATH/$trainer_config"
done

# change gpu to xpu in execution script
sed -i 's/\"gpu\"/\"xpu\"/g' test_tipc/test_train_inference_python.sh

# pass parameters to test_train_inference_python.sh
# NOTE: double quotes (not the original single quotes) so ${cmd} is actually
# interpolated into the log line below instead of printing literally.
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} $2"
echo -e "\033[1;32m Started to run command: ${cmd}! \033[0m"
eval $cmd
tools/infer/utility.py
浏览文件 @
149e1184
...
@@ -36,6 +36,7 @@ def init_args():
...
@@ -36,6 +36,7 @@ def init_args():
# params for prediction engine
# params for prediction engine
parser
.
add_argument
(
"--use_gpu"
,
type
=
str2bool
,
default
=
True
)
parser
.
add_argument
(
"--use_gpu"
,
type
=
str2bool
,
default
=
True
)
parser
.
add_argument
(
"--use_xpu"
,
type
=
str2bool
,
default
=
False
)
parser
.
add_argument
(
"--use_xpu"
,
type
=
str2bool
,
default
=
False
)
parser
.
add_argument
(
"--use_npu"
,
type
=
str2bool
,
default
=
False
)
parser
.
add_argument
(
"--ir_optim"
,
type
=
str2bool
,
default
=
True
)
parser
.
add_argument
(
"--ir_optim"
,
type
=
str2bool
,
default
=
True
)
parser
.
add_argument
(
"--use_tensorrt"
,
type
=
str2bool
,
default
=
False
)
parser
.
add_argument
(
"--use_tensorrt"
,
type
=
str2bool
,
default
=
False
)
parser
.
add_argument
(
"--min_subgraph_size"
,
type
=
int
,
default
=
15
)
parser
.
add_argument
(
"--min_subgraph_size"
,
type
=
int
,
default
=
15
)
...
@@ -245,6 +246,8 @@ def create_predictor(args, mode, logger):
...
@@ -245,6 +246,8 @@ def create_predictor(args, mode, logger):
f
"when using tensorrt, dynamic shape is a suggested option, you can use '--shape_info_filename=shape.txt' for offline dygnamic shape tuning"
f
"when using tensorrt, dynamic shape is a suggested option, you can use '--shape_info_filename=shape.txt' for offline dygnamic shape tuning"
)
)
elif
args
.
use_npu
:
config
.
enable_npu
()
elif
args
.
use_xpu
:
elif
args
.
use_xpu
:
config
.
enable_xpu
(
10
*
1024
*
1024
)
config
.
enable_xpu
(
10
*
1024
*
1024
)
else
:
else
:
...
@@ -413,7 +416,8 @@ def draw_ocr_box_txt(image,
...
@@ -413,7 +416,8 @@ def draw_ocr_box_txt(image,
for
idx
,
(
box
,
txt
)
in
enumerate
(
zip
(
boxes
,
txts
)):
for
idx
,
(
box
,
txt
)
in
enumerate
(
zip
(
boxes
,
txts
)):
if
scores
is
not
None
and
scores
[
idx
]
<
drop_score
:
if
scores
is
not
None
and
scores
[
idx
]
<
drop_score
:
continue
continue
color
=
(
random
.
randint
(
0
,
255
),
random
.
randint
(
0
,
255
),
random
.
randint
(
0
,
255
))
color
=
(
random
.
randint
(
0
,
255
),
random
.
randint
(
0
,
255
),
random
.
randint
(
0
,
255
))
draw_left
.
polygon
(
box
,
fill
=
color
)
draw_left
.
polygon
(
box
,
fill
=
color
)
img_right_text
=
draw_box_txt_fine
((
w
,
h
),
box
,
txt
,
font_path
)
img_right_text
=
draw_box_txt_fine
((
w
,
h
),
box
,
txt
,
font_path
)
pts
=
np
.
array
(
box
,
np
.
int32
).
reshape
((
-
1
,
1
,
2
))
pts
=
np
.
array
(
box
,
np
.
int32
).
reshape
((
-
1
,
1
,
2
))
...
@@ -427,8 +431,10 @@ def draw_ocr_box_txt(image,
...
@@ -427,8 +431,10 @@ def draw_ocr_box_txt(image,
def
draw_box_txt_fine
(
img_size
,
box
,
txt
,
font_path
=
"./doc/fonts/simfang.ttf"
):
def
draw_box_txt_fine
(
img_size
,
box
,
txt
,
font_path
=
"./doc/fonts/simfang.ttf"
):
box_height
=
int
(
math
.
sqrt
((
box
[
0
][
0
]
-
box
[
3
][
0
])
**
2
+
(
box
[
0
][
1
]
-
box
[
3
][
1
])
**
2
))
box_height
=
int
(
box_width
=
int
(
math
.
sqrt
((
box
[
0
][
0
]
-
box
[
1
][
0
])
**
2
+
(
box
[
0
][
1
]
-
box
[
1
][
1
])
**
2
))
math
.
sqrt
((
box
[
0
][
0
]
-
box
[
3
][
0
])
**
2
+
(
box
[
0
][
1
]
-
box
[
3
][
1
])
**
2
))
box_width
=
int
(
math
.
sqrt
((
box
[
0
][
0
]
-
box
[
1
][
0
])
**
2
+
(
box
[
0
][
1
]
-
box
[
1
][
1
])
**
2
))
if
box_height
>
2
*
box_width
and
box_height
>
30
:
if
box_height
>
2
*
box_width
and
box_height
>
30
:
img_text
=
Image
.
new
(
'RGB'
,
(
box_height
,
box_width
),
(
255
,
255
,
255
))
img_text
=
Image
.
new
(
'RGB'
,
(
box_height
,
box_width
),
(
255
,
255
,
255
))
...
@@ -444,12 +450,16 @@ def draw_box_txt_fine(img_size, box, txt, font_path="./doc/fonts/simfang.ttf"):
...
@@ -444,12 +450,16 @@ def draw_box_txt_fine(img_size, box, txt, font_path="./doc/fonts/simfang.ttf"):
font
=
create_font
(
txt
,
(
box_width
,
box_height
),
font_path
)
font
=
create_font
(
txt
,
(
box_width
,
box_height
),
font_path
)
draw_text
.
text
([
0
,
0
],
txt
,
fill
=
(
0
,
0
,
0
),
font
=
font
)
draw_text
.
text
([
0
,
0
],
txt
,
fill
=
(
0
,
0
,
0
),
font
=
font
)
pts1
=
np
.
float32
([[
0
,
0
],
[
box_width
,
0
],
[
box_width
,
box_height
],
[
0
,
box_height
]])
pts1
=
np
.
float32
(
[[
0
,
0
],
[
box_width
,
0
],
[
box_width
,
box_height
],
[
0
,
box_height
]])
pts2
=
np
.
array
(
box
,
dtype
=
np
.
float32
)
pts2
=
np
.
array
(
box
,
dtype
=
np
.
float32
)
M
=
cv2
.
getPerspectiveTransform
(
pts1
,
pts2
)
M
=
cv2
.
getPerspectiveTransform
(
pts1
,
pts2
)
img_text
=
np
.
array
(
img_text
,
dtype
=
np
.
uint8
)
img_text
=
np
.
array
(
img_text
,
dtype
=
np
.
uint8
)
img_right_text
=
cv2
.
warpPerspective
(
img_text
,
M
,
img_size
,
img_right_text
=
cv2
.
warpPerspective
(
img_text
,
M
,
img_size
,
flags
=
cv2
.
INTER_NEAREST
,
flags
=
cv2
.
INTER_NEAREST
,
borderMode
=
cv2
.
BORDER_CONSTANT
,
borderMode
=
cv2
.
BORDER_CONSTANT
,
borderValue
=
(
255
,
255
,
255
))
borderValue
=
(
255
,
255
,
255
))
...
...
tools/program.py
浏览文件 @
149e1184
...
@@ -114,7 +114,7 @@ def merge_config(config, opts):
...
@@ -114,7 +114,7 @@ def merge_config(config, opts):
return
config
return
config
def
check_device
(
use_gpu
,
use_xpu
=
False
):
def
check_device
(
use_gpu
,
use_xpu
=
False
,
use_npu
=
False
):
"""
"""
Log error and exit when set use_gpu=true in paddlepaddle
Log error and exit when set use_gpu=true in paddlepaddle
cpu version.
cpu version.
...
@@ -134,24 +134,8 @@ def check_device(use_gpu, use_xpu=False):
...
@@ -134,24 +134,8 @@ def check_device(use_gpu, use_xpu=False):
if
use_xpu
and
not
paddle
.
device
.
is_compiled_with_xpu
():
if
use_xpu
and
not
paddle
.
device
.
is_compiled_with_xpu
():
print
(
err
.
format
(
"use_xpu"
,
"xpu"
,
"xpu"
,
"use_xpu"
))
print
(
err
.
format
(
"use_xpu"
,
"xpu"
,
"xpu"
,
"use_xpu"
))
sys
.
exit
(
1
)
sys
.
exit
(
1
)
except
Exception
as
e
:
if
use_npu
and
not
paddle
.
device
.
is_compiled_with_npu
():
pass
print
(
err
.
format
(
"use_npu"
,
"npu"
,
"npu"
,
"use_npu"
))
def check_xpu(use_xpu):
    """Exit with status 1 if use_xpu=True on a paddle build without XPU support.

    Prints installation guidance before exiting. The capability probe is
    best-effort: any exception raised while querying paddle (e.g. an older
    paddle without ``is_compiled_with_xpu``) is deliberately swallowed so the
    check never blocks a run on its own.
    """
    err = ("Config use_xpu cannot be set as true while you are "
           "using paddlepaddle cpu/gpu version !\nPlease try: \n"
           "\t1. Install paddlepaddle-xpu to run model on XPU \n"
           "\t2. Set use_xpu as false in config file to run "
           "model on CPU/GPU")
    try:
        if use_xpu and not paddle.is_compiled_with_xpu():
            print(err)
            sys.exit(1)
    except Exception:
        # best-effort probe — never fail here (see docstring)
        pass
...
@@ -279,7 +263,9 @@ def train(config,
...
@@ -279,7 +263,9 @@ def train(config,
model_average
=
True
model_average
=
True
# use amp
# use amp
if
scaler
:
if
scaler
:
with
paddle
.
amp
.
auto_cast
(
level
=
amp_level
,
custom_black_list
=
amp_custom_black_list
):
with
paddle
.
amp
.
auto_cast
(
level
=
amp_level
,
custom_black_list
=
amp_custom_black_list
):
if
model_type
==
'table'
or
extra_input
:
if
model_type
==
'table'
or
extra_input
:
preds
=
model
(
images
,
data
=
batch
[
1
:])
preds
=
model
(
images
,
data
=
batch
[
1
:])
elif
model_type
in
[
"kie"
]:
elif
model_type
in
[
"kie"
]:
...
@@ -479,7 +465,7 @@ def eval(model,
...
@@ -479,7 +465,7 @@ def eval(model,
extra_input
=
False
,
extra_input
=
False
,
scaler
=
None
,
scaler
=
None
,
amp_level
=
'O2'
,
amp_level
=
'O2'
,
amp_custom_black_list
=
[]):
amp_custom_black_list
=
[]):
model
.
eval
()
model
.
eval
()
with
paddle
.
no_grad
():
with
paddle
.
no_grad
():
total_frame
=
0.0
total_frame
=
0.0
...
@@ -500,7 +486,9 @@ def eval(model,
...
@@ -500,7 +486,9 @@ def eval(model,
# use amp
# use amp
if
scaler
:
if
scaler
:
with
paddle
.
amp
.
auto_cast
(
level
=
amp_level
,
custom_black_list
=
amp_custom_black_list
):
with
paddle
.
amp
.
auto_cast
(
level
=
amp_level
,
custom_black_list
=
amp_custom_black_list
):
if
model_type
==
'table'
or
extra_input
:
if
model_type
==
'table'
or
extra_input
:
preds
=
model
(
images
,
data
=
batch
[
1
:])
preds
=
model
(
images
,
data
=
batch
[
1
:])
elif
model_type
in
[
"kie"
]:
elif
model_type
in
[
"kie"
]:
...
@@ -627,14 +615,9 @@ def preprocess(is_train=False):
...
@@ -627,14 +615,9 @@ def preprocess(is_train=False):
logger
=
get_logger
(
log_file
=
log_file
)
logger
=
get_logger
(
log_file
=
log_file
)
# check if set use_gpu=True in paddlepaddle cpu version
# check if set use_gpu=True in paddlepaddle cpu version
use_gpu
=
config
[
'Global'
]
[
'use_gpu'
]
use_gpu
=
config
[
'Global'
]
.
get
(
'use_gpu'
,
False
)
use_xpu
=
config
[
'Global'
].
get
(
'use_xpu'
,
False
)
use_xpu
=
config
[
'Global'
].
get
(
'use_xpu'
,
False
)
use_npu
=
config
[
'Global'
].
get
(
'use_npu'
,
False
)
# check if set use_xpu=True in paddlepaddle cpu/gpu version
use_xpu
=
False
if
'use_xpu'
in
config
[
'Global'
]:
use_xpu
=
config
[
'Global'
][
'use_xpu'
]
check_xpu
(
use_xpu
)
alg
=
config
[
'Architecture'
][
'algorithm'
]
alg
=
config
[
'Architecture'
][
'algorithm'
]
assert
alg
in
[
assert
alg
in
[
...
@@ -647,10 +630,12 @@ def preprocess(is_train=False):
...
@@ -647,10 +630,12 @@ def preprocess(is_train=False):
if
use_xpu
:
if
use_xpu
:
device
=
'xpu:{0}'
.
format
(
os
.
getenv
(
'FLAGS_selected_xpus'
,
0
))
device
=
'xpu:{0}'
.
format
(
os
.
getenv
(
'FLAGS_selected_xpus'
,
0
))
elif
use_npu
:
device
=
'npu:{0}'
.
format
(
os
.
getenv
(
'FLAGS_selected_npus'
,
0
))
else
:
else
:
device
=
'gpu:{}'
.
format
(
dist
.
ParallelEnv
()
device
=
'gpu:{}'
.
format
(
dist
.
ParallelEnv
()
.
dev_id
)
if
use_gpu
else
'cpu'
.
dev_id
)
if
use_gpu
else
'cpu'
check_device
(
use_gpu
,
use_xpu
)
check_device
(
use_gpu
,
use_xpu
,
use_npu
)
device
=
paddle
.
set_device
(
device
)
device
=
paddle
.
set_device
(
device
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录