weixin_41840029 / PaddleOCR
Forked from PaddlePaddle / PaddleOCR (in sync with the fork source project)
Commit 80f0a355 (unverified)
Authored Jul 27, 2020 by shaohua.zhang; committed via GitHub on Jul 27, 2020

Update utility.py

Parent: c4ca35a2
1 changed file with 84 additions and 126 deletions (+84 / -126): ppocr/utils/utility.py

The change removes the fluid checkpoint save/load helpers that had been placed in this file and replaces them with general-purpose utilities: logger setup, dynamic module loading, global/reader config checks, image file listing, and multi-device program compilation.
Removed (previous content of ppocr/utils/utility.py):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import errno
import os
import shutil
import tempfile

import paddle
import paddle.fluid as fluid

from .utility import initial_logger
import re

logger = initial_logger()


def _mkdir_if_not_exist(path):
    """
    mkdir if not exists, ignore the exception when multiprocess mkdir together
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as e:
            if e.errno == errno.EEXIST and os.path.isdir(path):
                logger.warning(
                    'be happy if some process has already created {}'.format(
                        path))
            else:
                raise OSError('Failed to mkdir {}'.format(path))


def _load_state(path):
    if os.path.exists(path + '.pdopt'):
        # XXX another hack to ignore the optimizer state
        tmp = tempfile.mkdtemp()
        dst = os.path.join(tmp, os.path.basename(os.path.normpath(path)))
        shutil.copy(path + '.pdparams', dst + '.pdparams')
        state = fluid.io.load_program_state(dst)
        shutil.rmtree(tmp)
    else:
        state = fluid.io.load_program_state(path)
    return state


def load_params(exe, prog, path, ignore_params=[]):
    """
    Load model from the given path.
    Args:
        exe (fluid.Executor): The fluid.Executor object.
        prog (fluid.Program): load weight to which Program object.
        path (string): URL string or loca model path.
        ignore_params (list): ignore variable to load when finetuning.
            It can be specified by finetune_exclude_pretrained_params
            and the usage can refer to docs/advanced_tutorials/TRANSFER_LEARNING.md
    """
    if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
        raise ValueError("Model pretrain path {} does not "
                         "exists.".format(path))

    logger.info('Loading parameters from {}...'.format(path))

    ignore_set = set()
    state = _load_state(path)

    # ignore the parameter which mismatch the shape
    # between the model and pretrain weight.
    all_var_shape = {}
    for block in prog.blocks:
        for param in block.all_parameters():
            all_var_shape[param.name] = param.shape
    ignore_set.update([
        name for name, shape in all_var_shape.items()
        if name in state and shape != state[name].shape
    ])

    if ignore_params:
        all_var_names = [var.name for var in prog.list_vars()]
        ignore_list = filter(
            lambda var: any([re.match(name, var) for name in ignore_params]),
            all_var_names)
        ignore_set.update(list(ignore_list))

    if len(ignore_set) > 0:
        for k in ignore_set:
            if k in state:
                logger.warning('variable {} not used'.format(k))
                del state[k]
    fluid.io.set_program_state(prog, state)


def init_model(config, program, exe):
    """
    load model from checkpoint or pretrained_model
    """
    checkpoints = config['Global'].get('checkpoints')
    if checkpoints:
        path = checkpoints
        fluid.load(program, path, exe)
        logger.info("Finish initing model from {}".format(path))
        return

    pretrain_weights = config['Global'].get('pretrain_weights')
    if pretrain_weights:
        path = pretrain_weights
        load_params(exe, program, path)
        logger.info("Finish initing model from {}".format(path))
    return


def save_model(program, model_path):
    """
    save model to the target path
    """
    fluid.save(program, model_path)
    logger.info("Already save model in {}".format(model_path))
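For context, a minimal sketch of how the removed init_model / save_model helpers are typically driven from a fluid training loop. This example is not part of the commit; the program setup, config values, and paths are illustrative assumptions.

# Minimal usage sketch (not part of this commit); paths and config values are assumed.
import paddle.fluid as fluid

# At the parent commit these helpers lived in ppocr/utils/utility.py.
from ppocr.utils.utility import init_model, save_model

place = fluid.CPUPlace()
exe = fluid.Executor(place)
program = fluid.default_main_program()
# ... network construction would happen here before running startup ...
exe.run(fluid.default_startup_program())

# 'checkpoints' takes priority over 'pretrain_weights' inside init_model().
config = {'Global': {'checkpoints': None,
                     'pretrain_weights': './pretrain_models/rec_model'}}
init_model(config, program, exe)

# Writes <path>.pdparams / <path>.pdopt via fluid.save().
save_model(program, './output/rec/iter_epoch_1')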
Added (new content of ppocr/utils/utility.py):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os


def initial_logger():
    FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
    logging.basicConfig(level=logging.INFO, format=FORMAT)
    logger = logging.getLogger(__name__)
    return logger


logger = initial_logger()

import importlib


def create_module(module_str):
    tmpss = module_str.split(",")
    assert len(tmpss) == 2, "Error formate\
        of the module path: {}".format(module_str)
    module_name, function_name = tmpss[0], tmpss[1]
    somemodule = importlib.import_module(module_name, __package__)
    function = getattr(somemodule, function_name)
    return function


def get_check_global_params(mode):
    check_params = ['use_gpu', 'max_text_length', 'image_shape', \
                    'image_shape', 'character_type', 'loss_type']
    if mode == "train_eval":
        check_params = check_params + [ \
            'train_batch_size_per_card', 'test_batch_size_per_card']
    elif mode == "test":
        check_params = check_params + ['test_batch_size_per_card']
    return check_params


def get_check_reader_params(mode):
    check_params = []
    if mode == "train_eval":
        check_params = ['TrainReader', 'EvalReader']
    elif mode == "test":
        check_params = ['TestReader']
    return check_params


def get_image_file_list(img_file):
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))

    img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp']
    if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end:
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            if single_file.split('.')[-1] in img_end:
                imgs_lists.append(os.path.join(img_file, single_file))
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    return imgs_lists


from paddle import fluid


def create_multi_devices_program(program, loss_var_name):
    build_strategy = fluid.BuildStrategy()
    build_strategy.memory_optimize = False
    build_strategy.enable_inplace = True
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_iteration_per_drop_scope = 1
    compile_program = fluid.CompiledProgram(program).with_data_parallel(
        loss_name=loss_var_name,
        build_strategy=build_strategy,
        exec_strategy=exec_strategy)
    return compile_program
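For context, a minimal sketch of how the re-added helpers are used. This example is not part of the commit; the image directory and the "module,function" string passed to create_module are illustrative assumptions.

# Minimal usage sketch (not part of this commit); paths and module strings are assumed.
from ppocr.utils.utility import (initial_logger, create_module,
                                 get_check_global_params, get_image_file_list)

logger = initial_logger()

# Collect every jpg/jpeg/png/bmp file from a directory (a single image path also works).
image_list = get_image_file_list('./doc/imgs')
logger.info("found {} images".format(len(image_list)))

# Config keys that a "train_eval" run is expected to provide, per get_check_global_params().
required_keys = get_check_global_params("train_eval")

# create_module() turns a "module,function" string from a YAML config into a callable;
# the target below is an assumed example, not taken from this commit.
build_reader = create_module("ppocr.data.rec.dataset_traversal,SimpleReader")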