weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)
Commit 3bed2e1f
Authored on Aug 08, 2022 by 文幕地方

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into tttt

Parents: 8b6ba9b4, 6445362f
Showing 3 changed files with 27 additions and 10 deletions (+27, -10)
ppocr/utils/save_load.py (+20, -3)
test_tipc/configs/layoutxlm_ser/train_infer_python.txt (+1, -1)
tools/program.py (+6, -6)
ppocr/utils/save_load.py

@@ -53,6 +53,7 @@ def load_model(config, model, optimizer=None, model_type='det'):
     checkpoints = global_config.get('checkpoints')
     pretrained_model = global_config.get('pretrained_model')
     best_model_dict = {}
+    is_float16 = False
     if model_type == 'vqa':
         # NOTE: for vqa model, resume training is not supported now
@@ -100,6 +101,9 @@ def load_model(config, model, optimizer=None, model_type='det'):
                         key, params.keys()))
                     continue
                 pre_value = params[key]
+                if pre_value.dtype == paddle.float16:
+                    pre_value = pre_value.astype(paddle.float32)
+                    is_float16 = True
                 if list(value.shape) == list(pre_value.shape):
                     new_state_dict[key] = pre_value
                 else:
@@ -107,7 +111,10 @@ def load_model(config, model, optimizer=None, model_type='det'):
                         "The shape of model params {} {} not matched with loaded params shape {} !".
                         format(key, value.shape, pre_value.shape))
             model.set_state_dict(new_state_dict)
+            if is_float16:
+                logger.info(
+                    "The parameter type is float16, which is converted to float32 when loading"
+                )
             if optimizer is not None:
                 if os.path.exists(checkpoints + '.pdopt'):
                     optim_dict = paddle.load(checkpoints + '.pdopt')
@@ -126,9 +133,10 @@ def load_model(config, model, optimizer=None, model_type='det'):
             best_model_dict['start_epoch'] = states_dict['epoch'] + 1
         logger.info("resume from {}".format(checkpoints))
     elif pretrained_model:
-        load_pretrained_params(model, pretrained_model)
+        is_float16 = load_pretrained_params(model, pretrained_model)
     else:
         logger.info('train from scratch')
+    best_model_dict['is_float16'] = is_float16
     return best_model_dict
@@ -142,19 +150,28 @@ def load_pretrained_params(model, path):
     params = paddle.load(path + '.pdparams')
     state_dict = model.state_dict()
     new_state_dict = {}
+    is_float16 = False
     for k1 in params.keys():
         if k1 not in state_dict.keys():
             logger.warning("The pretrained params {} not in model".format(k1))
         else:
+            if params[k1].dtype == paddle.float16:
+                params[k1] = params[k1].astype(paddle.float32)
+                is_float16 = True
             if list(state_dict[k1].shape) == list(params[k1].shape):
                 new_state_dict[k1] = params[k1]
             else:
                 logger.warning(
                     "The shape of model params {} {} not matched with loaded params {} {} !".
                     format(k1, state_dict[k1].shape, k1, params[k1].shape))
     model.set_state_dict(new_state_dict)
+    if is_float16:
+        logger.info(
+            "The parameter type is float16, which is converted to float32 when loading"
+        )
     logger.info("load pretrain successful from {}".format(path))
-    return model
+    return is_float16


 def save_model(model,
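In short, load_model and load_pretrained_params now detect parameters stored as float16, cast them to float32 while loading, and report that through best_model_dict['is_float16'] (or the return value of load_pretrained_params). The core of both hunks is the same per-parameter check; below is a self-contained sketch of that check for illustration only, where the function name cast_fp16_to_fp32 is hypothetical and does not exist in PaddleOCR:

    import paddle

    def cast_fp16_to_fp32(params):
        # Mirror of the check added in load_model / load_pretrained_params:
        # cast any float16 tensor in a state dict to float32 and remember
        # whether a cast happened.
        is_float16 = False
        for name in params.keys():
            if params[name].dtype == paddle.float16:
                params[name] = params[name].astype(paddle.float32)
                is_float16 = True
        return params, is_float16

    # Tiny fake "state dict" with one fp16 entry.
    fake_params = {"w": paddle.ones([2, 2], dtype="float16")}
    fake_params, converted = cast_fp16_to_fp32(fake_params)
    assert fake_params["w"].dtype == paddle.float32 and converted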
test_tipc/configs/layoutxlm_ser/train_infer_python.txt

@@ -6,7 +6,7 @@ Global.use_gpu:True|True
 Global.auto_cast:fp32
 Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=17
 Global.save_model_dir:./output/
-Train.loader.batch_size_per_card:lite_train_lite_infer=8|whole_train_whole_infer=8
+Train.loader.batch_size_per_card:lite_train_lite_infer=4|whole_train_whole_infer=8
 Architecture.Backbone.checkpoints:null
 train_model_name:latest
 train_infer_img_dir:ppstructure/docs/vqa/input/zh_val_42.jpg
tools/program.py

@@ -160,18 +160,18 @@ def to_float32(preds):
         for k in preds:
             if isinstance(preds[k], dict) or isinstance(preds[k], list):
                 preds[k] = to_float32(preds[k])
-            else:
-                preds[k] = paddle.to_tensor(preds[k], dtype='float32')
+            elif isinstance(preds[k], paddle.Tensor):
+                preds[k] = preds[k].astype(paddle.float32)
     elif isinstance(preds, list):
         for k in range(len(preds)):
             if isinstance(preds[k], dict):
                 preds[k] = to_float32(preds[k])
             elif isinstance(preds[k], list):
                 preds[k] = to_float32(preds[k])
-            else:
-                preds[k] = paddle.to_tensor(preds[k], dtype='float32')
+            elif isinstance(preds[k], paddle.Tensor):
+                preds[k] = preds[k].astype(paddle.float32)
-    else:
-        preds = paddle.to_tensor(preds, dtype='float32')
+    elif isinstance(preds, paddle.Tensor):
+        preds = preds.astype(paddle.float32)
     return preds
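This change narrows the cast in to_float32: instead of wrapping every non-container value with paddle.to_tensor(..., dtype='float32'), the new branches only cast values that are already paddle.Tensor instances and leave everything else untouched. A minimal usage sketch of the resulting behaviour, assuming it runs from the PaddleOCR repo root with its dependencies installed so that tools.program is importable (the dict keys below are illustrative, not from this commit):

    import paddle
    from tools.program import to_float32

    preds = {
        "maps": paddle.ones([1, 2], dtype="float16"),  # tensor entry: cast to float32
        "shape_list": [640, 640],                      # plain Python list: left as-is
    }
    preds = to_float32(preds)
    assert preds["maps"].dtype == paddle.float32
    assert preds["shape_list"] == [640, 640]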