Commit 3bed2e1f authored by 文幕地方

Merge branch 'dygraph' of https://github.com/PaddlePaddle/PaddleOCR into tttt

@@ -53,6 +53,7 @@ def load_model(config, model, optimizer=None, model_type='det'):
     checkpoints = global_config.get('checkpoints')
     pretrained_model = global_config.get('pretrained_model')
     best_model_dict = {}
+    is_float16 = False
     if model_type == 'vqa':
         # NOTE: for vqa model, resume training is not supported now
@@ -100,6 +101,9 @@ def load_model(config, model, optimizer=None, model_type='det'):
                     key, params.keys()))
                 continue
             pre_value = params[key]
+            if pre_value.dtype == paddle.float16:
+                pre_value = pre_value.astype(paddle.float32)
+                is_float16 = True
             if list(value.shape) == list(pre_value.shape):
                 new_state_dict[key] = pre_value
             else:
@@ -107,7 +111,10 @@ def load_model(config, model, optimizer=None, model_type='det'):
                     "The shape of model params {} {} not matched with loaded params shape {} !".
                     format(key, value.shape, pre_value.shape))
         model.set_state_dict(new_state_dict)
+        if is_float16:
+            logger.info(
+                "The parameter type is float16, which is converted to float32 when loading"
+            )
         if optimizer is not None:
             if os.path.exists(checkpoints + '.pdopt'):
                 optim_dict = paddle.load(checkpoints + '.pdopt')
@@ -126,9 +133,10 @@ def load_model(config, model, optimizer=None, model_type='det'):
                 best_model_dict['start_epoch'] = states_dict['epoch'] + 1
         logger.info("resume from {}".format(checkpoints))
     elif pretrained_model:
-        load_pretrained_params(model, pretrained_model)
+        is_float16 = load_pretrained_params(model, pretrained_model)
     else:
         logger.info('train from scratch')
+    best_model_dict['is_float16'] = is_float16
     return best_model_dict
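Taken together, the load_model changes make checkpoint loading tolerant of half-precision weights: every float16 parameter is cast back to float32 before set_state_dict, and best_model_dict['is_float16'] tells the caller whether any cast happened. A minimal sketch of that cast-and-flag step in isolation (the helper name and checkpoint path are illustrative, not part of this commit):

```python
import paddle

def cast_fp16_to_fp32(params):
    # Cast every float16 entry of a loaded state dict to float32 and
    # report whether any cast actually happened.
    is_float16 = False
    for key, value in params.items():
        if value.dtype == paddle.float16:
            params[key] = value.astype(paddle.float32)
            is_float16 = True
    return params, is_float16

params = paddle.load('output/latest.pdparams')  # hypothetical path
params, is_float16 = cast_fp16_to_fp32(params)
```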
@@ -142,19 +150,28 @@ def load_pretrained_params(model, path):
     params = paddle.load(path + '.pdparams')
     state_dict = model.state_dict()
     new_state_dict = {}
+    is_float16 = False
     for k1 in params.keys():
         if k1 not in state_dict.keys():
             logger.warning("The pretrained params {} not in model".format(k1))
         else:
+            if params[k1].dtype == paddle.float16:
+                params[k1] = params[k1].astype(paddle.float32)
+                is_float16 = True
             if list(state_dict[k1].shape) == list(params[k1].shape):
                 new_state_dict[k1] = params[k1]
             else:
                 logger.warning(
                     "The shape of model params {} {} not matched with loaded params {} {} !".
                     format(k1, state_dict[k1].shape, k1, params[k1].shape))
     model.set_state_dict(new_state_dict)
+    if is_float16:
+        logger.info(
+            "The parameter type is float16, which is converted to float32 when loading"
+        )
     logger.info("load pretrain successful from {}".format(path))
-    return model
+    return is_float16
 
 
 def save_model(model,
......
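Note that the return contract of load_pretrained_params changes here: the function still updates the model in place through set_state_dict, but it now returns the is_float16 flag instead of the model object, and load_model records that flag in best_model_dict. A self-contained round-trip sketch of the same pattern (the layer and file name are made up for illustration):

```python
import paddle

# Save a small layer's weights as float16, then reload and cast them back
# to float32 the way load_pretrained_params now does, returning a flag.
layer = paddle.nn.Linear(4, 2)
fp16_state = {k: v.astype(paddle.float16)
              for k, v in layer.state_dict().items()}
paddle.save(fp16_state, 'tmp_fp16.pdparams')  # hypothetical file name

params = paddle.load('tmp_fp16.pdparams')
is_float16 = False
for k in params:
    if params[k].dtype == paddle.float16:
        params[k] = params[k].astype(paddle.float32)
        is_float16 = True
layer.set_state_dict(params)
print(is_float16)  # True: the stored weights were half precision
```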
@@ -6,7 +6,7 @@ Global.use_gpu:True|True
 Global.auto_cast:fp32
 Global.epoch_num:lite_train_lite_infer=1|whole_train_whole_infer=17
 Global.save_model_dir:./output/
-Train.loader.batch_size_per_card:lite_train_lite_infer=8|whole_train_whole_infer=8
+Train.loader.batch_size_per_card:lite_train_lite_infer=4|whole_train_whole_infer=8
 Architecture.Backbone.checkpoints:null
 train_model_name:latest
 train_infer_img_dir:ppstructure/docs/vqa/input/zh_val_42.jpg
......
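This test_tipc config line stores one value per test mode, separated by '|', using 'mode=value' pairs where the modes differ; the change only lowers the lite_train_lite_infer batch size from 8 to 4. A rough illustration of how such a line can be resolved for one mode (a simplification for clarity, not the actual test_tipc parser, which lives in shell scripts):

```python
def resolve(line, mode):
    # Resolve a "key:mode1=v1|mode2=v2" line for one test mode; values
    # without a "mode=" prefix (e.g. "Global.auto_cast:fp32") apply as-is.
    key, _, value = line.partition(':')
    for part in value.split('|'):
        name, eq, v = part.partition('=')
        if eq and name == mode:
            return key, v
    return key, value

print(resolve('Train.loader.batch_size_per_card:'
              'lite_train_lite_infer=4|whole_train_whole_infer=8',
              'lite_train_lite_infer'))
# ('Train.loader.batch_size_per_card', '4')
```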
@@ -160,18 +160,18 @@ def to_float32(preds):
         for k in preds:
             if isinstance(preds[k], dict) or isinstance(preds[k], list):
                 preds[k] = to_float32(preds[k])
-            elif isinstance(preds[k], paddle.Tensor):
-                preds[k] = preds[k].astype(paddle.float32)
+            else:
+                preds[k] = paddle.to_tensor(preds[k], dtype='float32')
     elif isinstance(preds, list):
         for k in range(len(preds)):
             if isinstance(preds[k], dict):
                 preds[k] = to_float32(preds[k])
             elif isinstance(preds[k], list):
                 preds[k] = to_float32(preds[k])
-            elif isinstance(preds[k], paddle.Tensor):
-                preds[k] = preds[k].astype(paddle.float32)
+            else:
+                preds[k] = paddle.to_tensor(preds[k], dtype='float32')
-    elif isinstance(preds[k], paddle.Tensor):
-        preds = preds.astype(paddle.float32)
+    else:
+        preds = paddle.to_tensor(preds, dtype='float32')
     return preds
......
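The updated to_float32 no longer gates the cast on isinstance(preds, paddle.Tensor): every non-dict, non-list leaf is converted with paddle.to_tensor(..., dtype='float32'), and the old outer branch elif isinstance(preds[k], paddle.Tensor) (where k is unbound when preds is itself a tensor, a latent NameError) becomes a plain else. A self-contained sketch of the updated helper, assuming the hunk shown covers the whole function body:

```python
import paddle

def to_float32(preds):
    # Recursively cast AMP (float16) outputs back to float32. Dicts and
    # lists are walked; every other leaf goes through paddle.to_tensor.
    if isinstance(preds, dict):
        for k in preds:
            if isinstance(preds[k], dict) or isinstance(preds[k], list):
                preds[k] = to_float32(preds[k])
            else:
                preds[k] = paddle.to_tensor(preds[k], dtype='float32')
    elif isinstance(preds, list):
        for k in range(len(preds)):
            if isinstance(preds[k], (dict, list)):
                preds[k] = to_float32(preds[k])
            else:
                preds[k] = paddle.to_tensor(preds[k], dtype='float32')
    else:
        preds = paddle.to_tensor(preds, dtype='float32')
    return preds

preds = {'maps': paddle.ones([1, 3]).astype('float16'),
         'aux': [paddle.zeros([2]).astype('float16')]}
print(to_float32(preds)['maps'].dtype)  # paddle.float32
```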