未验证 提交 5678d3f0 编写于 作者: L Li Fuchen 提交者: GitHub

Fix a save/load bug in the language model (#4511)

上级 13170fdf
......@@ -178,7 +178,7 @@ def main():
print(args.init_from_pretrain_model)
raise Warning("The pretrained params do not exist.")
return
fluid.load(main_program, args.init_from_pretrain_model)
fluid.load(main_program, args.init_from_pretrain_model, exe)
print("finish initing model from pretrained params from %s" %
(args.init_from_pretrain_model))
......
......@@ -241,8 +241,6 @@ def lm_model(hidden_size,
name="init_cell",
shape=[None, num_layers, hidden_size],
dtype='float32')
init_cell.persistable = True
init_hidden.persistable = True
init_hidden = layers.transpose(init_hidden, perm=[1, 0, 2])
init_cell = layers.transpose(init_cell, perm=[1, 0, 2])
......@@ -334,8 +332,6 @@ def lm_model(hidden_size,
loss = layers.reduce_sum(loss)
loss.persistable = True
last_cell.persistable = True
last_hidden.persistable = True
# This will feed last_hidden, last_cell to init_hidden, init_cell, which
# can be used directly in next batch. This can avoid the fetching of
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册