How can I add an LSTM after the sequence_output of PaddleHub's BERT?
Created by: wxl1351641822
Here is the code I am trying to implement:
```python
import paddle.fluid as fluid
import paddlehub as hub

max_seq_len = 128
# dataset = MyDataset()
module = hub.Module(name="bert_chinese_L-12_H-768_A-12")
inputs, outputs, program = module.context(
    trainable=True, max_seq_len=max_seq_len)
print(outputs)

# Use "pooled_output" for classification tasks on an entire sentence.
pooled_output = outputs["pooled_output"]
# Use "sequence_output" for token-level features; this is what the LSTM
# should consume. Shape: [-1, max_seq_len, 768], a padded dense tensor.
seq_output = outputs["sequence_output"]
print(seq_output)

hidden_units = [128]
cls_feats = fluid.layers.dropout(
    x=seq_output,
    dropout_prob=0,
    dropout_implementation="upscale_in_train")
print(cls_feats)

if hidden_units is not None:
    for n_hidden in hidden_units:
        cls_feats = fluid.layers.fc(
            input=cls_feats, size=n_hidden, act=None, bias_attr=None)
        print(cls_feats)
        # dynamic_lstm and sequence_pool are sequence ops that expect a
        # LoDTensor input, which appears to be the source of the error below.
        lstm_h, c = fluid.layers.dynamic_lstm(
            input=cls_feats, size=n_hidden, is_reverse=False)
        print(lstm_h)
        # max pooling over time
        lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
        print(lstm_max)
        # activation
        cls_feats = fluid.layers.tanh(lstm_max)
        print(cls_feats)

logits = fluid.layers.fc(
    input=cls_feats,
    size=3,
    param_attr=fluid.ParamAttr(
        name="cls_out_w",
        initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
    bias_attr=fluid.ParamAttr(
        name="cls_out_b", initializer=fluid.initializer.Constant(0.)),
    act="softmax")
ret_infers = fluid.layers.reshape(
    x=fluid.layers.argmax(logits, axis=1), shape=[-1, 1])
```
Running this fails with:

```
----------------------
Error Message Summary:
----------------------
Error: Size of target LoD should be greater than 1.
  [Hint: Expected level0.size() > 1UL, but received level0.size():1 <= 1UL:1.] at (/paddle/paddle/fluid/operators/lod_reset_op.h:60)
  [operator < lod_reset > error]
```
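From the hint, the problem seems to be that `sequence_output` is a plain padded Tensor of shape `[-1, max_seq_len, 768]` with no LoD information, while `fluid.layers.dynamic_lstm` and `fluid.layers.sequence_pool` are sequence ops that require a LoDTensor carrying per-example lengths (the failing check `level0.size() > 1` is exactly that). A minimal sketch of one possible workaround: recover the real lengths from the module's `input_mask` input and strip the padding with `fluid.layers.sequence_unpad` before the LSTM. The `inputs["input_mask"]` key and its `[-1, max_seq_len, 1]` shape are assumptions about the PaddleHub BERT module, and `hidden_dim` is illustrative:

```python
import paddle.fluid as fluid

# Assumption: inputs["input_mask"] is [-1, max_seq_len, 1] with 1.0 at real
# tokens and 0.0 at padding, so summing it per example gives the true length.
seq_len = fluid.layers.reduce_sum(inputs["input_mask"], dim=1)  # [-1, 1]
seq_len = fluid.layers.squeeze(seq_len, axes=[1])               # [-1]
seq_len = fluid.layers.cast(seq_len, dtype="int64")             # lengths must be int64

# Strip the padding: the result is a LoDTensor of shape [sum_of_lengths, 768]
# whose LoD records where each example starts, which is what sequence ops need.
unpad_output = fluid.layers.sequence_unpad(x=seq_output, length=seq_len)

hidden_dim = 128  # illustrative
# dynamic_lstm expects its input width to be 4 * hidden size (the gate
# projections are precomputed by this fc), hence size=hidden_dim * 4.
fc0 = fluid.layers.fc(input=unpad_output, size=hidden_dim * 4)
lstm_h, _ = fluid.layers.dynamic_lstm(
    input=fc0, size=hidden_dim * 4, is_reverse=False)

# Max-pool over time and apply the activation, as in the snippet above.
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type="max")
cls_feats = fluid.layers.tanh(lstm_max)  # [-1, hidden_dim]
```

With this, `cls_feats` is `[-1, hidden_dim]` and can feed the final `fc` that produces `logits` unchanged. An alternative, if running on GPU, is `fluid.layers.lstm`, which is cuDNN-based and consumes padded tensors directly, avoiding the unpad step.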