diff --git a/06.understand_sentiment/README.md b/06.understand_sentiment/README.md
index ab3bb8562b4ec4fe5a0f5d7971ba1a69026e48fd..946243fd93c332f36da1d59dc736aa1ebb90d3b5 100644
--- a/06.understand_sentiment/README.md
+++ b/06.understand_sentiment/README.md
@@ -173,7 +173,7 @@ def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
 
     # Calculate word vectorvector
     emb = fluid.layers.embedding(
-        input=data, size=[input_dim, emb_dim], is_sparse=True)=True)
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
 
     #First stack
     #Fully connected layer
@@ -191,7 +191,7 @@ def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
         inputs = [fc, lstm]
 
     #pooling layer
-    pc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
+    fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
     lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')
 
     #Fully connected layer, softmax prediction