diff --git a/06.understand_sentiment/README.cn.md b/06.understand_sentiment/README.cn.md
index e09036cd42293ace4239b9e1424d5fedce0b6a43..8f35f06c73998b3822f6a7c2e53cd14fc5e6b3f5 100755
--- a/06.understand_sentiment/README.cn.md
+++ b/06.understand_sentiment/README.cn.md
@@ -151,7 +151,7 @@ BATCH_SIZE = 128 #batch的大小
 ```python
 #文本卷积神经网络
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -183,7 +183,7 @@ def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     #计算词向量
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     #第一层栈
diff --git a/06.understand_sentiment/README.md b/06.understand_sentiment/README.md
index 0220bfe7141f3dff79a4995413e1a3b6b8d1c4cc..4a91a5c39927466f2e8ea6b90017496986f30473 100644
--- a/06.understand_sentiment/README.md
+++ b/06.understand_sentiment/README.md
@@ -140,7 +140,7 @@ Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling
 ```python
 #Textconvolution neural network
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -172,7 +172,7 @@ The code of the stack bidirectional LSTM `stacked_lstm_net` is as follows:
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     # Calculate word vectorvector
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     #First stack
diff --git a/06.understand_sentiment/index.cn.html b/06.understand_sentiment/index.cn.html
index d111e50a2292a010db8ee89d98b3237ff14790d2..7c551554a1b94f934af0897c6847222e77873068 100644
--- a/06.understand_sentiment/index.cn.html
+++ b/06.understand_sentiment/index.cn.html
@@ -193,7 +193,7 @@ BATCH_SIZE = 128 #batch的大小
 ```python
 #文本卷积神经网络
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -225,7 +225,7 @@ def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     #计算词向量
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     #第一层栈
diff --git a/06.understand_sentiment/index.html b/06.understand_sentiment/index.html
index 94ce282506488958d2b8783290f60aa41eea2040..c363ba148af0835c623ddee7068eb05f0b4b7e0d 100644
--- a/06.understand_sentiment/index.html
+++ b/06.understand_sentiment/index.html
@@ -182,7 +182,7 @@ Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling
 ```python
 #Textconvolution neural network
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -214,7 +214,7 @@ The code of the stack bidirectional LSTM `stacked_lstm_net` is as follows:
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     # Calculate word vectorvector
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     #First stack
diff --git a/06.understand_sentiment/train_conv.py b/06.understand_sentiment/train_conv.py
index 46da94e29e3ed7419d84c527642b5efc7249722a..784e5a90a1fcfc6a2fa5ca13f8ffd8d1259c567f 100644
--- a/06.understand_sentiment/train_conv.py
+++ b/06.understand_sentiment/train_conv.py
@@ -42,8 +42,7 @@ def parse_args():
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
-        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    emb = fluid.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
         num_filters=hid_dim,
diff --git a/06.understand_sentiment/train_dyn_rnn.py b/06.understand_sentiment/train_dyn_rnn.py
index b967a21f66a2b0afb1b880c7f3bd1008c9fc1b97..439bc930c23baa88955a64cb5f2df02d5811c8a5 100644
--- a/06.understand_sentiment/train_dyn_rnn.py
+++ b/06.understand_sentiment/train_dyn_rnn.py
@@ -42,8 +42,7 @@ def parse_args():
 def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size):
-    emb = fluid.layers.embedding(
-        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    emb = fluid.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
     sentence = fluid.layers.fc(input=emb, size=lstm_size * 4, act='tanh')
     lstm, _ = fluid.layers.dynamic_lstm(sentence, size=lstm_size * 4)
diff --git a/06.understand_sentiment/train_stacked_lstm.py b/06.understand_sentiment/train_stacked_lstm.py
index 11a5dfdcfe2b10f6a39795e0b0a9b964ff935cb7..22cd3bc87f3aa5e34cc3d8f78c9079c3ca0aff18 100644
--- a/06.understand_sentiment/train_stacked_lstm.py
+++ b/06.understand_sentiment/train_stacked_lstm.py
@@ -46,8 +46,7 @@ def parse_args():
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     assert stacked_num % 2 == 1
-    emb = fluid.layers.embedding(
-        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    emb = fluid.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
     fc1 = fluid.layers.fc(input=emb, size=hid_dim)
     lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)
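
For reference, here is a minimal sketch (not part of the patch) of the updated call site. It assumes a PaddlePaddle Fluid release where `fluid.embedding` is available alongside the older `fluid.layers.embedding`; the `dict_dim`/`emb_dim` values and the `fluid.data` feed variable are illustrative only, while the `fluid.embedding(input=..., size=..., is_sparse=True)` call itself mirrors the diff above.

```python
# Illustrative sketch only -- assumes a Fluid version that exposes
# paddle.fluid.embedding as the replacement for paddle.fluid.layers.embedding.
import paddle.fluid as fluid

dict_dim, emb_dim = 5147, 128  # example vocabulary size and embedding width

# Variable-length sequence of word ids (LoD input), as the sentiment nets use.
data = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1)

# Updated lookup: same [vocab, width] size and sparse-gradient flag as before;
# only the namespace of the embedding API changes in this patch.
emb = fluid.embedding(input=data, size=[dict_dim, emb_dim], is_sparse=True)
```

The arguments passed by `convolution_net`, `dynamic_rnn_lstm`, and `stacked_lstm_net` are unchanged; each hunk only moves the call from the `fluid.layers` namespace to `fluid.embedding`.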