Unverified commit 8f8eebbc, authored by Li Fuchen, committed by GitHub

Change fluid.layers.embedding to fluid.embedding to fix a dimension bug (#1934)

Parent a95aeae6
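For context on the dimension bug this commit fixes: `fluid.layers.embedding` expects its integer word-id input to carry an extra trailing dimension of 1, while `fluid.embedding` consumes the plain id tensor directly, so the README examples and training scripts no longer need that extra dimension. Below is a minimal sketch of the difference, assuming PaddlePaddle 1.6-era Fluid APIs; the names and sizes are illustrative and not taken from this diff.

```python
import paddle.fluid as fluid

dict_dim, emb_dim = 5147, 128   # illustrative vocabulary / embedding sizes

# Old API: the word-id input is declared with a trailing dimension of 1.
data_old = fluid.layers.data(name="words_old", shape=[1], dtype="int64", lod_level=1)
emb_old = fluid.layers.embedding(
    input=data_old, size=[dict_dim, emb_dim], is_sparse=True)

# New API: fluid.embedding takes the id tensor without the extra dimension,
# which avoids the shape mismatch the commit message refers to.
data_new = fluid.data(name="words_new", shape=[None], dtype="int64", lod_level=1)
emb_new = fluid.embedding(
    input=data_new, size=[dict_dim, emb_dim], is_sparse=True)
```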
@@ -151,7 +151,7 @@ BATCH_SIZE = 128    # batch size
 ```python
 # Text convolution neural network
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -183,7 +183,7 @@ def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     # Calculate word vectors
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     # First stack
......
@@ -140,7 +140,7 @@ Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling
 ```python
 # Text convolution neural network
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -172,7 +172,7 @@ The code of the stacked bidirectional LSTM `stacked_lstm_net` is as follows:
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     # Calculate word vectors
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     # First stack
......
@@ -193,7 +193,7 @@ BATCH_SIZE = 128    # batch size
 ```python
 # Text convolution neural network
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -225,7 +225,7 @@ def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     # Calculate word vectors
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     # First stack
......
@@ -182,7 +182,7 @@ Note that `fluid.nets.sequence_conv_pool` contains both convolution and pooling
 ```python
 # Text convolution neural network
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
@@ -214,7 +214,7 @@ The code of the stacked bidirectional LSTM `stacked_lstm_net` is as follows:
 def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
     # Calculate word vectors
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     # First stack
......
@@ -42,7 +42,7 @@ def parse_args():
 def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
......
@@ -42,7 +42,7 @@ def parse_args():
 def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size):
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     sentence = fluid.layers.fc(input=emb, size=lstm_size * 4, act='tanh')
......
@@ -47,7 +47,7 @@ def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim,
                      stacked_num):
     assert stacked_num % 2 == 1
-    emb = fluid.layers.embedding(
+    emb = fluid.embedding(
         input=data, size=[input_dim, emb_dim], is_sparse=True)
     fc1 = fluid.layers.fc(input=emb, size=hid_dim)
......
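As a rough usage sketch (not part of this commit): with the change above, the word-id data layer can be declared as a plain id tensor and fed straight into the updated `convolution_net`, assuming the function returns a softmax prediction as in the surrounding book example; the names and sizes here are illustrative only.

```python
import paddle.fluid as fluid

CLASS_DIM = 2        # positive / negative (illustrative)
EMB_DIM = 128
HID_DIM = 512
dict_dim = 5147      # illustrative vocabulary size

# Plain LoD tensor of word ids, matching what fluid.embedding expects.
data = fluid.data(name="words", shape=[None], dtype="int64", lod_level=1)
label = fluid.data(name="label", shape=[None, 1], dtype="int64")

prediction = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM)
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(cost)
fluid.optimizer.Adagrad(learning_rate=0.002).minimize(avg_cost)
```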