diff --git a/01.fit_a_line/index.cn.html b/01.fit_a_line/index.cn.html
index 3154f30f1d0701dfd1f84d5ada65641faf49c19c..32760a22092f136746a72328b48502548caf7692 100644
--- a/01.fit_a_line/index.cn.html
+++ b/01.fit_a_line/index.cn.html
@@ -89,22 +89,6 @@ $$MSE=\frac{1}{n}\sum_{i=1}^{n}{(\hat{Y_i}-Y_i)}^2$$
## 数据集
-### 数据集接口的封装
-首先加载需要的包
-
-```python
-import paddle.v2 as paddle
-import paddle.v2.dataset.uci_housing as uci_housing
-```
-
-我们通过uci_housing模块引入了数据集合[UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing)
-
-其中,在uci_housing模块中封装了:
-
-1. 数据下载的过程。下载数据保存在~/.cache/paddle/dataset/uci_housing/housing.data。
-2. [数据预处理](#数据预处理)的过程。
-
-
### 数据集介绍
这份数据集共506行,每行包含了波士顿郊区的一类房屋的相关信息及该类房屋价格的中位数。其各维属性的意义如下:
@@ -152,157 +136,167 @@ import paddle.v2.dataset.uci_housing as uci_housing
`fit_a_line/trainer.py`演示了训练的整体过程。
-### 初始化PaddlePaddle
-
+### 配置数据提供器(Datafeeder)
+首先我们引入必要的库:
```python
-paddle.init(use_gpu=False, trainer_count=1)
+import paddle
+import paddle.fluid as fluid
+import numpy
```
-### 模型配置
+我们通过uci_housing模块引入了数据集[UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing)。
+
+其中,在uci_housing模块中封装了:
+
+1. 数据下载的过程。下载数据保存在`~/.cache/paddle/dataset/uci_housing/housing.data`。
+2. [数据预处理](#数据预处理)的过程。
-线性回归的模型其实就是一个采用线性激活函数(linear activation,`LinearActivation`)的全连接层(fully-connected layer,`fc_layer`):
+接下来我们定义用于训练和测试的数据提供器。提供器每次读入一个大小为`BATCH_SIZE`的数据批次。如果希望在数据中加入随机性,可以同时指定批次大小和缓存大小:这样,数据提供器每次会从缓存中随机读出批次大小条数据。
```python
-x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13))
-y_predict = paddle.layer.fc(input=x,
- size=1,
- act=paddle.activation.Linear())
-y = paddle.layer.data(name='y', type=paddle.data_type.dense_vector(1))
-cost = paddle.layer.square_error_cost(input=y_predict, label=y)
+BATCH_SIZE = 20
+
+train_reader = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.uci_housing.train(), buf_size=500),
+ batch_size=BATCH_SIZE)
+
+test_reader = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.uci_housing.test(), buf_size=500),
+ batch_size=BATCH_SIZE)
```
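+
+下面是一段示意代码(并非训练所必需),用来检查 `train_reader` 产出的一个批次。这里假设每条样本依次是 13 维特征 `x` 和 1 维房价 `y`:
+
+```python
+# 示意:批次是样本的列表,每条样本为 (x, y) 二元组
+first_batch = next(train_reader())
+print(len(first_batch))         # 等于 BATCH_SIZE,即 20
+print(first_batch[0][0].shape)  # 每条样本的特征 x 为 13 维
+```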
-### 保存网络拓扑
+### 配置训练程序
+训练程序的目的是定义一个训练模型的网络结构。对于线性回归来讲,它就是一个从输入到输出的简单的全连接层。更加复杂的结构,比如卷积神经网络、递归神经网络等,会在随后的章节中介绍。训练程序必须返回`平均损失`作为第一个返回值,因为它会被后面的反向传播算法所用到。
```python
-# Save the inference topology to protobuf.
-inference_topology = paddle.topology.Topology(layers=y_predict)
-with open("inference_topology.pkl", 'wb') as f:
- inference_topology.serialize_for_inference(f)
+def train_program():
+ y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+
+ # feature vector of length 13
+ x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+ y_predict = fluid.layers.fc(input=x, size=1, act=None)
+
+ loss = fluid.layers.square_error_cost(input=y_predict, label=y)
+ avg_loss = fluid.layers.mean(loss)
+
+ return avg_loss
```
-### 创建参数
+### Optimizer Function 配置
+
+在下面的 `SGD` 优化器中,`learning_rate` 是学习率,它与网络的训练收敛速度有关系。
```python
-parameters = paddle.parameters.create(cost)
+def optimizer_program():
+ return fluid.optimizer.SGD(learning_rate=0.001)
```
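+
+学习率可以直观理解为每一步参数更新的"步长"。下面用一小段纯 Python 的示意代码演示 SGD 的更新规则(仅作说明,与 Fluid 内部实现无关):
+
+```python
+# 示意:参数沿负梯度方向更新,learning_rate 决定每一步移动的幅度
+learning_rate = 0.001
+w, gradient = 1.0, 2.5
+w = w - learning_rate * gradient
+print(w)  # 0.9975
+```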
-### 创建Trainer
+### 定义运算场所
+我们可以定义运算是发生在CPU还是GPU上。
```python
-optimizer = paddle.optimizer.Momentum(momentum=0)
-
-trainer = paddle.trainer.SGD(cost=cost,
- parameters=parameters,
- update_equation=optimizer)
+use_cuda = False
+place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
```
-### 读取数据且打印训练的中间信息
-
-PaddlePaddle提供一个
-[reader机制](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/design/reader)
-来读取数据。 Reader返回的数据可以包括多列,我们需要一个Python dict把列
-序号映射到网络里的数据层。
+### 创建训练器
+训练器会读入一个训练程序和一些必要的其他参数:
```python
-feeding={'x': 0, 'y': 1}
+trainer = fluid.Trainer(
+ train_func=train_program,
+ place=place,
+ optimizer_func=optimizer_program)
```
-此外,我们还可以提供一个 event handler,来打印训练的进度:
+### 开始提供数据
+PaddlePaddle提供了读取数据的生成器(reader)机制来读取训练数据。reader每次会提供多列数据,因此我们需要一个Python的list来定义这些列的读取顺序。
```python
-# event_handler to print training and testing info
-def event_handler(event):
- if isinstance(event, paddle.event.EndIteration):
- if event.batch_id % 100 == 0:
- print "Pass %d, Batch %d, Cost %f" % (
- event.pass_id, event.batch_id, event.cost)
-
- if isinstance(event, paddle.event.EndPass):
- result = trainer.test(
- reader=paddle.batch(
- uci_housing.test(), batch_size=2),
- feeding=feeding)
- print "Test %d, Cost %f" % (event.pass_id, result.cost)
+feed_order=['x', 'y']
```
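+
+作为补充,下面的示意代码展示了 reader 产出的每条数据的列顺序与 `feed_order` 的对应关系(这里假设每条样本依次为特征和房价,与 uci_housing 的实际格式一致):
+
+```python
+# 示意:取出一条样本,第 0 列对应 'x',第 1 列对应 'y'
+one_sample = next(paddle.dataset.uci_housing.train()())
+print(one_sample[0].shape)  # (13,) -> 'x'
+print(one_sample[1])        # 房价 -> 'y'
+```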
+除此之外,可以定义一个事件响应器(event handler)来处理类似`打印训练进度`的事件:
+
```python
-# event_handler to print training and testing info
-from paddle.v2.plot import Ploter
+# Specify the directory path to save the parameters
+params_dirname = "fit_a_line.inference.model"
+# Plot data
+from paddle.v2.plot import Ploter
train_title = "Train cost"
test_title = "Test cost"
-cost_ploter = Ploter(train_title, test_title)
+plot_cost = Ploter(train_title, test_title)
step = 0
+# event_handler to print training and testing info
def event_handler_plot(event):
global step
- if isinstance(event, paddle.event.EndIteration):
- if step % 10 == 0: # every 10 batches, record a train cost
- cost_ploter.append(train_title, step, event.cost)
+ if isinstance(event, fluid.EndStepEvent):
+ if event.step % 10 == 0: # every 10 batches, record a test cost
+ test_metrics = trainer.test(
+ reader=test_reader, feed_order=feed_order)
- if step % 100 == 0: # every 100 batches, record a test cost
- result = trainer.test(
- reader=paddle.batch(
- uci_housing.test(), batch_size=2),
- feeding=feeding)
- cost_ploter.append(test_title, step, result.cost)
+ plot_cost.append(test_title, step, test_metrics[0])
+ plot_cost.plot()
- if step % 100 == 0: # every 100 batches, update cost plot
- cost_ploter.plot()
+ if test_metrics[0] < 10.0:
+            # If the loss is less than 10.0, the model is considered good enough and we stop training.
+ print('loss is less than 10.0, stop')
+ trainer.stop()
- step += 1
+ # We can save the trained parameters for the inferences later
+ if params_dirname is not None:
+ trainer.save_params(params_dirname)
- if isinstance(event, paddle.event.EndPass):
- if event.pass_id % 10 == 0:
- with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
- trainer.save_parameter_to_tar(f)
+ step += 1
```
### 开始训练
+我们现在可以通过调用`trainer.train()`来开始训练。
```python
+%matplotlib inline
+
+# The training could take up to a few minutes.
trainer.train(
- reader=paddle.batch(
- paddle.reader.shuffle(
- uci_housing.train(), buf_size=500),
- batch_size=2),
- feeding=feeding,
+ reader=train_reader,
+ num_epochs=100,
event_handler=event_handler_plot,
- num_passes=30)
+ feed_order=feed_order)
```
![png](./image/train_and_test.png)
-### 应用模型
+## 预测
+提供一个`inference_program`和一个`params_dirname`来初始化预测器。`params_dirname`是之前训练过程中保存参数的路径。
+
+### 设定预测程序
+类似于`trainer.train`,预测器需要一个预测程序来做预测。我们可以稍加修改我们的训练程序来把预测值包含进来。
-#### 1. 生成测试数据
```python
-test_data_creator = paddle.dataset.uci_housing.test()
-test_data = []
-test_label = []
-
-for item in test_data_creator():
- test_data.append((item[0],))
- test_label.append(item[1])
- if len(test_data) == 5:
- break
+def inference_program():
+ x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+ y_predict = fluid.layers.fc(input=x, size=1, act=None)
+ return y_predict
```
-#### 2. 推测 inference
+### 预测
+预测器会从`params_dirname`中读取已经训练好的模型,来对从未遇见过的数据进行预测。
```python
-# load parameters from tar file.
-# users can remove the comments and change the model name
-# with open('params_pass_20.tar', 'r') as f:
-# parameters = paddle.parameters.Parameters.from_tar(f)
+inferencer = fluid.Inferencer(
+ infer_func=inference_program, param_path=params_dirname, place=place)
-probs = paddle.infer(
- output_layer=y_predict, parameters=parameters, input=test_data)
+batch_size = 10
+tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
-for i in xrange(len(probs)):
- print "label=" + str(test_label[i][0]) + ", predict=" + str(probs[i][0])
+results = inferencer.infer({'x': tensor_x})
+print("infer results: ", results[0])
```
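+
+上面的例子使用随机生成的数据做输入,仅用于演示接口。作为补充,也可以像早期版本那样,从测试集中取出少量真实样本进行预测,并与真实房价对比(示意代码,假设测试 reader 的每条数据为 (特征, 真实房价)):
+
+```python
+# 示意:取测试集前 10 条真实样本做预测
+test_feats = []
+test_labels = []
+for item in paddle.dataset.uci_housing.test()():
+    test_feats.append(item[0])
+    test_labels.append(item[1])
+    if len(test_feats) == 10:
+        break
+
+real_x = numpy.array(test_feats).astype("float32")
+real_results = inferencer.infer({'x': real_x})
+for label, predict in zip(test_labels, real_results[0]):
+    print("label=%s, predict=%s" % (label, predict))
+```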
## 总结
diff --git a/01.fit_a_line/index.html b/01.fit_a_line/index.html
index e2de87f807b3b8a30753676dfbc2fb0b8661c1ef..cab234ff49188aa0be1941c5c81c4c1fc3a9d2a3 100644
--- a/01.fit_a_line/index.html
+++ b/01.fit_a_line/index.html
@@ -191,6 +191,14 @@ def train_program():
return avg_loss
```
+### Optimizer Function Configuration
+
+In the following `SGD` optimizer, `learning_rate` specifies the learning rate in the optimization procedure.
+
+```python
+def optimizer_program():
+ return fluid.optimizer.SGD(learning_rate=0.001)
+```
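+
+As a side note, the learning rate can be read as the step size of each parameter update. Below is a minimal pure-Python sketch of the SGD update rule (for illustration only, unrelated to Fluid internals):
+
+```python
+# w moves along the negative gradient; learning_rate controls the step size
+learning_rate = 0.001
+w, gradient = 1.0, 2.5
+w = w - learning_rate * gradient
+print(w)  # 0.9975
+```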
### Specify Place
Specify your training environment, you should specify if the training is on CPU or GPU.
@@ -207,7 +215,7 @@ The trainer will take the `train_program` as input.
trainer = fluid.Trainer(
train_func=train_program,
place=place,
- optimizer=fluid.optimizer.SGD(learning_rate=0.001))
+ optimizer_func=optimizer_program)
```
### Feeding Data
diff --git a/01.fit_a_line/train.py b/01.fit_a_line/train.py
index bd3f2c1d2b4d23812129a53937f4da12054f58a9..b223ceecb22bc5150f85f55a72b47d0c22c6b167 100644
--- a/01.fit_a_line/train.py
+++ b/01.fit_a_line/train.py
@@ -49,9 +49,7 @@ use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
trainer = fluid.Trainer(
- train_func=train_program,
- place=place,
- optimizer_func=optimizer_program)
+ train_func=train_program, place=place, optimizer_func=optimizer_program)
feed_order = ['x', 'y']
diff --git a/04.word2vec/index.cn.html b/04.word2vec/index.cn.html
index 5a1aaf1229e255d6044a3eb9778c200602202530..e146f6a0f47292134930f4577a57b0fbd9c9ab5b 100644
--- a/04.word2vec/index.cn.html
+++ b/04.word2vec/index.cn.html
@@ -226,6 +226,7 @@ dream that one day
```
最后,每个输入会按其单词次在字典里的位置,转化成整数的索引序列,作为PaddlePaddle的输入。
+
## 编程实现
本配置的模型结构如下图所示:
@@ -238,236 +239,236 @@ dream that one day
首先,加载所需要的包:
```python
+import paddle
+import paddle.fluid as fluid
+import numpy
+from functools import partial
import math
-import paddle.v2 as paddle
+import os
+import sys
```
然后,定义参数:
```python
-embsize = 32 # 词向量维度
-hiddensize = 256 # 隐层维度
-N = 5 # 训练5-Gram
-```
-
-用于保存和加载word_dict和embedding table的函数
-```python
-# save and load word dict and embedding table
-def save_dict_and_embedding(word_dict, embeddings):
- with open("word_dict", "w") as f:
- for key in word_dict:
- f.write(key + " " + str(word_dict[key]) + "\n")
- with open("embedding_table", "w") as f:
- numpy.savetxt(f, embeddings, delimiter=',', newline='\n')
-
-
-def load_dict_and_embedding():
- word_dict = dict()
- with open("word_dict", "r") as f:
- for line in f:
- key, value = line.strip().split(" ")
- word_dict[key] = int(value)
-
- embeddings = numpy.loadtxt("embedding_table", delimiter=",")
- return word_dict, embeddings
-```
-
-接着,定义网络结构:
+EMBED_SIZE = 32 # word vector dimension
+HIDDEN_SIZE = 256 # hidden layer dimension
+N = 5 # train 5-gram
+BATCH_SIZE = 32 # batch size
-- 将$w_t$之前的$n-1$个词 $w_{t-n+1},...w_{t-1}$,通过$|V|\times D$的矩阵映射到D维词向量(本例中取D=32)。
+# can use CPU or GPU
+use_cuda = os.getenv('WITH_GPU', '0') != '0'
-```python
-def wordemb(inlayer):
- wordemb = paddle.layer.table_projection(
- input=inlayer,
- size=embsize,
- param_attr=paddle.attr.Param(
- name="_proj",
- initial_std=0.001,
- learning_rate=1,
- l2_rate=0,
- sparse_update=True))
- return wordemb
-```
-
-- 定义输入层接受的数据类型以及名字。
-
-```python
-paddle.init(use_gpu=False, trainer_count=3) # 初始化PaddlePaddle
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
-# 每个输入层都接受整形数据,这些数据的范围是[0, dict_size)
-firstword = paddle.layer.data(
- name="firstw", type=paddle.data_type.integer_value(dict_size))
-secondword = paddle.layer.data(
- name="secondw", type=paddle.data_type.integer_value(dict_size))
-thirdword = paddle.layer.data(
- name="thirdw", type=paddle.data_type.integer_value(dict_size))
-fourthword = paddle.layer.data(
- name="fourthw", type=paddle.data_type.integer_value(dict_size))
-nextword = paddle.layer.data(
- name="fifthw", type=paddle.data_type.integer_value(dict_size))
-
-Efirst = wordemb(firstword)
-Esecond = wordemb(secondword)
-Ethird = wordemb(thirdword)
-Efourth = wordemb(fourthword)
```
-- 将这n-1个词向量经过concat_layer连接成一个大向量作为历史文本特征。
+不同于之前的PaddlePaddle v2版本,在新的Fluid版本里,我们不必再手动计算词向量。PaddlePaddle提供了一个内置的方法`fluid.layers.embedding`,我们就可以直接用它来构造 N-gram 神经网络。
-```python
-contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
-```
-
-- 将历史文本特征经过一个全连接得到文本隐层特征。
-
-```python
-hidden1 = paddle.layer.fc(input=contextemb,
- size=hiddensize,
- act=paddle.activation.Sigmoid(),
- layer_attr=paddle.attr.Extra(drop_rate=0.5),
- bias_attr=paddle.attr.Param(learning_rate=2),
- param_attr=paddle.attr.Param(
- initial_std=1. / math.sqrt(embsize * 8),
- learning_rate=1))
-```
-
-- 将文本隐层特征,再经过一个全连接,映射成一个$|V|$维向量,同时通过softmax归一化得到这`|V|`个词的生成概率。
+- 我们来定义我们的 N-gram 神经网络结构。这个结构在训练和预测中都会使用到。因为词向量比较稀疏,我们传入参数 `is_sparse == True`, 可以加速稀疏矩阵的更新。
```python
-predictword = paddle.layer.fc(input=hidden1,
- size=dict_size,
- bias_attr=paddle.attr.Param(learning_rate=2),
- act=paddle.activation.Softmax())
+def inference_program(is_sparse):
+ first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
+ second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
+ third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
+ fourth_word = fluid.layers.data(name='fourthw', shape=[1], dtype='int64')
+
+ embed_first = fluid.layers.embedding(
+ input=first_word,
+ size=[dict_size, EMBED_SIZE],
+ dtype='float32',
+ is_sparse=is_sparse,
+ param_attr='shared_w')
+ embed_second = fluid.layers.embedding(
+ input=second_word,
+ size=[dict_size, EMBED_SIZE],
+ dtype='float32',
+ is_sparse=is_sparse,
+ param_attr='shared_w')
+ embed_third = fluid.layers.embedding(
+ input=third_word,
+ size=[dict_size, EMBED_SIZE],
+ dtype='float32',
+ is_sparse=is_sparse,
+ param_attr='shared_w')
+ embed_fourth = fluid.layers.embedding(
+ input=fourth_word,
+ size=[dict_size, EMBED_SIZE],
+ dtype='float32',
+ is_sparse=is_sparse,
+ param_attr='shared_w')
+
+ concat_embed = fluid.layers.concat(
+ input=[embed_first, embed_second, embed_third, embed_fourth], axis=1)
+ hidden1 = fluid.layers.fc(input=concat_embed,
+ size=HIDDEN_SIZE,
+ act='sigmoid')
+ predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax')
+ return predict_word
```
-- 网络的损失函数为多分类交叉熵,可直接调用`classification_cost`函数。
+- 基于以上的神经网络结构,我们可以按如下方式定义我们的训练程序:
```python
-cost = paddle.layer.classification_cost(input=predictword, label=nextword)
+def train_program(is_sparse):
+ # The declaration of 'next_word' must be after the invoking of inference_program,
+ # or the data input order of train program would be [next_word, firstw, secondw,
+ # thirdw, fourthw], which is not correct.
+ predict_word = inference_program(is_sparse)
+ next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
+ cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
+ avg_cost = fluid.layers.mean(cost)
+ return avg_cost
```
-然后,指定训练相关的参数:
+- 现在我们可以开始训练了。相比以前的版本,现在的训练流程简单了许多。我们有现成的训练和测试集:`paddle.dataset.imikolov.train()`和`paddle.dataset.imikolov.test()`。两者都会返回一个读取器(reader)。在PaddlePaddle中,reader是一个Python函数,每次调用它会返回一个Python generator,用来逐条读取数据。
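+
+下面是一小段示意代码(非训练所必需),用来直观感受 imikolov reader 产出的数据形式:
+
+```python
+# 示意:reader 每次产出一个包含 5 个词 ID 的元组,
+# 前 4 个是上下文词,第 5 个是要预测的下一个词
+one_sample = next(paddle.dataset.imikolov.train(word_dict, N)())
+print(one_sample)
+```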
-- 训练方法(optimizer): 代表训练过程在更新权重时采用动量优化器,本教程使用Adam优化器。
-- 训练速度(learning_rate): 迭代的速度,与网络的训练收敛速度有关系。
-- 正则化(regularization): 是防止网络过拟合的一种手段,此处采用L2正则化。
+`paddle.batch` 会读入一个reader,然后输出一个按批次组织数据的reader。`event_handler`也可以一并传入`trainer.train`,用来在训练过程中随时输出每个步骤、每个批次的训练情况。
```python
-parameters = paddle.parameters.create(cost)
-adagrad = paddle.optimizer.AdaGrad(
- learning_rate=3e-3,
- regularization=paddle.optimizer.L2Regularization(8e-4))
-trainer = paddle.trainer.SGD(cost, parameters, adagrad)
+def optimizer_func():
+ # Note here we need to choose more sophisticated optimizers
+ # such as AdaGrad with a decay rate. The normal SGD converges
+ # very slowly.
+ # optimizer=fluid.optimizer.SGD(learning_rate=0.001),
+ return fluid.optimizer.AdagradOptimizer(
+ learning_rate=3e-3,
+ regularization=fluid.regularizer.L2DecayRegularizer(8e-4))
+
+
+def train(use_cuda, train_program, params_dirname):
+ train_reader = paddle.batch(
+ paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)
+ test_reader = paddle.batch(
+ paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE)
+
+ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+ def event_handler(event):
+ if isinstance(event, fluid.EndStepEvent):
+ # We output cost every 10 steps.
+ if event.step % 10 == 0:
+ outs = trainer.test(
+ reader=test_reader,
+ feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
+ avg_cost = outs[0]
+
+ print "Step %d: Average Cost %f" % (event.step, avg_cost)
+
+ # If average cost is lower than 5.8, we consider the model good enough to stop.
+ # Note 5.8 is a relatively high value. In order to get a better model, one should
+ # aim for avg_cost lower than 3.5. But the training could take longer time.
+ if avg_cost < 5.8:
+ trainer.save_params(params_dirname)
+ trainer.stop()
+
+ if math.isnan(avg_cost):
+ sys.exit("got NaN loss, training failed.")
+
+ trainer = fluid.Trainer(
+ train_func=train_program,
+ optimizer_func=optimizer_func,
+ place=place)
+
+ trainer.train(
+ reader=train_reader,
+ num_epochs=1,
+ event_handler=event_handler,
+ feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw'])
```
-下一步,我们开始训练过程。`paddle.dataset.imikolov.train()`和`paddle.dataset.imikolov.test()`分别做训练和测试数据集。这两个函数各自返回一个reader——PaddlePaddle中的reader是一个Python函数,每次调用的时候返回一个Python generator。
-
-`paddle.batch`的输入是一个reader,输出是一个batched reader —— 在PaddlePaddle里,一个reader每次yield一条训练数据,而一个batched reader每次yield一个minbatch。
+- `trainer.train`将会开始训练。`event_handler`输出的监控信息如下:
```python
-def event_handler(event):
- if isinstance(event, paddle.event.EndIteration):
- if event.batch_id % 100 == 0:
- print "Pass %d, Batch %d, Cost %f, %s" % (
- event.pass_id, event.batch_id, event.cost, event.metrics)
-
- if isinstance(event, paddle.event.EndPass):
- result = trainer.test(
- paddle.batch(
- paddle.dataset.imikolov.test(word_dict, N), 32))
- print "Pass %d, Testing metrics %s" % (event.pass_id, result.metrics)
- with open("model_%d.tar"%event.pass_id, 'w') as f:
- trainer.save_parameter_to_tar(f)
-
-trainer.train(
- paddle.batch(paddle.dataset.imikolov.train(word_dict, N), 32),
- num_passes=100,
- event_handler=event_handler)
-```
-
-```text
-Pass 0, Batch 0, Cost 7.870579, {'classification_error_evaluator': 1.0}, Testing metrics {'classification_error_evaluator': 0.999591588973999}
-Pass 0, Batch 100, Cost 6.136420, {'classification_error_evaluator': 0.84375}, Testing metrics {'classification_error_evaluator': 0.8328699469566345}
-Pass 0, Batch 200, Cost 5.786797, {'classification_error_evaluator': 0.8125}, Testing metrics {'classification_error_evaluator': 0.8328542709350586}
+Step 0: Average Cost 7.337213
+Step 10: Average Cost 6.136128
+Step 20: Average Cost 5.766995
...
```
-训练过程是完全自动的,event_handler里打印的日志类似如上所示:
-
-经过30个pass,我们将得到平均错误率为classification_error_evaluator=0.735611。
-
-## 保存词典和embedding
+## 模型应用
+在模型训练后,我们可以用它做一些预测。
-训练完成之后,我们可以把词典和embedding table单独保存下来,后面可以直接使用
+### 预测下一个词
+我们可以用训练好的模型,在给定前面几个词(即 N-gram 上下文)的情况下,预测下一个词。
```python
-# save word dict and embedding table
-embeddings = parameters.get("_proj").reshape(len(word_dict), embsize)
-save_dict_and_embedding(word_dict, embeddings)
+def infer(use_cuda, inference_program, params_dirname=None):
+ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+ inferencer = fluid.Inferencer(
+ infer_func=inference_program, param_path=params_dirname, place=place)
+
+ # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
+ # is simply an index to look up for the corresponding word vector and hence
+ # the shape of word (base_shape) should be [1]. The length-based level of
+ # detail (lod) info of each LoDtensor should be [[1]] meaning there is only
+ # one lod_level and there is only one sequence of one word on this level.
+ # Note that lod info should be a list of lists.
+
+ data1 = [[211]] # 'among'
+ data2 = [[6]] # 'a'
+ data3 = [[96]] # 'group'
+ data4 = [[4]] # 'of'
+ lod = [[1]]
+
+ first_word = fluid.create_lod_tensor(data1, lod, place)
+ second_word = fluid.create_lod_tensor(data2, lod, place)
+ third_word = fluid.create_lod_tensor(data3, lod, place)
+ fourth_word = fluid.create_lod_tensor(data4, lod, place)
+
+ result = inferencer.infer(
+ {
+ 'firstw': first_word,
+ 'secondw': second_word,
+ 'thirdw': third_word,
+ 'fourthw': fourth_word
+ },
+ return_numpy=False)
+
+ print(numpy.array(result[0]))
+ most_possible_word_index = numpy.argmax(result[0])
+ print(most_possible_word_index)
+ print([
+ key for key, value in word_dict.iteritems()
+ if value == most_possible_word_index
+ ][0])
```
-
-## 应用模型
-训练模型后,我们可以加载模型参数,用训练出来的词向量初始化其他模型,也可以将模型查看参数用来做后续应用。
-
-
-### 查看词向量
-
-PaddlePaddle训练出来的参数可以直接使用`parameters.get()`获取出来。例如查看单词`apple`的词向量,即为
+在经历3分钟的短暂训练后,我们得到如下的预测。我们的模型预测 `among a group of` 的下一个词是`a`,这比较符合文法规律。如果我们训练时间更长,比如几个小时,那么模型预测出的下一个词会是 `workers`,这样的预测更有意义。
```python
-embeddings = parameters.get("_proj").reshape(len(word_dict), embsize)
-
-print embeddings[word_dict['apple']]
-```
-
-```text
-[-0.38961065 -0.02392169 -0.00093231 0.36301503 0.13538605 0.16076435
--0.0678709 0.1090285 0.42014077 -0.24119169 -0.31847557 0.20410083
-0.04910378 0.19021918 -0.0122014 -0.04099389 -0.16924137 0.1911236
--0.10917275 0.13068172 -0.23079982 0.42699069 -0.27679482 -0.01472992
-0.2069038 0.09005053 -0.3282454 0.12717034 -0.24218646 0.25304323
-0.19072419 -0.24286366]
+[[0.00106646 0.0007907 0.00072041 ... 0.00049024 0.00041355 0.00084464]]
+6
+a
```
-
-### 修改词向量
-
-获得到的embedding为一个标准的numpy矩阵。我们可以对这个numpy矩阵进行修改,然后赋值回去。
-
+整个程序的入口很简单:
```python
-def modify_embedding(emb):
- # Add your modification here.
- pass
-
-modify_embedding(embeddings)
-parameters.set("_proj", embeddings)
-```
-
-### 计算词语之间的余弦距离
+def main(use_cuda, is_sparse):
+ if use_cuda and not fluid.core.is_compiled_with_cuda():
+ return
-两个向量之间的距离可以用余弦值来表示,余弦值在$[-1,1]$的区间内,向量间余弦值越大,其距离越近。这里我们在`calculate_dis.py`中实现不同词语的距离度量。
-用法如下:
+ params_dirname = "word2vec.inference.model"
+ train(
+ use_cuda=use_cuda,
+ train_program=partial(train_program, is_sparse),
+ params_dirname=params_dirname)
-```python
-from scipy import spatial
+ infer(
+ use_cuda=use_cuda,
+ inference_program=partial(inference_program, is_sparse),
+ params_dirname=params_dirname)
-emb_1 = embeddings[word_dict['world']]
-emb_2 = embeddings[word_dict['would']]
-print spatial.distance.cosine(emb_1, emb_2)
+main(use_cuda=use_cuda, is_sparse=True)
```
-```text
-0.99375076448
-```
## 总结
-本章中,我们介绍了词向量、语言模型和词向量的关系、以及如何通过训练神经网络模型获得词向量。在信息检索中,我们可以根据向量间的余弦夹角,来判断query和文档关键词这二者间的相关性。在句法分析和语义分析中,训练好的词向量可以用来初始化模型,以得到更好的效果。在文档分类中,有了词向量之后,可以用聚类的方法将文档中同义词进行分组。希望大家在本章后能够自行运用词向量进行相关领域的研究。
+本章中,我们介绍了词向量、语言模型和词向量的关系、以及如何通过训练神经网络模型获得词向量。在信息检索中,我们可以根据向量间的余弦夹角,来判断query和文档关键词这二者间的相关性。在句法分析和语义分析中,训练好的词向量可以用来初始化模型,以得到更好的效果。在文档分类中,有了词向量之后,可以用聚类的方法将文档中同义词进行分组。此外,本章还展示了如何用 N-gram 神经网络模型预测下一个词。希望大家在本章后能够自行运用词向量进行相关领域的研究。
## 参考文献
diff --git a/04.word2vec/index.html b/04.word2vec/index.html
index d3001cc2be30a0f4fb616683222a3836fdace03d..4208db87c3a9820ad5ab6f912cd27184c61b8d34 100644
--- a/04.word2vec/index.html
+++ b/04.word2vec/index.html
@@ -283,7 +283,8 @@ dict_size = len(word_dict)
Unlike from the previous PaddlePaddle v2, in the new API (Fluid), we do not need to calculate word embedding ourselves. PaddlePaddle provides a built-in method `fluid.layers.embedding` and we can use it directly to build our N-gram neural network model.
-- We define our N-gram neural network structure as below. This structure will be used both in `train` and in `infer`
+- We define our N-gram neural network structure as follows. This structure will be used both in `train` and in `infer`. We can specify `is_sparse = True` to accelerate sparse updates of the word embedding.
+
```python
def inference_program(is_sparse):
first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
@@ -457,12 +458,12 @@ def infer(use_cuda, inference_program, params_dirname=None):
][0])
```
-When we spent 3 mins in training, the output is like below, which means the next word for `among a group of` is `board`. If we train the model with a longer time, it will give a meaningful prediction as `workers`.
+After about 3 minutes of training, the output looks like the text below, which means the predicted next word for `among a group of` is `a`. If we train the model for a longer time, it will give a more meaningful prediction such as `workers`.
```text
-[[0.00144043 0.00073983 0.00042264 ... 0.00061815 0.00038701 0.00099838]]
-142
-board
+[[0.00106646 0.0007907 0.00072041 ... 0.00049024 0.00041355 0.00084464]]
+6
+a
```
The main entrance of the program is fairly simple:
diff --git a/05.recommender_system/README.md b/05.recommender_system/README.md
index 4dd27b88f7a612a2cb163db02420ef979440c9f4..2b1ed98e6747980921d63e5a33b6584063adb13b 100644
--- a/05.recommender_system/README.md
+++ b/05.recommender_system/README.md
@@ -417,7 +417,7 @@ def event_handler(event):
# get avg cost
avg_cost = np.array(avg_cost_set).mean()
-
+
plot_cost.append(test_title, event.step, avg_cost_set[0])
plot_cost.plot()
diff --git a/05.recommender_system/index.cn.html b/05.recommender_system/index.cn.html
index 97f13fdee3b1674184f5dbee771ca3301a24c5de..2fc69601d9a70e6e9f81231311e669ead5d2fc17 100644
--- a/05.recommender_system/index.cn.html
+++ b/05.recommender_system/index.cn.html
@@ -163,8 +163,9 @@ Paddle在API中提供了自动加载数据的模块。数据模块为 `paddle.da
```python
-import paddle.v2 as paddle
-paddle.init(use_gpu=False)
+import paddle
+movie_info = paddle.dataset.movielens.movie_info()
+print movie_info.values()[0]
```
@@ -252,241 +253,304 @@ print "User %s rates Movie %s with Score %s"%(user_info[uid], movie_info[mov_id]
## 模型配置说明
-下面我们开始根据输入数据的形式配置模型。
+下面我们开始根据输入数据的形式配置模型。首先引入所需的库函数以及定义全局变量。
```python
-uid = paddle.layer.data(
- name='user_id',
- type=paddle.data_type.integer_value(
- paddle.dataset.movielens.max_user_id() + 1))
-usr_emb = paddle.layer.embedding(input=uid, size=32)
-usr_fc = paddle.layer.fc(input=usr_emb, size=32)
-
-usr_gender_id = paddle.layer.data(
- name='gender_id', type=paddle.data_type.integer_value(2))
-usr_gender_emb = paddle.layer.embedding(input=usr_gender_id, size=16)
-usr_gender_fc = paddle.layer.fc(input=usr_gender_emb, size=16)
-
-usr_age_id = paddle.layer.data(
- name='age_id',
- type=paddle.data_type.integer_value(
- len(paddle.dataset.movielens.age_table)))
-usr_age_emb = paddle.layer.embedding(input=usr_age_id, size=16)
-usr_age_fc = paddle.layer.fc(input=usr_age_emb, size=16)
-
-usr_job_id = paddle.layer.data(
- name='job_id',
- type=paddle.data_type.integer_value(
- paddle.dataset.movielens.max_job_id() + 1))
-usr_job_emb = paddle.layer.embedding(input=usr_job_id, size=16)
-usr_job_fc = paddle.layer.fc(input=usr_job_emb, size=16)
+import math
+import sys
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+import paddle.fluid.nets as nets
+
+IS_SPARSE = True
+USE_GPU = False
+BATCH_SIZE = 256
```
-如上述代码所示,对于每个用户,我们输入4维特征。其中包括`user_id`,`gender_id`,`age_id`,`job_id`。这几维特征均是简单的整数值。为了后续神经网络处理这些特征方便,我们借鉴NLP中的语言模型,将这几维离散的整数值,变换成embedding取出。分别形成`usr_emb`, `usr_gender_emb`, `usr_age_emb`, `usr_job_emb`。
-
+然后,为我们的用户特征综合模型定义模型配置。
```python
-usr_combined_features = paddle.layer.fc(
- input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc],
- size=200,
- act=paddle.activation.Tanh())
+def get_usr_combined_features():
+
+ USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
+
+ uid = layers.data(name='user_id', shape=[1], dtype='int64')
+
+ usr_emb = layers.embedding(
+ input=uid,
+ dtype='float32',
+ size=[USR_DICT_SIZE, 32],
+ param_attr='user_table',
+ is_sparse=IS_SPARSE)
+
+ usr_fc = layers.fc(input=usr_emb, size=32)
+
+ USR_GENDER_DICT_SIZE = 2
+
+ usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
+
+ usr_gender_emb = layers.embedding(
+ input=usr_gender_id,
+ size=[USR_GENDER_DICT_SIZE, 16],
+ param_attr='gender_table',
+ is_sparse=IS_SPARSE)
+
+ usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
+
+ USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
+ usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
+
+ usr_age_emb = layers.embedding(
+ input=usr_age_id,
+ size=[USR_AGE_DICT_SIZE, 16],
+ is_sparse=IS_SPARSE,
+ param_attr='age_table')
+
+ usr_age_fc = layers.fc(input=usr_age_emb, size=16)
+
+ USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
+ usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
+
+ usr_job_emb = layers.embedding(
+ input=usr_job_id,
+ size=[USR_JOB_DICT_SIZE, 16],
+ param_attr='job_table',
+ is_sparse=IS_SPARSE)
+
+ usr_job_fc = layers.fc(input=usr_job_emb, size=16)
+
+ concat_embed = layers.concat(
+ input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
+
+ usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
+
+ return usr_combined_features
```
+如上述代码所示,对于每个用户,我们输入4维特征。其中包括`user_id`,`gender_id`,`age_id`,`job_id`。这几维特征均是简单的整数值。为了后续神经网络处理这些特征方便,我们借鉴NLP中的语言模型,将这几维离散的整数值,变换成embedding取出。分别形成`usr_emb`, `usr_gender_emb`, `usr_age_emb`, `usr_job_emb`。
+
然后,我们对于所有的用户特征,均输入到一个全连接层(fc)中。将所有特征融合为一个200维度的特征。
进而,我们对每一个电影特征做类似的变换,网络配置为:
```python
-mov_id = paddle.layer.data(
- name='movie_id',
- type=paddle.data_type.integer_value(
- paddle.dataset.movielens.max_movie_id() + 1))
-mov_emb = paddle.layer.embedding(input=mov_id, size=32)
-mov_fc = paddle.layer.fc(input=mov_emb, size=32)
-
-mov_categories = paddle.layer.data(
- name='category_id',
- type=paddle.data_type.sparse_binary_vector(
- len(paddle.dataset.movielens.movie_categories())))
-mov_categories_hidden = paddle.layer.fc(input=mov_categories, size=32)
-
-movie_title_dict = paddle.dataset.movielens.get_movie_title_dict()
-mov_title_id = paddle.layer.data(
- name='movie_title',
- type=paddle.data_type.integer_value_sequence(len(movie_title_dict)))
-mov_title_emb = paddle.layer.embedding(input=mov_title_id, size=32)
-mov_title_conv = paddle.networks.sequence_conv_pool(
- input=mov_title_emb, hidden_size=32, context_len=3)
-
-mov_combined_features = paddle.layer.fc(
- input=[mov_fc, mov_categories_hidden, mov_title_conv],
- size=200,
- act=paddle.activation.Tanh())
-```
+def get_mov_combined_features():
-电影ID和电影类型分别映射到其对应的特征隐层。对于电影标题名称(title),一个ID序列表示的词语序列,在输入卷积层后,将得到每个时间窗口的特征(序列特征),然后通过在时间维度降采样得到固定维度的特征,整个过程在sequence_conv_pool实现。
+ MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
-最后再将电影的特征融合进`mov_combined_features`中。
+ mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
+ mov_emb = layers.embedding(
+ input=mov_id,
+ dtype='float32',
+ size=[MOV_DICT_SIZE, 32],
+ param_attr='movie_table',
+ is_sparse=IS_SPARSE)
-```python
-inference = paddle.layer.cos_sim(a=usr_combined_features, b=mov_combined_features, size=1, scale=5)
-```
+ mov_fc = layers.fc(input=mov_emb, size=32)
-进而,我们使用余弦相似度计算用户特征与电影特征的相似性。并将这个相似性拟合(回归)到用户评分上。
+ CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
+ category_id = layers.data(
+ name='category_id', shape=[1], dtype='int64', lod_level=1)
-```python
-cost = paddle.layer.square_error_cost(
- input=inference,
- label=paddle.layer.data(
- name='score', type=paddle.data_type.dense_vector(1)))
-```
+ mov_categories_emb = layers.embedding(
+ input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
-至此,我们的优化目标就是这个网络配置中的`cost`了。
+ mov_categories_hidden = layers.sequence_pool(
+ input=mov_categories_emb, pool_type="sum")
-## 训练模型
+ MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
-### 定义参数
-神经网络的模型,我们可以简单的理解为网络拓朴结构+参数。之前一节,我们定义出了优化目标`cost`。这个`cost`即为网络模型的拓扑结构。我们开始训练模型,需要先定义出参数。定义方法为:
+ mov_title_id = layers.data(
+ name='movie_title', shape=[1], dtype='int64', lod_level=1)
+ mov_title_emb = layers.embedding(
+ input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
-```python
-parameters = paddle.parameters.create(cost)
-```
+ mov_title_conv = nets.sequence_conv_pool(
+ input=mov_title_emb,
+ num_filters=32,
+ filter_size=3,
+ act="tanh",
+ pool_type="sum")
+
+ concat_embed = layers.concat(
+ input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
- [INFO 2017-03-06 17:12:13,284 networks.py:1472] The input order is [user_id, gender_id, age_id, job_id, movie_id, category_id, movie_title, score]
- [INFO 2017-03-06 17:12:13,287 networks.py:1478] The output order is [__square_error_cost_0__]
+ mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
+ return mov_combined_features
+```
-`parameters`是模型的所有参数集合。他是一个python的dict。我们可以查看到这个网络中的所有参数名称。因为之前定义模型的时候,我们没有指定参数名称,这里参数名称是自动生成的。当然,我们也可以指定每一个参数名称,方便日后维护。
+电影标题名称(title)是一个整数序列,每个整数代表这个词在词典中的下标。这个序列会被送入 `sequence_conv_pool` 层,这个层会在时间维度上使用卷积和池化。因此,尽管输入的序列长度各不相同,输出都是固定长度的。
+最后,我们定义一个`inference_program`来使用余弦相似度计算用户特征与电影特征的相似性。
```python
-print parameters.keys()
+def inference_program():
+ usr_combined_features = get_usr_combined_features()
+ mov_combined_features = get_mov_combined_features()
+
+ inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
+ scale_infer = layers.scale(x=inference, scale=5.0)
+
+ return scale_infer
```
- [u'___fc_layer_2__.wbias', u'___fc_layer_2__.w2', u'___embedding_layer_3__.w0', u'___embedding_layer_5__.w0', u'___embedding_layer_2__.w0', u'___embedding_layer_1__.w0', u'___fc_layer_1__.wbias', u'___fc_layer_0__.wbias', u'___fc_layer_1__.w0', u'___fc_layer_0__.w2', u'___fc_layer_0__.w3', u'___fc_layer_0__.w0', u'___fc_layer_0__.w1', u'___fc_layer_2__.w1', u'___fc_layer_2__.w0', u'___embedding_layer_4__.w0', u'___sequence_conv_pool_0___conv_fc.w0', u'___embedding_layer_0__.w0', u'___sequence_conv_pool_0___conv_fc.wbias']
+进而,我们定义一个`train_program`,使用`inference_program`计算出的结果,在标记数据(评分)的帮助下计算误差。我们还定义了一个`optimizer_func`来指定优化器。
+```python
+def train_program():
-### 构造训练(trainer)
+ scale_infer = inference_program()
-下面,我们根据网络拓扑结构和模型参数来构造出一个本地训练(trainer)。在构造本地训练的时候,我们还需要指定这个训练的优化方法。这里我们使用Adam来作为优化算法。
+ label = layers.data(name='score', shape=[1], dtype='float32')
+ square_cost = layers.square_error_cost(input=scale_infer, label=label)
+ avg_cost = layers.mean(square_cost)
+ return [avg_cost, scale_infer]
-```python
-trainer = paddle.trainer.SGD(cost=cost, parameters=parameters,
- update_equation=paddle.optimizer.Adam(learning_rate=1e-4))
+
+def optimizer_func():
+ return fluid.optimizer.SGD(learning_rate=0.2)
```
- [INFO 2017-03-06 17:12:13,378 networks.py:1472] The input order is [user_id, gender_id, age_id, job_id, movie_id, category_id, movie_title, score]
- [INFO 2017-03-06 17:12:13,379 networks.py:1478] The output order is [__square_error_cost_0__]
+## 训练模型
+
+### 定义训练环境
+定义您的训练环境,可以指定训练是发生在CPU还是GPU上。
+
+```python
+use_cuda = False
+place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+```
-### 训练
+### 定义数据提供器
+下一步是为训练和测试定义数据提供器。提供器每次读入一个大小为 `BATCH_SIZE` 的数据批次。`paddle.dataset.movielens.train` 的数据会先以 `buf_size` 大小的缓存做乱序化处理,再按 `BATCH_SIZE` 组成批次提供。
-下面我们开始训练过程。
+```python
+train_reader = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.movielens.train(), buf_size=8192),
+ batch_size=BATCH_SIZE)
-我们直接使用Paddle提供的数据集读取程序。`paddle.dataset.movielens.train()`和`paddle.dataset.movielens.test()`分别做训练和预测数据集。并且通过`feeding`来指定每一个数据和data_layer的对应关系。
+test_reader = paddle.batch(
+ paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
+```
-例如,这里的feeding表示的是,对于数据层 `user_id`,使用了reader中每一条数据的第0个元素。`gender_id`数据层使用了第1个元素。以此类推。
+### 构造训练器(trainer)
+训练器需要一个训练程序和一个训练优化函数。
```python
-feeding = {
- 'user_id': 0,
- 'gender_id': 1,
- 'age_id': 2,
- 'job_id': 3,
- 'movie_id': 4,
- 'category_id': 5,
- 'movie_title': 6,
- 'score': 7
-}
+trainer = fluid.Trainer(
+ train_func=train_program, place=place, optimizer_func=optimizer_func)
```
-训练过程是完全自动的。我们可以使用event_handler与event_handler_plot来观察训练过程,或进行测试等。这里我们在event_handler_plot里面绘制了训练误差曲线和测试误差曲线。并且保存了模型。
+### 提供数据
+
+`feed_order`用来定义reader产出的每列数据和网络中数据层(`fluid.layers.data`)之间的映射关系。比如,`movielens.train`产生的第一列数据对应的是`user_id`这个特征。
```python
-def event_handler(event):
- if isinstance(event, paddle.event.EndIteration):
- if event.batch_id % 100 == 0:
- print "Pass %d Batch %d Cost %.2f" % (
- event.pass_id, event.batch_id, event.cost)
+feed_order = [
+ 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
+ 'movie_title', 'score'
+]
```
+### 事件处理器
+回调函数`event_handler`在一个之前定义好的事件发生后会被调用。例如,我们可以在每步训练结束后查看误差。
+
```python
-from paddle.v2.plot import Ploter
+# Specify the directory path to save the parameters
+params_dirname = "recommender_system.inference.model"
-train_title = "Train cost"
+from paddle.v2.plot import Ploter
test_title = "Test cost"
-cost_ploter = Ploter(train_title, test_title)
+plot_cost = Ploter(test_title)
-step = 0
-def event_handler_plot(event):
- global step
- if isinstance(event, paddle.event.EndIteration):
- if step % 10 == 0: # every 10 batches, record a train cost
- cost_ploter.append(train_title, step, event.cost)
+def event_handler(event):
+ if isinstance(event, fluid.EndStepEvent):
+ avg_cost_set = trainer.test(
+ reader=test_reader, feed_order=feed_order)
+
+ # get avg cost
+ avg_cost = np.array(avg_cost_set).mean()
- if step % 1000 == 0: # every 1000 batches, record a test cost
- result = trainer.test(
- reader=paddle.batch(
- paddle.dataset.movielens.test(), batch_size=256),
- feeding=feeding)
- cost_ploter.append(test_title, step, result.cost)
+ plot_cost.append(test_title, event.step, avg_cost_set[0])
+ plot_cost.plot()
- if step % 100 == 0: # every 100 batches, update cost plot
- cost_ploter.plot()
+ print("avg_cost: %s" % avg_cost)
+ print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
+ float(avg_cost)))
- step += 1
+ if event.step == 20: # Adjust this number for accuracy
+ trainer.save_params(params_dirname)
+ trainer.stop()
```
+### 开始训练
+最后,我们传入训练循环数(`num_epochs`)和一些别的参数,调用 `trainer.train` 来开始训练。
+
```python
trainer.train(
- reader=paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.movielens.train(), buf_size=8192),
- batch_size=256),
- event_handler=event_handler_plot,
- feeding=feeding,
- num_passes=2)
+ num_epochs=1,
+ event_handler=event_handler,
+ reader=train_reader,
+ feed_order=feed_order)
```
-
-![png](./image/output_32_0.png)
-
## 应用模型
-在训练了几轮以后,您可以对模型进行推断。我们可以使用任意一个用户ID和电影ID,来预测该用户对该电影的评分。示例程序为:
-
+### 构建预测器
+传入`inference_program`和`params_dirname`来初始化一个预测器。`params_dirname`用来存放训练过程中保存的各个参数。
```python
-import copy
-user_id = 234
-movie_id = 345
-
-user = user_info[user_id]
-movie = movie_info[movie_id]
+inferencer = fluid.Inferencer(
+ inference_program, param_path=params_dirname, place=place)
+```
-feature = user.value() + movie.value()
+### 生成测试用输入数据
+使用 `fluid.create_lod_tensor(data, lod, place)` 这个API来生成细节层次(LoD)的张量。`data`是一个序列,每个元素是一个索引号的序列;`lod`是对应于`data`的细节层次信息。比如,data = [[10, 2, 3], [2, 3]] 表示它包含两个序列,长度分别是3和2,于是相应地 lod = [[3, 2]],表明只有一层细节信息,且 `data` 中的两个序列长度分别是3和2。
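+
+下面用一小段示意代码构造上文例子中的 LoDTensor,仅为说明这个 API 的用法:
+
+```python
+# 示意:data 中包含两个序列,长度分别为 3 和 2,对应 lod = [[3, 2]]
+example_data = [[10, 2, 3], [2, 3]]
+example_lod = [[3, 2]]
+example_tensor = fluid.create_lod_tensor(example_data, example_lod, place)
+```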
-infer_dict = copy.copy(feeding)
-del infer_dict['score']
+在这个预测例子中,我们试着预测用户ID为1的用户对于电影'Hunchback of Notre Dame'的评分。
-prediction = paddle.infer(inference, parameters=parameters, input=[feature], feeding=infer_dict)
-score = (prediction[0][0] + 5.0) / 2
-print "[Predict] User %d Rating Movie %d With Score %.2f"%(user_id, movie_id, score)
+```python
+infer_movie_id = 783
+infer_movie_name = paddle.dataset.movielens.movie_info()[infer_movie_id].title
+user_id = fluid.create_lod_tensor([[1]], [[1]], place)
+gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
+age_id = fluid.create_lod_tensor([[0]], [[1]], place)
+job_id = fluid.create_lod_tensor([[10]], [[1]], place)
+movie_id = fluid.create_lod_tensor([[783]], [[1]], place) # Hunchback of Notre Dame
+category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place) # Animation, Children's, Musical
+movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]], [[5]],
+ place) # 'hunchback','of','notre','dame','the'
```
- [INFO 2017-03-06 17:17:08,132 networks.py:1472] The input order is [user_id, gender_id, age_id, job_id, movie_id, category_id, movie_title]
- [INFO 2017-03-06 17:17:08,134 networks.py:1478] The output order is [__cos_sim_0__]
-
+### 测试
+现在我们可以进行预测了。我们要提供的`feed_order`应该和训练过程一致。
- [Predict] User 234 Rating Movie 345 With Score 4.16
+```python
+results = inferencer.infer(
+ {
+ 'user_id': user_id,
+ 'gender_id': gender_id,
+ 'age_id': age_id,
+ 'job_id': job_id,
+ 'movie_id': movie_id,
+ 'category_id': category_id,
+ 'movie_title': movie_title
+ },
+ return_numpy=False)
+```
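+
+得到结果后,可以把它转换成 numpy 数组并输出预测评分(示意代码,假设 `results[0]` 即为预测得分):
+
+```python
+predict_rating = np.array(results[0])
+print("Predict Rating of user id 1 on movie \"" + infer_movie_name +
+      "\" is " + str(predict_rating))
+```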
## 总结
diff --git a/05.recommender_system/index.html b/05.recommender_system/index.html
index c3df396ac2414a648255443d94b5a7171cc01fa0..9aca3c3894cf6ed60f9386c022abb88022fcde86 100644
--- a/05.recommender_system/index.html
+++ b/05.recommender_system/index.html
@@ -447,6 +447,11 @@ For example, we can check the cost by `trainer.test` when `EndStepEvent` occurs
# Specify the directory path to save the parameters
params_dirname = "recommender_system.inference.model"
+from paddle.v2.plot import Ploter
+test_title = "Test cost"
+plot_cost = Ploter(test_title)
+
+
def event_handler(event):
if isinstance(event, fluid.EndStepEvent):
avg_cost_set = trainer.test(
@@ -455,11 +460,14 @@ def event_handler(event):
# get avg cost
avg_cost = np.array(avg_cost_set).mean()
+ plot_cost.append(test_title, event.step, avg_cost_set[0])
+ plot_cost.plot()
+
print("avg_cost: %s" % avg_cost)
print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
float(avg_cost)))
- if float(avg_cost) < 4:
+ if event.step == 20: # Adjust this number for accuracy
trainer.save_params(params_dirname)
trainer.stop()
diff --git a/06.understand_sentiment/index.cn.html b/06.understand_sentiment/index.cn.html
index 16a5a8c039b56230376aea90d9b054ca785d42c9..2be442eabded20f444ef8962eb25b49aab7c213c 100644
--- a/06.understand_sentiment/index.cn.html
+++ b/06.understand_sentiment/index.cn.html
@@ -129,9 +129,8 @@ $$ h_t=Recrurent(x_t,h_{t-1})$$
图3. 栈式双向LSTM用于文本分类
-## 示例程序
-### 数据集介绍
+## 数据集介绍
我们以[IMDB情感分析数据集](http://ai.stanford.edu/%7Eamaas/data/sentiment/)为例进行介绍。IMDB数据集的训练集和测试集分别包含25000个已标注过的电影评论。其中,负面评论的得分小于等于4,正面评论的得分大于等于7,满分10分。
```text
@@ -145,95 +144,70 @@ aclImdb
```
Paddle在`dataset/imdb.py`中提实现了imdb数据集的自动下载和读取,并提供了读取字典、训练数据、测试数据等API。
+## 配置模型
+
+在该示例中,我们实现了两种文本分类算法,分别基于[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节介绍过的文本卷积神经网络,以及[栈式双向LSTM](#栈式双向LSTM(Stacked Bidirectional LSTM))。我们首先引入要用到的库和定义全局变量:
+
```python
-import sys
-import paddle.v2 as paddle
+import paddle
+import paddle.fluid as fluid
+from functools import partial
+import numpy as np
+
+CLASS_DIM = 2
+EMB_DIM = 128
+HID_DIM = 512
+BATCH_SIZE = 128
+USE_GPU = False
```
-## 配置模型
-在该示例中,我们实现了两种文本分类算法,分别基于[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节介绍过的文本卷积神经网络,以及[栈式双向LSTM](#栈式双向LSTM(Stacked Bidirectional LSTM))。
### 文本卷积神经网络
+我们构建神经网络`convolution_net`,示例代码如下。
+需要注意的是:`fluid.nets.sequence_conv_pool` 包含卷积和池化层两个操作。
```python
-def convolution_net(input_dim,
- class_dim=2,
- emb_dim=128,
- hid_dim=128,
- is_predict=False):
- data = paddle.layer.data("word",
- paddle.data_type.integer_value_sequence(input_dim))
- emb = paddle.layer.embedding(input=data, size=emb_dim)
- conv_3 = paddle.networks.sequence_conv_pool(
- input=emb, context_len=3, hidden_size=hid_dim)
- conv_4 = paddle.networks.sequence_conv_pool(
- input=emb, context_len=4, hidden_size=hid_dim)
- output = paddle.layer.fc(input=[conv_3, conv_4],
- size=class_dim,
- act=paddle.activation.Softmax())
- if not is_predict:
- lbl = paddle.layer.data("label", paddle.data_type.integer_value(2))
- cost = paddle.layer.classification_cost(input=output, label=lbl)
- return cost
- else:
- return output
+def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
+ emb = fluid.layers.embedding(
+ input=data, size=[input_dim, emb_dim], is_sparse=True)
+ conv_3 = fluid.nets.sequence_conv_pool(
+ input=emb,
+ num_filters=hid_dim,
+ filter_size=3,
+ act="tanh",
+ pool_type="sqrt")
+ conv_4 = fluid.nets.sequence_conv_pool(
+ input=emb,
+ num_filters=hid_dim,
+ filter_size=4,
+ act="tanh",
+ pool_type="sqrt")
+ prediction = fluid.layers.fc(
+ input=[conv_3, conv_4], size=class_dim, act="softmax")
+ return prediction
```
+
网络的输入`input_dim`表示的是词典的大小,`class_dim`表示类别数。这里,我们使用[`sequence_conv_pool`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/trainer_config_helpers/networks.py) API实现了卷积和池化操作。
### 栈式双向LSTM
+栈式双向神经网络`stacked_lstm_net`的代码片段如下:
+
```python
-def stacked_lstm_net(input_dim,
- class_dim=2,
- emb_dim=128,
- hid_dim=512,
- stacked_num=3,
- is_predict=False):
- """
- A Wrapper for sentiment classification task.
- This network uses bi-directional recurrent network,
- consisting three LSTM layers. This configure is referred to
- the paper as following url, but use fewer layrs.
- http://www.aclweb.org/anthology/P15-1109
-
- input_dim: here is word dictionary dimension.
- class_dim: number of categories.
- emb_dim: dimension of word embedding.
- hid_dim: dimension of hidden layer.
- stacked_num: number of stacked lstm-hidden layer.
- """
- assert stacked_num % 2 == 1
-
- fc_para_attr = paddle.attr.Param(learning_rate=1e-3)
- lstm_para_attr = paddle.attr.Param(initial_std=0., learning_rate=1.)
- para_attr = [fc_para_attr, lstm_para_attr]
- bias_attr = paddle.attr.Param(initial_std=0., l2_rate=0.)
- relu = paddle.activation.Relu()
- linear = paddle.activation.Linear()
-
- data = paddle.layer.data("word",
- paddle.data_type.integer_value_sequence(input_dim))
- emb = paddle.layer.embedding(input=data, size=emb_dim)
-
- fc1 = paddle.layer.fc(input=emb,
- size=hid_dim,
- act=linear,
- bias_attr=bias_attr)
- lstm1 = paddle.layer.lstmemory(
- input=fc1, act=relu, bias_attr=bias_attr)
+def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
+
+ emb = fluid.layers.embedding(
+ input=data, size=[input_dim, emb_dim], is_sparse=True)
+
+ fc1 = fluid.layers.fc(input=emb, size=hid_dim)
+ lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)
inputs = [fc1, lstm1]
+
for i in range(2, stacked_num + 1):
- fc = paddle.layer.fc(input=inputs,
- size=hid_dim,
- act=linear,
- param_attr=para_attr,
- bias_attr=bias_attr)
- lstm = paddle.layer.lstmemory(
- input=fc,
- reverse=(i % 2) == 0,
- act=relu,
- bias_attr=bias_attr)
+ fc = fluid.layers.fc(input=inputs, size=hid_dim)
+ lstm, cell = fluid.layers.dynamic_lstm(
+ input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
inputs = [fc, lstm]
fc_last = paddle.layer.pooling(input=inputs[0], pooling_type=paddle.pooling.Max())
@@ -244,162 +218,165 @@ def stacked_lstm_net(input_dim,
bias_attr=bias_attr,
param_attr=para_attr)
- if not is_predict:
- lbl = paddle.layer.data("label", paddle.data_type.integer_value(2))
- cost = paddle.layer.classification_cost(input=output, label=lbl)
- return cost
- else:
- return output
+ lbl = paddle.layer.data("label", paddle.data_type.integer_value(2))
+ cost = paddle.layer.classification_cost(input=output, label=lbl)
+ return cost, output
```
-网络的输入`stacked_num`表示的是LSTM的层数,需要是奇数,确保最高层LSTM正向。Paddle里面是通过一个fc和一个lstmemory来实现基于LSTM的循环神经网络。
+以上的栈式双向LSTM抽象出了高级特征,并把它映射到和分类类别数同样大小的向量上。最后一层以 softmax 作为激活函数,用来计算样本属于某个类别的概率。
-## 训练模型
+重申一下,此处我们可以调用`convolution_net`或`stacked_lstm_net`的任何一个。我们以`convolution_net`为例。
+
+接下来我们定义预测程序(`inference_program`)。预测程序使用`convolution_net`来对`fluid.layers.data`的输入进行预测。
```python
-if __name__ == '__main__':
- # init
- paddle.init(use_gpu=False)
+def inference_program(word_dict):
+ data = fluid.layers.data(
+ name="words", shape=[1], dtype="int64", lod_level=1)
+
+ dict_dim = len(word_dict)
+ net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM)
+ return net
```
-启动paddle程序,use_gpu=False表示用CPU训练,如果系统支持GPU也可以修改成True使用GPU训练。
-### 训练数据
+我们这里定义了`train_program`。它使用从`inference_program`返回的结果来计算误差。我们同时定义了优化函数`optimizer_func`。
+
+因为是有监督的学习,训练集的标签也通过`fluid.layers.data`定义了。在训练过程中,`fluid.layers.cross_entropy`计算的交叉熵被用作损失函数。
+
+在测试过程中,分类器会计算样本属于各个类别的概率。约定第一个返回值为损失(cost)。
-使用Paddle提供的数据集`dataset.imdb`中的API来读取训练数据。
```python
- print 'load dictionary...'
- word_dict = paddle.dataset.imdb.word_dict()
- dict_dim = len(word_dict)
- class_dim = 2
+def train_program(word_dict):
+ prediction = inference_program(word_dict)
+ label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+ cost = fluid.layers.cross_entropy(input=prediction, label=label)
+ avg_cost = fluid.layers.mean(cost)
+ accuracy = fluid.layers.accuracy(input=prediction, label=label)
+ return [avg_cost, accuracy]
+
+
+def optimizer_func():
+ return fluid.optimizer.Adagrad(learning_rate=0.002)
```
-加载数据字典,这里通过`word_dict()`API可以直接构造字典。`class_dim`是指样本类别数,该示例中样本只有正负两类。
+
+## 训练模型
+
+### 定义训练环境
+
+定义您的训练是在CPU上还是在GPU上:
+
+
```python
- train_reader = paddle.batch(
- paddle.reader.shuffle(
- lambda: paddle.dataset.imdb.train(word_dict), buf_size=1000),
- batch_size=100)
- test_reader = paddle.batch(
- lambda: paddle.dataset.imdb.test(word_dict),
- batch_size=100)
+use_cuda = False
+place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
```
-这里,`dataset.imdb.train()`和`dataset.imdb.test()`分别是`dataset.imdb`中的训练数据和测试数据API。`train_reader`在训练时使用,意义是将读取的训练数据进行shuffle后,组成一个batch数据。同理,`test_reader`是在测试的时候使用,将读取的测试数据组成一个batch。
+
+### 定义数据提供器
+
+下一步是为训练和测试定义数据提供器。提供器每次读入一个大小为 `BATCH_SIZE` 的数据批次。`paddle.dataset.imdb.train` 的数据会先以 `buf_size` 大小的缓存做乱序化处理,再按 `BATCH_SIZE` 组成批次提供。
+
+注意:读取IMDB的数据可能会花费几分钟的时间,请耐心等待。
+
```python
- feeding={'word': 0, 'label': 1}
+print("Loading IMDB word dict....")
+word_dict = paddle.dataset.imdb.word_dict()
+
+print ("Reading training data....")
+train_reader = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.imdb.train(word_dict), buf_size=25000),
+ batch_size=BATCH_SIZE)
```
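+
+如果还想在测试集上评估模型,也可以用同样的方式定义一个测试数据提供器(示意代码,本文后面的训练流程并没有用到它):
+
+```python
+test_reader = paddle.batch(
+    paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
+```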
-`feeding`用来指定`train_reader`和`test_reader`返回的数据与模型配置中data_layer的对应关系。这里表示reader返回的第0列数据对应`word`层,第1列数据对应`label`层。
-### 构造模型
+### 构造训练器(trainer)
+训练器需要一个训练程序和一个训练优化函数。
```python
- # Please choose the way to build the network
- # by uncommenting the corresponding line.
- cost = convolution_net(dict_dim, class_dim=class_dim)
- # cost = stacked_lstm_net(dict_dim, class_dim=class_dim, stacked_num=3)
+trainer = fluid.Trainer(
+ train_func=partial(train_program, word_dict),
+ place=place,
+ optimizer_func=optimizer_func)
```
-该示例中默认使用`convolution_net`网络,如果使用`stacked_lstm_net`网络,注释相应的行即可。其中cost是网络的优化目标,同时cost包含了整个网络的拓扑信息。
-### 网络参数
+### 提供数据
+
+`feed_order`用来定义reader产出的每列数据和网络中数据层(`fluid.layers.data`)之间的映射关系。比如,`imdb.train`产生的第一列数据对应的是`words`这个特征。
```python
- # create parameters
- parameters = paddle.parameters.create(cost)
+feed_order = ['words', 'label']
```
-根据网络的拓扑构造网络参数。这里parameters是整个网络的参数集。
-### 优化算法
+### 事件处理器
+
+回调函数`event_handler`在一个之前定义好的事件发生后会被调用。例如,我们可以在每步训练结束后查看误差。
```python
- # create optimizer
- adam_optimizer = paddle.optimizer.Adam(
- learning_rate=2e-3,
- regularization=paddle.optimizer.L2Regularization(rate=8e-4),
- model_average=paddle.optimizer.ModelAverage(average_window=0.5))
+# Specify the directory path to save the parameters
+params_dirname = "understand_sentiment_conv.inference.model"
+
+def event_handler(event):
+ if isinstance(event, fluid.EndStepEvent):
+ print("Step {0}, Epoch {1} Metrics {2}".format(
+ event.step, event.epoch, map(np.array, event.metrics)))
+
+ if event.step == 10:
+ trainer.save_params(params_dirname)
+ trainer.stop()
```
-Paddle中提供了一系列优化算法的API,这里使用Adam优化算法。
-### 训练
+### 开始训练
+
+最后,我们传入训练循环数(`num_epochs`)和一些别的参数,调用 `trainer.train` 来开始训练。
-可以通过`paddle.trainer.SGD`构造一个sgd trainer,并调用`trainer.train`来训练模型。另外,通过给train函数传递一个`event_handler`来获取每个batch和每个pass结束的状态。
```python
- # End batch and end pass event handler
- def event_handler(event):
- if isinstance(event, paddle.event.EndIteration):
- if event.batch_id % 100 == 0:
- print "\nPass %d, Batch %d, Cost %f, %s" % (
- event.pass_id, event.batch_id, event.cost, event.metrics)
- else:
- sys.stdout.write('.')
- sys.stdout.flush()
- if isinstance(event, paddle.event.EndPass):
- with open('./params_pass_%d.tar' % event.pass_id, 'w') as f:
- trainer.save_parameter_to_tar(f)
-
- result = trainer.test(reader=test_reader, feeding=feeding)
- print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
+trainer.train(
+ num_epochs=1,
+ event_handler=event_handler,
+ reader=train_reader,
+ feed_order=feed_order)
```
-比如,构造如下一个`event_handler`可以在每100个batch结束后输出cost和error;在每个pass结束后调用`trainer.test`计算一遍测试集并获得当前模型在测试集上的error。
+
+## 应用模型
+
+### 构建预测器
+
+传入`inference_program`和`params_dirname`来初始化一个预测器。`params_dirname`用来存放训练过程中保存的各个参数。
+
```python
- from paddle.v2.plot import Ploter
-
- train_title = "Train cost"
- cost_ploter = Ploter(train_title)
- step = 0
- def event_handler_plot(event):
- global step
- if isinstance(event, paddle.event.EndIteration):
- cost_ploter.append(train_title, step, event.cost)
- cost_ploter.plot()
- step += 1
+inferencer = fluid.Inferencer(
+ inference_program, param_path=params_dirname, place=place)
```
-或者构造一个`event_handler_plot`画出cost曲线。
+
+### 生成测试用输入数据
+
+为了进行预测,我们任意选取了3条评论,您也可以换成自己喜欢的评论。我们把评论中的每个词对应到`word_dict`中的id;如果词典中没有这个词,则用未知词(UNK)的id代替。
+然后我们用`create_lod_tensor`来创建细节层次的张量。
+
```python
- # create trainer
- trainer = paddle.trainer.SGD(cost=cost,
- parameters=parameters,
- update_equation=adam_optimizer)
-
- trainer.train(
- reader=train_reader,
- event_handler=event_handler,
- feeding=feeding,
- num_passes=2)
-```
-程序运行之后的输出如下。
-```text
-Pass 0, Batch 0, Cost 0.693721, {'classification_error_evaluator': 0.5546875}
-...................................................................................................
-Pass 0, Batch 100, Cost 0.294321, {'classification_error_evaluator': 0.1015625}
-...............................................................................................
-Test with Pass 0, {'classification_error_evaluator': 0.11432000249624252}
+reviews_str = [
+ 'read the book forget the movie', 'this is a great movie', 'this is very bad'
+]
+reviews = [c.split() for c in reviews_str]
+
+UNK = word_dict['']
+lod = []
+for c in reviews:
+ lod.append([word_dict.get(words, UNK) for words in c])
+
+base_shape = [[len(c) for c in lod]]
+
+tensor_words = fluid.create_lod_tensor(lod, base_shape, place)
```
## 应用模型
-可以使用训练好的模型对电影评论进行分类,下面程序展示了如何使用`paddle.infer`接口进行推断。
+现在我们可以对每一条评论进行正面或者负面的预测啦。
+
```python
- import numpy as np
-
- # Movie Reviews, from imdb test
- reviews = [
- 'Read the book, forget the movie!',
- 'This is a great movie.'
- ]
- reviews = [c.split() for c in reviews]
-
- UNK = word_dict['']
- input = []
- for c in reviews:
- input.append([[word_dict.get(words, UNK) for words in c]])
-
- # 0 stands for positive sample, 1 stands for negative sample
- label = {0:'pos', 1:'neg'}
- # Use the network used by trainer
- out = convolution_net(dict_dim, class_dim=class_dim, is_predict=True)
- # out = stacked_lstm_net(dict_dim, class_dim=class_dim, stacked_num=3, is_predict=True)
- probs = paddle.infer(output_layer=out, parameters=parameters, input=input)
-
- labs = np.argsort(-probs)
- for idx, lab in enumerate(labs):
- print idx, "predicting probability is", probs[idx], "label is", label[lab[0]]
+results = inferencer.infer({'words': tensor_words})
+
+for i, r in enumerate(results[0]):
+ print("Predict probability of ", r[0], " to be positive and ", r[1], " to be negative for review \'", reviews_str[i], "\'")
+
```
diff --git a/07.label_semantic_roles/index.cn.html b/07.label_semantic_roles/index.cn.html
index 540fb0b13a32609bfe3ce2f0a8f93bf34de99b67..0a3276359a5f5deb40df536b249936d10fa4eba7 100644
--- a/07.label_semantic_roles/index.cn.html
+++ b/07.label_semantic_roles/index.cn.html
@@ -229,22 +229,23 @@ conll05st-release/
获取词典,打印词典大小:
```python
-import math
+import math, os
import numpy as np
-import paddle.v2 as paddle
+import paddle
import paddle.v2.dataset.conll05 as conll05
-import paddle.v2.evaluator as evaluator
+import paddle.fluid as fluid
+import time
-paddle.init(use_gpu=False, trainer_count=1)
+with_gpu = os.getenv('WITH_GPU', '0') != '0'
word_dict, verb_dict, label_dict = conll05.get_dict()
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
-pred_len = len(verb_dict)
+pred_dict_len = len(verb_dict)
print word_dict_len
print label_dict_len
-print pred_len
+print pred_dict_len
```
## 模型配置说明
@@ -252,293 +253,341 @@ print pred_len
- 定义输入数据维度及模型超参数。
```python
-mark_dict_len = 2 # 谓上下文区域标志的维度,是一个0-1 2值特征,因此维度为2
-word_dim = 32 # 词向量维度
-mark_dim = 5 # 谓词上下文区域通过词表被映射为一个实向量,这个是相邻的维度
-hidden_dim = 512 # LSTM隐层向量的维度 : 512 / 4
-depth = 8 # 栈式LSTM的深度
-
-# 一条样本总共9个特征,下面定义了9个data层,每个层类型为integer_value_sequence,表示整数ID的序列类型.
-def d_type(size):
- return paddle.data_type.integer_value_sequence(size)
-
-# 句子序列
-word = paddle.layer.data(name='word_data', type=d_type(word_dict_len))
-# 谓词
-predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len))
-
-# 谓词上下文5个特征
-ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len))
-ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len))
-ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len))
-ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len))
-ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len))
-
-# 谓词上下区域标志
-mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len))
-
-# 标注序列
-target = paddle.layer.data(name='target', type=d_type(label_dict_len))
-```
-
-这里需要特别说明的是hidden_dim = 512指定了LSTM隐层向量的维度为128维,关于这一点请参考PaddlePaddle官方文档中[lstmemory](http://www.paddlepaddle.org/doc/ui/api/trainer_config_helpers/layers.html#lstmemory)的说明。
-
-- 将句子序列、谓词、谓词上下文、谓词上下文区域标记通过词表,转换为实向量表示的词向量序列。
-
-```python
-
-# 在本教程中,我们加载了预训练的词向量,这里设置了:is_static=True
-# is_static 为 True 时保证了在训练 SRL 模型过程中,词表不再更新
-emb_para = paddle.attr.Param(name='emb', initial_std=0., is_static=True)
-# 设置超参数
-default_std = 1 / math.sqrt(hidden_dim) / 3.0
-std_default = paddle.attr.Param(initial_std=default_std)
-std_0 = paddle.attr.Param(initial_std=0.)
-
-predicate_embedding = paddle.layer.embedding(
- size=word_dim,
- input=predicate,
- param_attr=paddle.attr.Param(
- name='vemb', initial_std=default_std))
-mark_embedding = paddle.layer.embedding(
- size=mark_dim, input=mark, param_attr=std_0)
-
-word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
-emb_layers = [
- paddle.layer.embedding(
- size=word_dim, input=x, param_attr=emb_para) for x in word_input
-]
-emb_layers.append(predicate_embedding)
-emb_layers.append(mark_embedding)
-```
-
-- 8个LSTM单元以“正向/反向”的顺序对所有输入序列进行学习。
-
-```python
-hidden_0 = paddle.layer.mixed(
- size=hidden_dim,
- bias_attr=std_default,
- input=[
- paddle.layer.full_matrix_projection(
- input=emb, param_attr=std_default) for emb in emb_layers
- ])
-
+mark_dict_len = 2    # 谓词上下文区域标志的维度,是一个取值为0或1的二值特征,因此维度为2
+word_dim = 32        # 词向量维度
+mark_dim = 5         # 谓词上下文区域标志通过词表被映射为一个实向量,这是该向量的维度
+hidden_dim = 512 # LSTM隐层向量的维度 : 512 / 4
+depth = 8 # 栈式LSTM的深度
mix_hidden_lr = 1e-3
-lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0)
-hidden_para_attr = paddle.attr.Param(
- initial_std=default_std, learning_rate=mix_hidden_lr)
-
-lstm_0 = paddle.layer.lstmemory(
- input=hidden_0,
- act=paddle.activation.Relu(),
- gate_act=paddle.activation.Sigmoid(),
- state_act=paddle.activation.Sigmoid(),
- bias_attr=std_0,
- param_attr=lstm_para_attr)
-
-#stack L-LSTM and R-LSTM with direct edges
-input_tmp = [hidden_0, lstm_0]
-
-for i in range(1, depth):
- mix_hidden = paddle.layer.mixed(
- size=hidden_dim,
- bias_attr=std_default,
- input=[
- paddle.layer.full_matrix_projection(
- input=input_tmp[0], param_attr=hidden_para_attr),
- paddle.layer.full_matrix_projection(
- input=input_tmp[1], param_attr=lstm_para_attr)
- ])
-
- lstm = paddle.layer.lstmemory(
- input=mix_hidden,
- act=paddle.activation.Relu(),
- gate_act=paddle.activation.Sigmoid(),
- state_act=paddle.activation.Sigmoid(),
- reverse=((i % 2) == 1),
- bias_attr=std_0,
- param_attr=lstm_para_attr)
-
- input_tmp = [mix_hidden, lstm]
-```
-
-- 在PaddlePaddle中,CRF的状态特征和转移特征分别由一个全连接层和一个PaddlePaddle中的CRF层分别学习。在这个例子中,我们用线性激活的paddle.layer.mixed 来学习CRF的状态特征(也可以使用paddle.layer.fc),而 paddle.layer.crf只学习转移特征。paddle.layer.crf层是一个 cost 层,处于整个网络的末端,输出给定输入序列下,标记序列的log probability作为代价。训练阶段,该层需要输入正确的标记序列作为学习目标。
-
-```python
-
-# 取最后一个栈式LSTM的输出和这个LSTM单元的输入到隐层映射,
-# 经过一个全连接层映射到标记字典的维度,来学习 CRF 的状态特征
-
-feature_out = paddle.layer.mixed(
- size=label_dict_len,
- bias_attr=std_default,
- input=[
- paddle.layer.full_matrix_projection(
- input=input_tmp[0], param_attr=hidden_para_attr),
- paddle.layer.full_matrix_projection(
- input=input_tmp[1], param_attr=lstm_para_attr)
- ], )
-
-# 学习 CRF 的转移特征
-crf_cost = paddle.layer.crf(
- size=label_dict_len,
- input=feature_out,
- label=target,
- param_attr=paddle.attr.Param(
- name='crfw',
- initial_std=default_std,
- learning_rate=mix_hidden_lr))
-```
-- CRF解码和CRF层参数名字相同,即:加载了`paddle.layer.crf`层学习到的参数。在训练阶段,为`paddle.layer.crf_decoding` 输入了正确的标记序列(target),这一层会输出是否正确标记,`evaluator.sum` 用来计算序列上的标记错误率,可以用来评估模型。解码阶段,没有输入正确的数据标签,该层通过寻找概率最高的标记序列,解码出标记结果。
+IS_SPARSE = True
+PASS_NUM = 10
+BATCH_SIZE = 10
-```python
-crf_dec = paddle.layer.crf_decoding(
- size=label_dict_len,
- input=feature_out,
- label=target,
- param_attr=paddle.attr.Param(name='crfw'))
-evaluator.sum(input=crf_dec)
+embedding_name = 'emb'
```
-## 训练模型
-
-### 定义参数
-
-首先依据模型配置的`crf_cost`定义模型参数。
-
-```python
-# create parameters
-parameters = paddle.parameters.create(crf_cost)
-```
-
-可以打印参数名字,如果在网络配置中没有指定名字,则默认生成。
-
-```python
-print parameters.keys()
-```
+这里需要特别说明的是,hidden_dim = 512 实际指定了LSTM隐层向量的维度为128维(512/4),关于这一点请参考PaddlePaddle官方文档中[lstmemory](http://www.paddlepaddle.org/doc/ui/api/trainer_config_helpers/layers.html#lstmemory)的说明。
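+
+下面用一个仅作示意的小片段说明这一换算(沿用上文的超参数定义):
+
+```python
+# 仅作示意:按照上文说明,传给LSTM层的size参数是实际隐层维度的4倍
+hidden_dim = 512
+lstm_hidden_size = hidden_dim // 4
+print(lstm_hidden_size)  # 输出 128
+```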
-如上文提到,我们用基于英文维基百科训练好的词向量来初始化序列输入、谓词上下文总共6个特征的embedding层参数,在训练中不更新。
+- 如上文提到,我们用基于英文维基百科训练好的词向量来初始化序列输入、谓词上下文总共6个特征的embedding层参数,在训练中不更新。
```python
# 这里加载PaddlePaddle上版保存的二进制模型
def load_parameter(file_name, h, w):
with open(file_name, 'rb') as f:
- f.read(16)
+ f.read(16) # skip header.
return np.fromfile(f, dtype=np.float32).reshape(h, w)
-parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32))
```
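+
+下面是一个仅作示意的用法片段,展示如何用上面的`load_parameter`读取预训练词向量并检查其形状(`word_dict_len`、`word_dim`沿用上文定义):
+
+```python
+# 仅作示意:读取预训练词向量,预期形状为 (word_dict_len, word_dim)
+pretrained_emb = load_parameter(conll05.get_embedding(), word_dict_len, word_dim)
+print(pretrained_emb.shape)
+```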
-### 构造训练(Trainer)
+- 8个LSTM单元以“正向/反向”的顺序对所有输入序列进行学习。
-然后根据网络拓扑结构和模型参数来构造出trainer用来训练,在构造时还需指定优化方法,这里使用最基本的SGD方法(momentum设置为0),同时设定了学习率、正则等。
+```python
+def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
+ **ignored):
+ # 8 features
+ predicate_embedding = fluid.layers.embedding(
+ input=predicate,
+ size=[pred_dict_len, word_dim],
+ dtype='float32',
+ is_sparse=IS_SPARSE,
+ param_attr='vemb')
+
+ mark_embedding = fluid.layers.embedding(
+ input=mark,
+ size=[mark_dict_len, mark_dim],
+ dtype='float32',
+ is_sparse=IS_SPARSE)
+
+ word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
+ # Since word vector lookup table is pre-trained, we won't update it this time.
+ # trainable being False prevents updating the lookup table during training.
+ emb_layers = [
+ fluid.layers.embedding(
+ size=[word_dict_len, word_dim],
+ input=x,
+ param_attr=fluid.ParamAttr(
+ name=embedding_name, trainable=False)) for x in word_input
+ ]
+ emb_layers.append(predicate_embedding)
+ emb_layers.append(mark_embedding)
+
+ # 8 LSTM units are trained through alternating left-to-right / right-to-left order
+    # denoted by the variable `is_reverse`.
+ hidden_0_layers = [
+ fluid.layers.fc(input=emb, size=hidden_dim, act='tanh')
+ for emb in emb_layers
+ ]
+
+ hidden_0 = fluid.layers.sums(input=hidden_0_layers)
+
+ lstm_0 = fluid.layers.dynamic_lstm(
+ input=hidden_0,
+ size=hidden_dim,
+ candidate_activation='relu',
+ gate_activation='sigmoid',
+ cell_activation='sigmoid')
+
+ # stack L-LSTM and R-LSTM with direct edges
+ input_tmp = [hidden_0, lstm_0]
+
+    # In PaddlePaddle, the state features and transition features of a CRF are implemented
+    # by a fully connected layer and a CRF layer separately. The fully connected layer
+    # with linear activation learns the state features (here we use fluid.layers.sums;
+    # fluid.layers.fc can be used as well), while the CRF layer in PaddlePaddle,
+    # fluid.layers.linear_chain_crf, only
+    # learns the transition features. It is a cost layer and the last layer of the network.
+    # fluid.layers.linear_chain_crf outputs the log probability of the true tag sequence
+    # as the cost, given the input sequence, and it requires the true tag sequence
+    # as the target in the learning process.
+
+ for i in range(1, depth):
+ mix_hidden = fluid.layers.sums(input=[
+ fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'),
+ fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh')
+ ])
-```python
-# create optimizer
-optimizer = paddle.optimizer.Momentum(
- momentum=0,
- learning_rate=1e-3,
- regularization=paddle.optimizer.L2Regularization(rate=8e-4),
- model_average=paddle.optimizer.ModelAverage(
- average_window=0.5, max_average_window=10000), )
-
-trainer = paddle.trainer.SGD(cost=crf_cost,
- parameters=parameters,
- update_equation=optimizer,
- extra_layers=crf_dec)
+ lstm = fluid.layers.dynamic_lstm(
+ input=mix_hidden,
+ size=hidden_dim,
+ candidate_activation='relu',
+ gate_activation='sigmoid',
+ cell_activation='sigmoid',
+ is_reverse=((i % 2) == 1))
+
+ input_tmp = [mix_hidden, lstm]
+
+ # 取最后一个栈式LSTM的输出和这个LSTM单元的输入到隐层映射,
+ # 经过一个全连接层映射到标记字典的维度,来学习 CRF 的状态特征
+ feature_out = fluid.layers.sums(input=[
+ fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'),
+ fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh')
+ ])
+
+ return feature_out
```
-### 训练
+## 训练模型
-数据介绍部分提到CoNLL 2005训练集付费,这里我们使用测试集训练供大家学习。`conll05.test()`每次产生一条样本,包含9个特征,shuffle和组完batch后作为训练的输入。
+- 在`train`方法中,我们根据网络拓扑结构和模型参数来构造训练程序,在构造时还需指定优化方法,这里使用最基本的SGD方法,同时设定了学习率及其衰减策略。
-```python
-reader = paddle.batch(
- paddle.reader.shuffle(
- conll05.test(), buf_size=8192), batch_size=2)
-```
+- 数据介绍部分提到CoNLL 2005训练集付费,这里我们使用测试集训练供大家学习。`conll05.test()`每次产生一条样本,包含9个特征,shuffle和组完batch后作为训练的输入(完整的训练代码之后还给出了一个读取器输出结构的最小示例)。
-通过`feeding`来指定每一个数据和data_layer的对应关系。 例如 下面`feeding`表示: `conll05.test()`产生数据的第0列对应`word_data`层的特征。
+- 通过`fluid.DataFeeder`的`feed_list`来指定每一条数据与各个数据层的对应关系。例如:按照下面`feed_list`的顺序,`conll05.test()`产生数据的第0列对应名为`word_data`的数据层。
+- 在训练循环中,我们每隔若干个batch打印一次cost,以便观察训练过程。
+
+- 最后定义`train_loop`,通过`Executor`执行训练。
```python
-feeding = {
- 'word_data': 0,
- 'ctx_n2_data': 1,
- 'ctx_n1_data': 2,
- 'ctx_0_data': 3,
- 'ctx_p1_data': 4,
- 'ctx_p2_data': 5,
- 'verb_data': 6,
- 'mark_data': 7,
- 'target': 8
-}
+def train(use_cuda, save_dirname=None, is_local=True):
+ # define network topology
+
+ # 句子序列
+ word = fluid.layers.data(
+ name='word_data', shape=[1], dtype='int64', lod_level=1)
+
+ # 谓词
+ predicate = fluid.layers.data(
+ name='verb_data', shape=[1], dtype='int64', lod_level=1)
+
+ # 谓词上下文5个特征
+ ctx_n2 = fluid.layers.data(
+ name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_n1 = fluid.layers.data(
+ name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_0 = fluid.layers.data(
+ name='ctx_0_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_p1 = fluid.layers.data(
+ name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_p2 = fluid.layers.data(
+ name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1)
+
+ # 谓词上下区域标志
+ mark = fluid.layers.data(
+ name='mark_data', shape=[1], dtype='int64', lod_level=1)
+
+ # define network topology
+ feature_out = db_lstm(**locals())
+
+ # 标注序列
+ target = fluid.layers.data(
+ name='target', shape=[1], dtype='int64', lod_level=1)
+
+ # 学习 CRF 的转移特征
+ crf_cost = fluid.layers.linear_chain_crf(
+ input=feature_out,
+ label=target,
+ param_attr=fluid.ParamAttr(
+ name='crfw', learning_rate=mix_hidden_lr))
+
+ avg_cost = fluid.layers.mean(crf_cost)
+
+ sgd_optimizer = fluid.optimizer.SGD(
+ learning_rate=fluid.layers.exponential_decay(
+ learning_rate=0.01,
+ decay_steps=100000,
+ decay_rate=0.5,
+ staircase=True))
+
+ sgd_optimizer.minimize(avg_cost)
+
+ # The CRF decoding layer is used for evaluation and inference.
+ # It shares weights with CRF layer. The sharing of parameters among multiple layers
+ # is specified by using the same parameter name in these layers. If true tag sequence
+ # is provided in training process, `fluid.layers.crf_decoding` calculates labelling error
+ # for each input token and sums the error over the entire sequence.
+ # Otherwise, `fluid.layers.crf_decoding` generates the labelling tags.
+ crf_decode = fluid.layers.crf_decoding(
+ input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))
+
+ train_data = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.conll05.test(), buf_size=8192),
+ batch_size=BATCH_SIZE)
+
+ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+
+ feeder = fluid.DataFeeder(
+ feed_list=[
+ word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target
+ ],
+ place=place)
+ exe = fluid.Executor(place)
+
+ def train_loop(main_program):
+ exe.run(fluid.default_startup_program())
+ embedding_param = fluid.global_scope().find_var(
+ embedding_name).get_tensor()
+ embedding_param.set(
+ load_parameter(conll05.get_embedding(), word_dict_len, word_dim),
+ place)
+
+ start_time = time.time()
+ batch_id = 0
+ for pass_id in xrange(PASS_NUM):
+ for data in train_data():
+ cost = exe.run(main_program,
+ feed=feeder.feed(data),
+ fetch_list=[avg_cost])
+ cost = cost[0]
+
+ if batch_id % 10 == 0:
+ print("avg_cost:" + str(cost))
+ if batch_id != 0:
+ print("second per batch: " + str((time.time(
+ ) - start_time) / batch_id))
+ # Set the threshold low to speed up the CI test
+ if float(cost) < 60.0:
+ if save_dirname is not None:
+ fluid.io.save_inference_model(save_dirname, [
+ 'word_data', 'verb_data', 'ctx_n2_data',
+ 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data',
+ 'ctx_p2_data', 'mark_data'
+ ], [feature_out], exe)
+ return
+
+ batch_id = batch_id + 1
+
+ train_loop(fluid.default_main_program())
```
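+
+前面提到的数据读取器也可以单独运行来观察其输出结构,下面是一个仅作示意的片段:
+
+```python
+# 仅作示意:查看读取器产生的一个mini-batch
+# 每条样本是一个9元组,顺序与上文feed_list一致:
+# word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target
+sample_reader = paddle.batch(
+    paddle.reader.shuffle(paddle.dataset.conll05.test(), buf_size=8192),
+    batch_size=BATCH_SIZE)
+first_batch = next(iter(sample_reader()))
+print(len(first_batch))     # BATCH_SIZE
+print(len(first_batch[0]))  # 每条样本包含9个特征
+```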
-可以使用`event_handler`回调函数来观察训练过程,或进行测试等。这里我们打印了训练过程的cost,该回调函数是`trainer.train`函数里设定。
-```python
-def event_handler(event):
- if isinstance(event, paddle.event.EndIteration):
- if event.batch_id and event.batch_id % 10 == 0:
- print "Pass %d, Batch %d, Cost %f, %s" % (
- event.pass_id, event.batch_id, event.cost, event.metrics)
- if event.batch_id % 400 == 0:
- result = trainer.test(reader=reader, feeding=feeding)
- print "\nTest with Pass %d, Batch %d, %s" % (event.pass_id, event.batch_id, result.metrics)
-
- if isinstance(event, paddle.event.EndPass):
- # save parameters
- with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
- trainer.save_parameter_to_tar(f)
-
- result = trainer.test(reader=reader, feeding=feeding)
- print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
-```
+## 应用模型
-通过`trainer.train`函数训练:
+训练完成之后,需要依据某个我们关心的性能指标选择最优的模型进行预测,可以简单地选择测试集上标记错误最少的那个模型。以下我们给出一个使用训练后的模型进行预测的示例,代码之后还有一个将输出打分映射回标注字符串的示意片段。
```python
-trainer.train(
- reader=reader,
- event_handler=event_handler,
- num_passes=1,
- feeding=feeding)
+def infer(use_cuda, save_dirname=None):
+ if save_dirname is None:
+ return
+
+ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+ exe = fluid.Executor(place)
+
+ inference_scope = fluid.core.Scope()
+ with fluid.scope_guard(inference_scope):
+ # Use fluid.io.load_inference_model to obtain the inference program desc,
+ # the feed_target_names (the names of variables that will be fed
+ # data using feed operators), and the fetch_targets (variables that
+ # we want to obtain data from using fetch operators).
+ [inference_program, feed_target_names,
+ fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
+
+ # Setup inputs by creating LoDTensors to represent sequences of words.
+ # Here each word is the basic element of these LoDTensors and the shape of
+ # each word (base_shape) should be [1] since it is simply an index to
+ # look up for the corresponding word vector.
+ # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
+ # which has only one lod level. Then the created LoDTensors will have only
+ # one higher level structure (sequence of words, or sentence) than the basic
+ # element (word). Hence the LoDTensor will hold data for three sentences of
+ # length 3, 4 and 2, respectively.
+ # Note that lod info should be a list of lists.
+ lod = [[3, 4, 2]]
+ base_shape = [1]
+ # The range of random integers is [low, high]
+ word = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ pred = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=pred_dict_len - 1)
+ ctx_n2 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_n1 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_0 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_p1 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_p2 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ mark = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=mark_dict_len - 1)
+
+ # Construct feed as a dictionary of {feed_target_name: feed_target_data}
+ # and results will contain a list of data corresponding to fetch_targets.
+ assert feed_target_names[0] == 'word_data'
+ assert feed_target_names[1] == 'verb_data'
+ assert feed_target_names[2] == 'ctx_n2_data'
+ assert feed_target_names[3] == 'ctx_n1_data'
+ assert feed_target_names[4] == 'ctx_0_data'
+ assert feed_target_names[5] == 'ctx_p1_data'
+ assert feed_target_names[6] == 'ctx_p2_data'
+ assert feed_target_names[7] == 'mark_data'
+
+ results = exe.run(inference_program,
+ feed={
+ feed_target_names[0]: word,
+ feed_target_names[1]: pred,
+ feed_target_names[2]: ctx_n2,
+ feed_target_names[3]: ctx_n1,
+ feed_target_names[4]: ctx_0,
+ feed_target_names[5]: ctx_p1,
+ feed_target_names[6]: ctx_p2,
+ feed_target_names[7]: mark
+ },
+ fetch_list=fetch_targets,
+ return_numpy=False)
+ print(results[0].lod())
+ np_data = np.array(results[0])
+ print("Inference Shape: ", np_data.shape)
```
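+
+按照上文保存推断模型时指定的fetch目标,`results[0]`中保存的是CRF状态特征(`feature_out`)的打分。下面给出一个仅作示意的片段,演示如何把这些打分粗略映射回标注字符串——这里直接对每个词取打分最高的标签,没有考虑CRF的转移特征,仅供参考:
+
+```python
+# 仅作示意:可在infer函数末尾加入类似代码(label_dict见上文,np_data为上面得到的打分矩阵)
+labels_reverse = dict((v, k) for k, v in label_dict.items())
+pre_lab = [labels_reverse[int(i)] for i in np_data.argmax(axis=1)]
+print(pre_lab)
+```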
-### 应用模型
-
-训练完成之后,需要依据某个我们关心的性能指标选择最优的模型进行预测,可以简单的选择测试集上标记错误最少的那个模型。预测时使用 `paddle.layer.crf_decoding`,和训练不同的是,该层没有正确的标签层作为输入。如下所示:
+整个程序的入口如下:
```python
-predict = paddle.layer.crf_decoding(
- size=label_dict_len,
- input=feature_out,
- param_attr=paddle.attr.Param(name='crfw'))
-```
+def main(use_cuda, is_local=True):
+ if use_cuda and not fluid.core.is_compiled_with_cuda():
+ return
-这里选用测试集的一条数据作为示例。
+ # Directory for saving the trained model
+ save_dirname = "label_semantic_roles.inference.model"
-```python
-test_creator = paddle.dataset.conll05.test()
-test_data = []
-for item in test_creator():
- test_data.append(item[0:8])
- if len(test_data) == 1:
- break
-```
+ train(use_cuda, save_dirname, is_local)
+ infer(use_cuda, save_dirname)
-推断接口`paddle.infer`返回标签的索引,并查询词典`labels_reverse`,打印出标记的结果。
-```python
-labs = paddle.infer(
- output_layer=predict, parameters=parameters, input=test_data, field='id')
-assert len(labs) == len(test_data[0][0])
-labels_reverse={}
-for (k,v) in label_dict.items():
- labels_reverse[v]=k
-pre_lab = [labels_reverse[i] for i in labs]
-print pre_lab
+main(use_cuda=False)
```
## 总结
diff --git a/07.label_semantic_roles/index.html b/07.label_semantic_roles/index.html
index ca6f60cb428a4f473f888e9918116f911515b63f..7d84612aa653746fdf247f2dbc44072d85937d0b 100644
--- a/07.label_semantic_roles/index.html
+++ b/07.label_semantic_roles/index.html
@@ -251,22 +251,23 @@ We trained a language model on the English Wikipedia to get a word vector lookup
Here we fetch the dictionary, and print its size:
```python
-import math
+import math, os
import numpy as np
-import paddle.v2 as paddle
+import paddle
import paddle.v2.dataset.conll05 as conll05
-import paddle.v2.evaluator as evaluator
+import paddle.fluid as fluid
+import time
-paddle.init(use_gpu=False, trainer_count=1)
+with_gpu = os.getenv('WITH_GPU', '0') != '0'
word_dict, verb_dict, label_dict = conll05.get_dict()
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
-pred_len = len(verb_dict)
+pred_dict_len = len(verb_dict)
print word_dict_len
print label_dict_len
-print pred_len
+print pred_dict_len
```
## Model Configuration
@@ -274,290 +275,329 @@ print pred_len
- Define input data dimensions and model hyperparameters.
```python
-mark_dict_len = 2 # value range of region mark. Region mark is either 0 or 1, so range is 2
-word_dim = 32 # word vector dimension
-mark_dim = 5 # adjacent dimension
-hidden_dim = 512 # the dimension of LSTM hidden layer vector is 128 (512/4)
-depth = 8 # depth of stacked LSTM
-
-# There are 9 features per sample, so we will define 9 data layers.
-# They type for each layer is integer_value_sequence.
-def d_type(value_range):
- return paddle.data_type.integer_value_sequence(value_range)
-
-# word sequence
-word = paddle.layer.data(name='word_data', type=d_type(word_dict_len))
-# predicate
-predicate = paddle.layer.data(name='verb_data', type=d_type(pred_len))
-
-# 5 features for predicate context
-ctx_n2 = paddle.layer.data(name='ctx_n2_data', type=d_type(word_dict_len))
-ctx_n1 = paddle.layer.data(name='ctx_n1_data', type=d_type(word_dict_len))
-ctx_0 = paddle.layer.data(name='ctx_0_data', type=d_type(word_dict_len))
-ctx_p1 = paddle.layer.data(name='ctx_p1_data', type=d_type(word_dict_len))
-ctx_p2 = paddle.layer.data(name='ctx_p2_data', type=d_type(word_dict_len))
-
-# region marker sequence
-mark = paddle.layer.data(name='mark_data', type=d_type(mark_dict_len))
-
-# label sequence
-target = paddle.layer.data(name='target', type=d_type(label_dict_len))
+mark_dict_len = 2
+word_dim = 32
+mark_dim = 5
+hidden_dim = 512
+depth = 8
+mix_hidden_lr = 1e-3
+
+IS_SPARSE = True
+PASS_NUM = 10
+BATCH_SIZE = 10
+
+embedding_name = 'emb'
```
Note that `hidden_dim = 512` actually means an LSTM hidden vector of dimension 128 (512/4). Please refer to PaddlePaddle's official documentation for details: [lstmemory](http://www.paddlepaddle.org/doc/ui/api/trainer_config_helpers/layers.html#lstmemory).
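+
+A tiny illustrative snippet of this relationship (assuming the hyperparameters defined above):
+
+```python
+# Illustrative only: the size passed to the LSTM layer is 4x the actual hidden dimension
+hidden_dim = 512
+lstm_hidden_size = hidden_dim // 4
+print(lstm_hidden_size)  # 128
+```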
-- Transform the word sequence itself, the predicate, the predicate context, and the region mark sequence into embedded vector sequences.
+- Define a parameter loader method to load the pre-trained word-embedding lookup table trained on the English Wikipedia.
```python
-
-# Since word vectorlookup table is pre-trained, we won't update it this time.
-# is_static being True prevents updating the lookup table during training.
-emb_para = paddle.attr.Param(name='emb', initial_std=0., is_static=True)
-# hyperparameter configurations
-default_std = 1 / math.sqrt(hidden_dim) / 3.0
-std_default = paddle.attr.Param(initial_std=default_std)
-std_0 = paddle.attr.Param(initial_std=0.)
-
-predicate_embedding = paddle.layer.embedding(
- size=word_dim,
- input=predicate,
- param_attr=paddle.attr.Param(
- name='vemb', initial_std=default_std))
-mark_embedding = paddle.layer.embedding(
- size=mark_dim, input=mark, param_attr=std_0)
-
-word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
-emb_layers = [
- paddle.layer.embedding(
- size=word_dim, input=x, param_attr=emb_para) for x in word_input
-]
-emb_layers.append(predicate_embedding)
-emb_layers.append(mark_embedding)
+def load_parameter(file_name, h, w):
+ with open(file_name, 'rb') as f:
+ f.read(16) # skip header.
+ return np.fromfile(f, dtype=np.float32).reshape(h, w)
```
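+
+A minimal usage sketch of `load_parameter` (assuming `word_dict_len` and `word_dim` as defined above), loading the pre-trained embedding and checking its shape:
+
+```python
+# Illustrative only: expected shape is (word_dict_len, word_dim)
+pretrained_emb = load_parameter(conll05.get_embedding(), word_dict_len, word_dim)
+print(pretrained_emb.shape)
+```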
+- Transform the word sequence itself, the predicate, the predicate context, and the region mark sequence into embedded vector sequences.
+
- 8 LSTM units are trained through alternating left-to-right / right-to-left order denoted by the variable `reverse`.
```python
-hidden_0 = paddle.layer.mixed(
- size=hidden_dim,
- bias_attr=std_default,
- input=[
- paddle.layer.full_matrix_projection(
- input=emb, param_attr=std_default) for emb in emb_layers
- ])
-
-mix_hidden_lr = 1e-3
-lstm_para_attr = paddle.attr.Param(initial_std=0.0, learning_rate=1.0)
-hidden_para_attr = paddle.attr.Param(
- initial_std=default_std, learning_rate=mix_hidden_lr)
-
-lstm_0 = paddle.layer.lstmemory(
- input=hidden_0,
- act=paddle.activation.Relu(),
- gate_act=paddle.activation.Sigmoid(),
- state_act=paddle.activation.Sigmoid(),
- bias_attr=std_0,
- param_attr=lstm_para_attr)
-
-# stack L-LSTM and R-LSTM with direct edges
-input_tmp = [hidden_0, lstm_0]
-
-for i in range(1, depth):
- mix_hidden = paddle.layer.mixed(
+def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
+ **ignored):
+ # 8 features
+ predicate_embedding = fluid.layers.embedding(
+ input=predicate,
+ size=[pred_dict_len, word_dim],
+ dtype='float32',
+ is_sparse=IS_SPARSE,
+ param_attr='vemb')
+
+ mark_embedding = fluid.layers.embedding(
+ input=mark,
+ size=[mark_dict_len, mark_dim],
+ dtype='float32',
+ is_sparse=IS_SPARSE)
+
+ word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
+ # Since word vector lookup table is pre-trained, we won't update it this time.
+ # trainable being False prevents updating the lookup table during training.
+ emb_layers = [
+ fluid.layers.embedding(
+ size=[word_dict_len, word_dim],
+ input=x,
+ param_attr=fluid.ParamAttr(
+ name=embedding_name, trainable=False)) for x in word_input
+ ]
+ emb_layers.append(predicate_embedding)
+ emb_layers.append(mark_embedding)
+
+ # 8 LSTM units are trained through alternating left-to-right / right-to-left order
+    # denoted by the variable `is_reverse`.
+ hidden_0_layers = [
+ fluid.layers.fc(input=emb, size=hidden_dim, act='tanh')
+ for emb in emb_layers
+ ]
+
+ hidden_0 = fluid.layers.sums(input=hidden_0_layers)
+
+ lstm_0 = fluid.layers.dynamic_lstm(
+ input=hidden_0,
size=hidden_dim,
- bias_attr=std_default,
- input=[
- paddle.layer.full_matrix_projection(
- input=input_tmp[0], param_attr=hidden_para_attr),
- paddle.layer.full_matrix_projection(
- input=input_tmp[1], param_attr=lstm_para_attr)
+ candidate_activation='relu',
+ gate_activation='sigmoid',
+ cell_activation='sigmoid')
+
+ # stack L-LSTM and R-LSTM with direct edges
+ input_tmp = [hidden_0, lstm_0]
+
+    # In PaddlePaddle, the state features and transition features of a CRF are implemented
+    # by a fully connected layer and a CRF layer separately. The fully connected layer
+    # with linear activation learns the state features (here we use fluid.layers.sums;
+    # fluid.layers.fc can be used as well), while the CRF layer in PaddlePaddle,
+    # fluid.layers.linear_chain_crf, only
+    # learns the transition features. It is a cost layer and the last layer of the network.
+    # fluid.layers.linear_chain_crf outputs the log probability of the true tag sequence
+    # as the cost, given the input sequence, and it requires the true tag sequence
+    # as the target in the learning process.
+
+ for i in range(1, depth):
+ mix_hidden = fluid.layers.sums(input=[
+ fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'),
+ fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh')
])
- lstm = paddle.layer.lstmemory(
- input=mix_hidden,
- act=paddle.activation.Relu(),
- gate_act=paddle.activation.Sigmoid(),
- state_act=paddle.activation.Sigmoid(),
- reverse=((i % 2) == 1),
- bias_attr=std_0,
- param_attr=lstm_para_attr)
+ lstm = fluid.layers.dynamic_lstm(
+ input=mix_hidden,
+ size=hidden_dim,
+ candidate_activation='relu',
+ gate_activation='sigmoid',
+ cell_activation='sigmoid',
+ is_reverse=((i % 2) == 1))
- input_tmp = [mix_hidden, lstm]
-```
-
-- In PaddlePaddle, state features and transition features of a CRF are implemented by a fully connected layer and a CRF layer seperately. The fully connected layer with linear activation learns the state features, here we use paddle.layer.mixed (paddle.layer.fc can be uesed as well), and the CRF layer in PaddlePaddle: paddle.layer.crf only learns the transition features, which is a cost layer and is the last layer of the network. paddle.layer.crf outputs the log probability of true tag sequence as the cost by given the input sequence and it requires the true tag sequence as target in the learning process.
+ input_tmp = [mix_hidden, lstm]
-```python
+ feature_out = fluid.layers.sums(input=[
+ fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'),
+ fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh')
+ ])
-# The output of the top LSTM unit and its input are feed into a fully connected layer,
-# size of which equals to size of tag labels.
-# The fully connected layer learns the state features
-
-feature_out = paddle.layer.mixed(
- size=label_dict_len,
- bias_attr=std_default,
- input=[
- paddle.layer.full_matrix_projection(
- input=input_tmp[0], param_attr=hidden_para_attr),
- paddle.layer.full_matrix_projection(
- input=input_tmp[1], param_attr=lstm_para_attr)], )
-
-crf_cost = paddle.layer.crf(
- size=label_dict_len,
- input=feature_out,
- label=target,
- param_attr=paddle.attr.Param(
- name='crfw',
- initial_std=default_std,
- learning_rate=mix_hidden_lr))
-```
-
-- The CRF decoding layer is used for evaluation and inference. It shares weights with CRF layer. The sharing of parameters among multiple layers is specified by using the same parameter name in these layers. If true tag sequence is provided in training process, `paddle.layer.crf_decoding` calculates labelling error for each input token and `evaluator.sum` sum the error over the entire sequence. Otherwise, `paddle.layer.crf_decoding` generates the labelling tags.
-
-```python
-crf_dec = paddle.layer.crf_decoding(
- size=label_dict_len,
- input=feature_out,
- label=target,
- param_attr=paddle.attr.Param(name='crfw'))
-evaluator.sum(input=crf_dec)
+ return feature_out
```
## Train model
-### Create Parameters
-
-All necessary parameters will be traced created given output layers that we need to use.
+- In the `train` method, we build the network topology and specify the optimization method. We use the most basic **SGD** optimizer, with a learning rate that follows an exponential decay schedule.
-```python
-parameters = paddle.parameters.create(crf_cost)
-```
+- As mentioned in the data preparation section, we will use the CoNLL 2005 test corpus as the training data set. `conll05.test()` outputs one training instance at a time. It is shuffled and batched into mini-batches, and used as input (a minimal sketch of the reader output follows the training code below).
-We can print out parameter name. It will be generated if not specified.
+- A `fluid.DataFeeder` with a `feed_list` is used to specify the correspondence between data instances and data layers. For example, according to the `feed_list` below, the 0th column of a data instance produced by `conll05.test()` is matched to the data layer named `word_data`.
-```python
-print parameters.keys()
-```
+- Inside the training loop we print the training cost every few batches, so that the training progress can be observed.
-Now we load the pre-trained word lookup tables from word embeddings trained on the English language Wikipedia.
+- Finally, `train_loop` runs the training through the `Executor`.
```python
-def load_parameter(file_name, h, w):
- with open(file_name, 'rb') as f:
- f.read(16)
- return np.fromfile(f, dtype=np.float32).reshape(h, w)
-parameters.set('emb', load_parameter(conll05.get_embedding(), 44068, 32))
+def train(use_cuda, save_dirname=None, is_local=True):
+ # define network topology
+ word = fluid.layers.data(
+ name='word_data', shape=[1], dtype='int64', lod_level=1)
+ predicate = fluid.layers.data(
+ name='verb_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_n2 = fluid.layers.data(
+ name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_n1 = fluid.layers.data(
+ name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_0 = fluid.layers.data(
+ name='ctx_0_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_p1 = fluid.layers.data(
+ name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1)
+ ctx_p2 = fluid.layers.data(
+ name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1)
+ mark = fluid.layers.data(
+ name='mark_data', shape=[1], dtype='int64', lod_level=1)
+
+ # define network topology
+ feature_out = db_lstm(**locals())
+ target = fluid.layers.data(
+ name='target', shape=[1], dtype='int64', lod_level=1)
+ crf_cost = fluid.layers.linear_chain_crf(
+ input=feature_out,
+ label=target,
+ param_attr=fluid.ParamAttr(
+ name='crfw', learning_rate=mix_hidden_lr))
+
+ avg_cost = fluid.layers.mean(crf_cost)
+
+ sgd_optimizer = fluid.optimizer.SGD(
+ learning_rate=fluid.layers.exponential_decay(
+ learning_rate=0.01,
+ decay_steps=100000,
+ decay_rate=0.5,
+ staircase=True))
+
+ sgd_optimizer.minimize(avg_cost)
+
+ # The CRF decoding layer is used for evaluation and inference.
+ # It shares weights with CRF layer. The sharing of parameters among multiple layers
+ # is specified by using the same parameter name in these layers. If true tag sequence
+ # is provided in training process, `fluid.layers.crf_decoding` calculates labelling error
+ # for each input token and sums the error over the entire sequence.
+ # Otherwise, `fluid.layers.crf_decoding` generates the labelling tags.
+ crf_decode = fluid.layers.crf_decoding(
+ input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))
+
+ train_data = paddle.batch(
+ paddle.reader.shuffle(
+ paddle.dataset.conll05.test(), buf_size=8192),
+ batch_size=BATCH_SIZE)
+
+ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+
+ feeder = fluid.DataFeeder(
+ feed_list=[
+ word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target
+ ],
+ place=place)
+ exe = fluid.Executor(place)
+
+ def train_loop(main_program):
+ exe.run(fluid.default_startup_program())
+ embedding_param = fluid.global_scope().find_var(
+ embedding_name).get_tensor()
+ embedding_param.set(
+ load_parameter(conll05.get_embedding(), word_dict_len, word_dim),
+ place)
+
+ start_time = time.time()
+ batch_id = 0
+ for pass_id in xrange(PASS_NUM):
+ for data in train_data():
+ cost = exe.run(main_program,
+ feed=feeder.feed(data),
+ fetch_list=[avg_cost])
+ cost = cost[0]
+
+ if batch_id % 10 == 0:
+ print("avg_cost:" + str(cost))
+ if batch_id != 0:
+ print("second per batch: " + str((time.time(
+ ) - start_time) / batch_id))
+ # Set the threshold low to speed up the CI test
+ if float(cost) < 60.0:
+ if save_dirname is not None:
+ fluid.io.save_inference_model(save_dirname, [
+ 'word_data', 'verb_data', 'ctx_n2_data',
+ 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data',
+ 'ctx_p2_data', 'mark_data'
+ ], [feature_out], exe)
+ return
+
+ batch_id = batch_id + 1
+
+ train_loop(fluid.default_main_program())
```
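+
+As mentioned above, the reader can also be run on its own to inspect its output structure; a minimal sketch:
+
+```python
+# Illustrative only: peek at one mini-batch produced by the reader.
+# Each instance is a 9-tuple in the same order as the feed_list above:
+# word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target
+sample_reader = paddle.batch(
+    paddle.reader.shuffle(paddle.dataset.conll05.test(), buf_size=8192),
+    batch_size=BATCH_SIZE)
+first_batch = next(iter(sample_reader()))
+print(len(first_batch))     # BATCH_SIZE
+print(len(first_batch[0]))  # 9 features per instance
+```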
-### Create Trainer
-
-We will create trainer given model topology, parameters, and optimization method. We will use the most basic **SGD** method, which is a momentum optimizer with 0 momentum. Meanwhile, we will set learning rate and regularization.
-
-```python
-optimizer = paddle.optimizer.Momentum(
- momentum=0,
- learning_rate=1e-3,
- regularization=paddle.optimizer.L2Regularization(rate=8e-4),
- model_average=paddle.optimizer.ModelAverage(
- average_window=0.5, max_average_window=10000), )
-
-trainer = paddle.trainer.SGD(cost=crf_cost,
- parameters=parameters,
- update_equation=optimizer,
- extra_layers=crf_dec)
-```
-### Trainer
+## Application
-As mentioned in data preparation section, we will use CoNLL 2005 test corpus as the training data set. `conll05.test()` outputs one training instance at a time. It is shuffled and batched into mini batches, and used as input.
+- When training is completed, we need to select an optimal model based on some performance index to do inference. In this task, one can simply select the model with the fewest labelling errors on the test set. Below we demonstrate inference using the trained model; a short sketch after the code shows how the output scores can be mapped back to label strings.
```python
-reader = paddle.batch(
- paddle.reader.shuffle(
- conll05.test(), buf_size=8192), batch_size=2)
+def infer(use_cuda, save_dirname=None):
+ if save_dirname is None:
+ return
+
+ place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+ exe = fluid.Executor(place)
+
+ inference_scope = fluid.core.Scope()
+ with fluid.scope_guard(inference_scope):
+ # Use fluid.io.load_inference_model to obtain the inference program desc,
+ # the feed_target_names (the names of variables that will be fed
+ # data using feed operators), and the fetch_targets (variables that
+ # we want to obtain data from using fetch operators).
+ [inference_program, feed_target_names,
+ fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
+
+ # Setup inputs by creating LoDTensors to represent sequences of words.
+ # Here each word is the basic element of these LoDTensors and the shape of
+ # each word (base_shape) should be [1] since it is simply an index to
+ # look up for the corresponding word vector.
+ # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
+ # which has only one lod level. Then the created LoDTensors will have only
+ # one higher level structure (sequence of words, or sentence) than the basic
+ # element (word). Hence the LoDTensor will hold data for three sentences of
+ # length 3, 4 and 2, respectively.
+ # Note that lod info should be a list of lists.
+ lod = [[3, 4, 2]]
+ base_shape = [1]
+ # The range of random integers is [low, high]
+ word = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ pred = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=pred_dict_len - 1)
+ ctx_n2 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_n1 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_0 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_p1 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ ctx_p2 = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=word_dict_len - 1)
+ mark = fluid.create_random_int_lodtensor(
+ lod, base_shape, place, low=0, high=mark_dict_len - 1)
+
+ # Construct feed as a dictionary of {feed_target_name: feed_target_data}
+ # and results will contain a list of data corresponding to fetch_targets.
+ assert feed_target_names[0] == 'word_data'
+ assert feed_target_names[1] == 'verb_data'
+ assert feed_target_names[2] == 'ctx_n2_data'
+ assert feed_target_names[3] == 'ctx_n1_data'
+ assert feed_target_names[4] == 'ctx_0_data'
+ assert feed_target_names[5] == 'ctx_p1_data'
+ assert feed_target_names[6] == 'ctx_p2_data'
+ assert feed_target_names[7] == 'mark_data'
+
+ results = exe.run(inference_program,
+ feed={
+ feed_target_names[0]: word,
+ feed_target_names[1]: pred,
+ feed_target_names[2]: ctx_n2,
+ feed_target_names[3]: ctx_n1,
+ feed_target_names[4]: ctx_0,
+ feed_target_names[5]: ctx_p1,
+ feed_target_names[6]: ctx_p2,
+ feed_target_names[7]: mark
+ },
+ fetch_list=fetch_targets,
+ return_numpy=False)
+ print(results[0].lod())
+ np_data = np.array(results[0])
+ print("Inference Shape: ", np_data.shape)
```
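+
+According to the fetch targets saved above, `results[0]` holds the CRF state-feature scores (`feature_out`). As a rough, illustrative way to map them back to label strings, one can take the highest-scoring label per token; note this ignores the CRF transition features and is for inspection only:
+
+```python
+# Illustrative only: could be appended at the end of infer()
+# (label_dict as loaded above, np_data as computed above)
+labels_reverse = dict((v, k) for k, v in label_dict.items())
+pre_lab = [labels_reverse[int(i)] for i in np_data.argmax(axis=1)]
+print(pre_lab)
+```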
-`feeding` is used to specify the correspondence between data instance and data layer. For example, according to following `feeding`, the 0th column of data instance produced by`conll05.test()` is matched to the data layer named `word_data`.
+- The main entry point of the whole program is shown below:
-```python
-feeding = {
- 'word_data': 0,
- 'ctx_n2_data': 1,
- 'ctx_n1_data': 2,
- 'ctx_0_data': 3,
- 'ctx_p1_data': 4,
- 'ctx_p2_data': 5,
- 'verb_data': 6,
- 'mark_data': 7,
- 'target': 8
-}
-```
-
-`event_handler` can be used as callback for training events, it will be used as an argument for the `train` method. Following `event_handler` prints cost during training.
```python
-def event_handler(event):
- if isinstance(event, paddle.event.EndIteration):
- if event.batch_id and event.batch_id % 10 == 0:
- print "Pass %d, Batch %d, Cost %f, %s" % (
- event.pass_id, event.batch_id, event.cost, event.metrics)
- if event.batch_id % 400 == 0:
- result = trainer.test(reader=reader, feeding=feeding)
- print "\nTest with Pass %d, Batch %d, %s" % (event.pass_id, event.batch_id, result.metrics)
-
- if isinstance(event, paddle.event.EndPass):
- # save parameters
- with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
- trainer.save_parameter_to_tar(f)
-
- result = trainer.test(reader=reader, feeding=feeding)
- print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
-```
+def main(use_cuda, is_local=True):
+ if use_cuda and not fluid.core.is_compiled_with_cuda():
+ return
-`trainer.train` will train the model.
+ # Directory for saving the trained model
+ save_dirname = "label_semantic_roles.inference.model"
-```python
-trainer.train(
- reader=reader,
- event_handler=event_handler,
- num_passes=10000,
- feeding=feeding)
-```
-
-### Application
-
-When training is completed, we need to select an optimal model based one performance index to do inference. In this task, one can simply select the model with the least number of marks on the test set. The `paddle.layer.crf_decoding` layer is used in the inference, but its inputs do not include the ground truth label.
-
-```python
-predict = paddle.layer.crf_decoding(
- size=label_dict_len,
- input=feature_out,
- param_attr=paddle.attr.Param(name='crfw'))
-```
+ train(use_cuda, save_dirname, is_local)
+ infer(use_cuda, save_dirname)
-Here, using one testing sample as an example.
-```python
-test_creator = paddle.dataset.conll05.test()
-test_data = []
-for item in test_creator():
- test_data.append(item[0:8])
- if len(test_data) == 1:
- break
-```
-
-The inference interface `paddle.infer` returns the index of predicting labels. Then printing the tagging results based dictionary `labels_reverse`.
-
-
-```python
-labs = paddle.infer(
- output_layer=predict, parameters=parameters, input=test_data, field='id')
-assert len(labs) == len(test_data[0][0])
-labels_reverse={}
-for (k,v) in label_dict.items():
- labels_reverse[v]=k
-pre_lab = [labels_reverse[i] for i in labs]
-print pre_lab
+main(use_cuda=False)
```
## Conclusion
diff --git a/07.label_semantic_roles/train.py b/07.label_semantic_roles/train.py
index 20ec2ca50f6f8f11b3cfd8cd71a3ecfdf28419c0..f09ee33282aae551e746fa4458c7efd8a66cd991 100644
--- a/07.label_semantic_roles/train.py
+++ b/07.label_semantic_roles/train.py
@@ -25,6 +25,7 @@ BATCH_SIZE = 10
embedding_name = 'emb'
+
def load_parameter(file_name, h, w):
with open(file_name, 'rb') as f:
f.read(16) # skip header.
@@ -52,8 +53,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
fluid.layers.embedding(
size=[word_dict_len, word_dim],
input=x,
- param_attr=fluid.ParamAttr(
- name=embedding_name, trainable=False)) for x in word_input
+ param_attr=fluid.ParamAttr(name=embedding_name, trainable=False))
+ for x in word_input
]
emb_layers.append(predicate_embedding)
emb_layers.append(mark_embedding)
@@ -125,8 +126,7 @@ def train(use_cuda, save_dirname=None, is_local=True):
crf_cost = fluid.layers.linear_chain_crf(
input=feature_out,
label=target,
- param_attr=fluid.ParamAttr(
- name='crfw', learning_rate=mix_hidden_lr))
+ param_attr=fluid.ParamAttr(name='crfw', learning_rate=mix_hidden_lr))
avg_cost = fluid.layers.mean(crf_cost)
@@ -143,13 +143,11 @@ def train(use_cuda, save_dirname=None, is_local=True):
input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))
train_data = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.conll05.test(), buf_size=8192),
+ paddle.reader.shuffle(paddle.dataset.conll05.test(), buf_size=8192),
batch_size=BATCH_SIZE)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-
feeder = fluid.DataFeeder(
feed_list=[
word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target
@@ -169,16 +167,15 @@ def train(use_cuda, save_dirname=None, is_local=True):
batch_id = 0
for pass_id in xrange(PASS_NUM):
for data in train_data():
- cost = exe.run(main_program,
- feed=feeder.feed(data),
- fetch_list=[avg_cost])
+ cost = exe.run(
+ main_program, feed=feeder.feed(data), fetch_list=[avg_cost])
cost = cost[0]
if batch_id % 10 == 0:
print("avg_cost:" + str(cost))
if batch_id != 0:
- print("second per batch: " + str((time.time(
- ) - start_time) / batch_id))
+ print("second per batch: " + str((
+ time.time() - start_time) / batch_id))
# Set the threshold low to speed up the CI test
if float(cost) < 60.0:
if save_dirname is not None:
@@ -252,19 +249,20 @@ def infer(use_cuda, save_dirname=None):
assert feed_target_names[6] == 'ctx_p2_data'
assert feed_target_names[7] == 'mark_data'
- results = exe.run(inference_program,
- feed={
- feed_target_names[0]: word,
- feed_target_names[1]: pred,
- feed_target_names[2]: ctx_n2,
- feed_target_names[3]: ctx_n1,
- feed_target_names[4]: ctx_0,
- feed_target_names[5]: ctx_p1,
- feed_target_names[6]: ctx_p2,
- feed_target_names[7]: mark
- },
- fetch_list=fetch_targets,
- return_numpy=False)
+ results = exe.run(
+ inference_program,
+ feed={
+ feed_target_names[0]: word,
+ feed_target_names[1]: pred,
+ feed_target_names[2]: ctx_n2,
+ feed_target_names[3]: ctx_n1,
+ feed_target_names[4]: ctx_0,
+ feed_target_names[5]: ctx_p1,
+ feed_target_names[6]: ctx_p2,
+ feed_target_names[7]: mark
+ },
+ fetch_list=fetch_targets,
+ return_numpy=False)
print(results[0].lod())
np_data = np.array(results[0])
print("Inference Shape: ", np_data.shape)
@@ -282,4 +280,3 @@ def main(use_cuda, is_local=True):
main(use_cuda=False)
-