Commit 860592f0 authored by Aston Zhang

revise gluonbook

Parent bdcff098
......@@ -20,17 +20,20 @@
| `grad_clipping`|[Language Model Based on Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn.md)|
| `linreg`|[Linear Regression Implementation from Scratch](../chapter_deep-learning-basics/linear-regression-scratch.md)|
| `load_data_fashion_mnist`|[Deep Convolutional Neural Networks (AlexNet)](../chapter_convolutional-neural-networks/alexnet.md)|
| `load_data_jay_lyrics`|[Language Model Dataset (Jay Chou Album Lyrics)](../chapter_recurrent-neural-networks/lang-model-dataset.md)|
| `load_data_pikachu`|[Object Detection Dataset (Pikachu)](../chapter_computer-vision/object-detection-dataset.md)|
| `optimize`|[Gradient Descent and Stochastic Gradient Descent](../chapter_optimization/gd-sgd.md)|
| `optimize_with_trainer`|[Gradient Descent and Stochastic Gradient Descent](../chapter_optimization/gd-sgd.md)|
| `optimize_gluon`|[Gradient Descent and Stochastic Gradient Descent](../chapter_optimization/gd-sgd.md)|
| `plt`|[Linear Regression Implementation from Scratch](../chapter_deep-learning-basics/linear-regression-scratch.md)|
| `predict_rnn`|[Language Model Based on Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn.md)|
| `predict_rnn_gluon`|[Gluon Implementation of Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn-gluon.md)|
| `predict_sentiment`|[Text Sentiment Classification: Using Recurrent Neural Networks](../chapter_natural-language-processing/sentiment-analysis.md)|
| `preprocess_imdb`|[Text Sentiment Classification: Using Recurrent Neural Networks](../chapter_natural-language-processing/sentiment-analysis.md)|
| `read_imdb`|[Text Sentiment Classification: Using Recurrent Neural Networks](../chapter_natural-language-processing/sentiment-analysis.md)|
| `read_voc_images`|[Semantic Segmentation and Datasets](../chapter_computer-vision/semantic-segmentation-and-dataset.md)|
| `Residual`|[Residual Networks (ResNet)](../chapter_convolutional-neural-networks/resnet.md)|
| `resnet18`|[Gluon Implementation of Multi-GPU Computation](../chapter_computational-performance/multiple-gpus-gluon.md)|
| `RNNModel`|[Gluon Implementation of Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn-gluon.md)|
| `semilogy`|[Underfitting, Overfitting, and Model Selection](../chapter_deep-learning-basics/underfit-overfit.md)|
| `set_figsize`|[Linear Regression Implementation from Scratch](../chapter_deep-learning-basics/linear-regression-scratch.md)|
| `sgd`|[Linear Regression Implementation from Scratch](../chapter_deep-learning-basics/linear-regression-scratch.md)|
......@@ -41,6 +44,7 @@
| `to_onehot`|[Language Model Based on Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn.md)|
| `train`|[Image Augmentation](../chapter_computer-vision/image-augmentation.md)|
| `train_and_predict_rnn`|[Language Model Based on Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn.md)|
| `train_and_predict_rnn_gluon`|[Gluon Implementation of Recurrent Neural Networks](../chapter_recurrent-neural-networks/rnn-gluon.md)|
| `train_ch3`|[Softmax Regression Implementation from Scratch](../chapter_deep-learning-basics/softmax-regression-scratch.md)|
| `train_ch5`|[Convolutional Neural Networks (LeNet)](../chapter_convolutional-neural-networks/lenet.md)|
| `try_all_gpus`|[Image Augmentation](../chapter_computer-vision/image-augmentation.md)|
......
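For orientation, the helpers indexed above are conventionally pulled in with a single import at the top of each notebook. A minimal sketch, assuming the `gluonbook` package is on the Python path:

```{.python .input}
import gluonbook as gb

gb.set_figsize()  # configure the default matplotlib figure size for plots
```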
......@@ -93,8 +93,7 @@ net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'adadelta', {'rho': 0.9999})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net)
```
## Summary
......
......@@ -102,8 +102,7 @@ net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'adagrad',
{'learning_rate': 0.9})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net)
```
## Summary
......
......@@ -108,8 +108,7 @@ net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 0.1})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net)
```
## Summary
......
......@@ -277,8 +277,8 @@ optimize(optimizer_fn=sgd, params_vars=init_params_vars(),
net = nn.Sequential()
net.add(nn.Dense(1))
def optimize_with_trainer(trainer, features, labels, net, decay_epoch=None,
batch_size=10, log_interval=10, num_epochs=3):
def optimize_gluon(trainer, features, labels, net, decay_epoch=None,
batch_size=10, log_interval=10, num_epochs=3):
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
loss = gloss.L2Loss()
......@@ -307,26 +307,25 @@ def optimize_with_trainer(trainer, features, labels, net, decay_epoch=None,
```{.python .input}
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.2})
optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net, decay_epoch=2, batch_size=1)
optimize_gluon(trainer=trainer, features=features, labels=labels, net=net,
decay_epoch=2, batch_size=1)
```
```{.python .input}
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.999})
optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net, decay_epoch=None, batch_size=1000,
log_interval=1000)
optimize_gluon(trainer=trainer, features=features, labels=labels, net=net,
decay_epoch=None, batch_size=1000, log_interval=1000)
```
```{.python .input}
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.2})
optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net, decay_epoch=2, batch_size=10)
optimize_gluon(trainer=trainer, features=features, labels=labels, net=net,
decay_epoch=2, batch_size=10)
```
The `get_data_ch7`, `optimize`, and `optimize_with_trainer` functions used in this section are defined in the `gluonbook` package for use in later chapters.
The `get_data_ch7`, `optimize`, and `optimize_gluon` functions used in this section are defined in the `gluonbook` package for use in later chapters.
## Summary
......
......@@ -209,24 +209,24 @@ net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': 0.2, 'momentum': 0.99})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net, decay_epoch=2)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net,
decay_epoch=2)
```
```{.python .input}
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': 0.2, 'momentum': 0.9})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net, decay_epoch=2)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net,
decay_epoch=2)
```
```{.python .input}
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'sgd',
{'learning_rate': 0.2, 'momentum': 0.5})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net, decay_epoch=2)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net,
decay_epoch=2)
```
## Summary
......
......@@ -100,16 +100,14 @@ net.add(nn.Dense(1))
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'rmsprop',
{'learning_rate': 0.03, 'gamma1': 0.9})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net)
```
```{.python .input}
net.initialize(init.Normal(sigma=0.01), force_reinit=True)
trainer = gluon.Trainer(net.collect_params(), 'rmsprop',
{'learning_rate': 0.03, 'gamma1': 0.999})
gb.optimize_with_trainer(trainer=trainer, features=features, labels=labels,
net=net)
gb.optimize_gluon(trainer=trainer, features=features, labels=labels, net=net)
```
## Summary
......
......@@ -48,7 +48,7 @@ print('chars:', ''.join([idx_to_char[idx] for idx in sample]))
print('indices:', sample)
```
We place the code above in the `load_data_jay_lyrics` function of the GluonBook package; calling it returns `corpus_indices`, `char_to_idx`, `idx_to_char`, and `vocab_size` in turn.
We wrap the code above into the `load_data_jay_lyrics` function in the `gluonbook` package for use in later chapters. Calling this function returns the four variables `corpus_indices`, `char_to_idx`, `idx_to_char`, and `vocab_size` in turn.
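As a usage example, a minimal sketch of loading the dataset through this helper (assuming the `gluonbook` package and the data archive are in place as in this section):

```{.python .input}
import gluonbook as gb

(corpus_indices, char_to_idx, idx_to_char,
 vocab_size) = gb.load_data_jay_lyrics()
vocab_size
```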
## Sampling Time Series Data
......@@ -114,7 +114,7 @@ for X, Y in data_iter_consecutive(my_seq, batch_size=2, num_steps=6):
print('X: ', X, '\nY:', Y, '\n')
```
The `data_iter_random` and `data_iter_consecutive` functions defined in this section are saved in the `gluonbook` package for use in later chapters.
The `data_iter_random` and `data_iter_consecutive` functions defined in this section are also saved in the `gluonbook` package.
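For completeness, a sketch of the random-sampling counterpart, assuming `data_iter_random` shares the interface of the `data_iter_consecutive` call shown above and that `my_seq` is still in scope:

```{.python .input}
for X, Y in data_iter_random(my_seq, batch_size=2, num_steps=6):
    print('X: ', X, '\nY:', Y, '\n')
```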
## Summary
......
......@@ -72,13 +72,13 @@ class RNNModel(nn.Block):
First, as in the previous section, we define a prediction function. The implementation differs in that the interfaces for forward computation and for initializing the hidden state are slightly different here.
```{.python .input n=41}
def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx,
idx_to_char, char_to_idx):
def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx, idx_to_char,
char_to_idx):
# Use the model's member function to initialize the hidden state.
state = model.begin_state(batch_size=1, ctx=ctx)
output = [char_to_idx[prefix[0]]]
for t in range(num_chars + len(prefix)):
X = nd.array([output[-1]], ctx=ctx).reshape((1,1))
X = nd.array([output[-1]], ctx=ctx).reshape((1, 1))
(Y, state) = model(X, state)  # Forward computation needs no model parameters passed in.
if t < len(prefix) - 1:
output.append(char_to_idx[prefix[t + 1]])
......@@ -107,7 +107,7 @@ def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
trainer = gluon.Trainer(model.collect_params(), 'sgd',
{'learning_rate': lr, 'momentum': 0, 'wd': 0})
for epoch in range(num_epochs):
loss_sum, start = 0.0, time.time()
data_iter = gb.data_iter_consecutive(
......@@ -127,9 +127,9 @@ def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
trainer.step(1)  # The loss has already been averaged, so no further gradient averaging is needed.
loss_sum += l.asscalar()
if (epoch+1) % pred_period == 0:
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(loss_sum / (t+1)), time.time() - start))
epoch + 1, math.exp(loss_sum / (t + 1)), time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn_gluon(
prefix, pred_len, model, vocab_size,
......@@ -161,8 +161,6 @@ train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
* Compare with the implementation in the previous section: does the Gluon version run faster? If you see a clear difference, try to find out why (see the timing sketch below).
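One hypothetical way to time this comparison, assuming the model and hyperparameter variables are defined as in this section's training setup (the argument list mirrors the `train_and_predict_rnn_gluon` signature above):

```{.python .input}
import time

start = time.time()
train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
                            corpus_indices, idx_to_char, char_to_idx,
                            num_epochs, num_steps, lr, clipping_theta,
                            batch_size, pred_period, pred_len, prefixes)
print('Gluon version: %.1f sec in total' % (time.time() - start))
```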
## Scan the QR Code to Access the [Forum](https://discuss.gluon.ai/t/topic/4089)
![](../img/qr_rnn-gluon.svg)
......@@ -164,19 +164,18 @@ def grad_clipping(params, theta, ctx):
Likewise, this function's implementation is a bit longer because it takes into account the recurrent neural networks to be introduced later.
```{.python .input n=11}
def train_and_predict_rnn(
rnn, get_params, init_rnn_state, num_hiddens, vocab_size, ctx,
corpus_indices, idx_to_char, char_to_idx, is_random_iter,
num_epochs, num_steps, lr, clipping_theta, batch_size,
pred_period, pred_len, prefixes):
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, ctx, corpus_indices, idx_to_char,
char_to_idx, is_random_iter, num_epochs, num_steps,
lr, clipping_theta, batch_size, pred_period,
pred_len, prefixes):
if is_random_iter:
data_iter_fn = gb.data_iter_random
else:
data_iter_fn = gb.data_iter_consecutive
params = get_params()
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(num_epochs):
if not is_random_iter:  # With consecutive sampling, initialize the hidden state at the start of the epoch.
state = init_rnn_state(batch_size, num_hiddens, ctx)
......@@ -205,16 +204,16 @@ def train_and_predict_rnn(
gb.sgd(params, lr, 1)  # The loss has already been averaged, so no further gradient averaging is needed.
loss_sum += l.asscalar()
if (epoch+1) % pred_period == 0:
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(loss_sum / (t+1)), time.time() - start))
epoch + 1, math.exp(loss_sum / (t + 1)), time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn(
prefix, pred_len, rnn, params, init_rnn_state,
num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx))
```
Apart from `get_params`, the functions introduced above are all defined in the `gluonbook` package for use in later chapters. With these functions in hand, we can now train the model.
The `to_onehot`, `predict_rnn`, `grad_clipping`, and `train_and_predict_rnn` functions introduced above are all defined in the `gluonbook` package for use in later chapters. With these functions in hand, we can now train the model.
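As a quick sanity check of one of these helpers, a minimal `to_onehot` sketch (assuming the section's imports and `vocab_size` are in scope; shapes follow the (batch size, number of steps) input convention used here):

```{.python .input}
X = nd.arange(10).reshape((2, 5))
inputs = to_onehot(X, vocab_size)
(len(inputs), inputs[0].shape)  # 5 time steps, each of shape (2, vocab_size)
```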
## Training the Model and Composing Lyrics
......
......@@ -201,22 +201,11 @@ def linreg(X, w, b):
"""Linear regression."""
return nd.dot(X, w) + b
def load_data_jay_lyrics():
with zipfile.ZipFile('../data/jaychou_lyrics.txt.zip') as zin:
with zin.open('jaychou_lyrics.txt') as f:
corpus_chars = f.read().decode('utf-8')
corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
corpus_chars = corpus_chars[0:10000]
idx_to_char = list(set(corpus_chars))
char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
vocab_size = len(char_to_idx)
corpus_indices = [char_to_idx[char] for char in corpus_chars]
return corpus_indices, char_to_idx, idx_to_char, vocab_size
def load_data_fashion_mnist(batch_size, resize=None,
root=os.path.join('~', '.mxnet', 'datasets',
'fashion-mnist')):
"""Download the fashion mnist dataest and then load into memory."""
"""Download the fashion mnist dataset and then load into memory."""
root = os.path.expanduser(root)
transformer = []
if resize:
......@@ -237,6 +226,20 @@ def load_data_fashion_mnist(batch_size, resize=None,
return train_iter, test_iter
def load_data_jay_lyrics():
"""Load the Jay Chou lyric data set."""
with zipfile.ZipFile('../data/jaychou_lyrics.txt.zip') as zin:
with zin.open('jaychou_lyrics.txt') as f:
corpus_chars = f.read().decode('utf-8')
corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
corpus_chars = corpus_chars[0:10000]
idx_to_char = list(set(corpus_chars))
char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
vocab_size = len(char_to_idx)
corpus_indices = [char_to_idx[char] for char in corpus_chars]
return corpus_indices, char_to_idx, idx_to_char, vocab_size
def load_data_pikachu(batch_size, edge_size=256):
"""Download the pikachu dataest and then load into memory."""
data_dir = '../data/pikachu'
......@@ -298,8 +301,8 @@ def optimize(optimizer_fn, params_vars, hyperparams, features, labels,
semilogy(es, ls, 'epoch', 'loss')
def optimize_with_trainer(trainer, features, labels, net, decay_epoch=None,
batch_size=10, log_interval=10, num_epochs=3):
def optimize_gluon(trainer, features, labels, net, decay_epoch=None,
batch_size=10, log_interval=10, num_epochs=3):
"""Optimize an objective function with a Gluon trainer."""
dataset = gdata.ArrayDataset(features, labels)
data_iter = gdata.DataLoader(dataset, batch_size, shuffle=True)
......@@ -324,8 +327,7 @@ def optimize_with_trainer(trainer, features, labels, net, decay_epoch=None,
semilogy(es, ls, 'epoch', 'loss')
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
"""Predict next chars with a RNN model"""
state = init_rnn_state(1, num_hiddens, ctx)
......@@ -340,13 +342,13 @@ def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
return ''.join([idx_to_char[i] for i in output])
def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx,
idx_to_char, char_to_idx):
def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx, idx_to_char,
char_to_idx):
"""Precit next chars with a Gluon RNN model"""
state = model.begin_state(batch_size=1, ctx=ctx)
output = [char_to_idx[prefix[0]]]
for t in range(num_chars + len(prefix)):
X = nd.array([output[-1]], ctx=ctx).reshape((1,1))
X = nd.array([output[-1]], ctx=ctx).reshape((1, 1))
(Y, state) = model(X, state)
if t < len(prefix) - 1:
output.append(char_to_idx[prefix[t + 1]])
......@@ -354,6 +356,7 @@ def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx,
output.append(int(Y.argmax(axis=1).asscalar()))
return ''.join([idx_to_char[i] for i in output])
def predict_sentiment(net, vocab, sentence):
"""Predict the sentiment of a given sentence."""
sentence = nd.array([vocab.token_to_idx[token] for token in sentence],
......@@ -473,8 +476,9 @@ def resnet18(num_classes):
net.add(nn.GlobalAvgPool2D(), nn.Dense(num_classes))
return net
class RNNModel(nn.Block):
"""RNN model"""
"""RNN model."""
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
......@@ -490,6 +494,7 @@ class RNNModel(nn.Block):
def begin_state(self, *args, **kwargs):
return self.rnn.begin_state(*args, **kwargs)
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
legend=None, figsize=(3.5, 2.5)):
"""Plot x and log(y)."""
......@@ -590,11 +595,12 @@ def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):
% (epoch, train_l_sum / n, train_acc_sum / m, test_acc,
time() - start))
def train_and_predict_rnn(
rnn, get_params, init_rnn_state, num_hiddens, vocab_size, ctx,
corpus_indices, idx_to_char, char_to_idx, is_random_iter,
num_epochs, num_steps, lr, clipping_theta, batch_size,
pred_period, pred_len, prefixes):
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, ctx, corpus_indices, idx_to_char,
char_to_idx, is_random_iter, num_epochs, num_steps,
lr, clipping_theta, batch_size, pred_period,
pred_len, prefixes):
"""Train an RNN model and predict the next item in the sequence."""
if is_random_iter:
data_iter_fn = data_iter_random
......@@ -625,14 +631,15 @@ def train_and_predict_rnn(
sgd(params, lr, 1)
loss_sum += l.asscalar()
if (epoch+1) % pred_period == 0:
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(loss_sum / (t+1)), time() - start))
epoch + 1, math.exp(loss_sum / (t + 1)), time() - start))
for prefix in prefixes:
print(' -', predict_rnn(
prefix, pred_len, rnn, params, init_rnn_state,
num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx))
def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
corpus_indices, idx_to_char, char_to_idx,
num_epochs, num_steps, lr, clipping_theta,
......@@ -661,14 +668,15 @@ def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
trainer.step(1)
loss_sum += l.asscalar()
if (epoch+1) % pred_period == 0:
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(loss_sum / (t+1)), time() - start))
epoch + 1, math.exp(loss_sum / (t + 1)), time() - start))
for prefix in prefixes:
print(' -', predict_rnn_gluon(
prefix, pred_len, model, vocab_size,
ctx, idx_to_char, char_to_idx))
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
params=None, lr=None, trainer=None):
"""Train and evaluate a model on CPU."""
......@@ -789,3 +797,4 @@ class VOCSegDataset(gdata.Dataset):
def __len__(self):
return len(self.data)