diff --git a/build/build.yml b/build/build.yml
index 5256bb2a560f6cb84df5a29e0453aab8c7d89909..605ac77064ddf616d45f916b16c38861ed3b0856 100644
--- a/build/build.yml
+++ b/build/build.yml
@@ -13,4 +13,5 @@ dependencies:
   - recommonmark==0.4.0
   - https://github.com/mli/notedown/tarball/master
   - mxnet-cu80==1.2.0
+  - gluonbook==0.7.0
   - jieba==0.39
diff --git a/chapter_computational-performance/multiple-gpus-gluon.md b/chapter_computational-performance/multiple-gpus-gluon.md
index 53be7f474891ef031d8f34861d0276e86523ee43..c846f9529d9cb593570c03f3401828064e6b1db1 100644
--- a/chapter_computational-performance/multiple-gpus-gluon.md
+++ b/chapter_computational-performance/multiple-gpus-gluon.md
@@ -5,8 +5,6 @@
 先导入本节实验需要的包或模块。同上一节,运行本节中的程序需要至少两块GPU。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 import mxnet as mx
 from mxnet import autograd, gluon, init, nd
diff --git a/chapter_computational-performance/multiple-gpus.md b/chapter_computational-performance/multiple-gpus.md
index 43e04092c023eef5fc650e79929113739c196ef9..ab3ed94528ab852e62b4549514507dc7ba0030d4 100644
--- a/chapter_computational-performance/multiple-gpus.md
+++ b/chapter_computational-performance/multiple-gpus.md
@@ -20,8 +20,6 @@
 为了从零开始实现多GPU训练中的数据并行,让我们先导入需要的包或模块。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 import mxnet as mx
 from mxnet import autograd, nd
diff --git a/chapter_computer-vision/anchor.md b/chapter_computer-vision/anchor.md
index e0863759821ac72b5d1ad304f75477b1c3d79c21..b6bccbcc55eb328670abd035133a37f13fa571fe 100644
--- a/chapter_computer-vision/anchor.md
+++ b/chapter_computer-vision/anchor.md
@@ -6,8 +6,6 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import contrib, gluon, image, nd
 import numpy as np
diff --git a/chapter_computer-vision/bounding-box.md b/chapter_computer-vision/bounding-box.md
index cc67ab370c51052a8c41f5f8100479556b30d8b8..4d78aacdec6d4aa1b1dc35798c19930859f01012 100644
--- a/chapter_computer-vision/bounding-box.md
+++ b/chapter_computer-vision/bounding-box.md
@@ -8,8 +8,6 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import image
 ```
diff --git a/chapter_computer-vision/fcn.md b/chapter_computer-vision/fcn.md
index 700c342eb6e61c899ab542fd9451823331f33a0f..1395213a51249d17f6e3224afbd884b1f96e9065 100644
--- a/chapter_computer-vision/fcn.md
+++ b/chapter_computer-vision/fcn.md
@@ -6,12 +6,11 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, init, nd, image
 from mxnet.gluon import data as gdata, loss as gloss, model_zoo, nn
 import numpy as np
+import sys
 ```
 
 ## 转置卷积层
diff --git a/chapter_computer-vision/fine-tuning.md b/chapter_computer-vision/fine-tuning.md
index 7445ef50af9656c2c2d674f0b1c58f9cd6f7bd63..e16fee7eadcff7f4161891b17f2085ead423330f 100644
--- a/chapter_computer-vision/fine-tuning.md
+++ b/chapter_computer-vision/fine-tuning.md
@@ -31,8 +31,6 @@
 
 ```{.python .input n=4}
 %matplotlib inline
-import sys
-sys.path.insert(0, '..')
 import zipfile
 import gluonbook as gb
 from mxnet import gluon, init, nd
diff --git a/chapter_computer-vision/image-augmentation.md b/chapter_computer-vision/image-augmentation.md
index 8fb60c93234165bcd20a8b394dd20a24da9a283e..d28a88e912bfbe1d2b1d66f459af6c575379967e 100644
--- a/chapter_computer-vision/image-augmentation.md
+++ b/chapter_computer-vision/image-augmentation.md
@@ -6,8 +6,6 @@
 
 ```{.python .input n=21}
 %matplotlib inline
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 import mxnet as mx
 from mxnet import autograd, gluon, image, init, nd
diff --git a/chapter_computer-vision/kaggle-gluon-cifar10.md b/chapter_computer-vision/kaggle-gluon-cifar10.md
index 01123d0f2e0c04ca7fff77ff96bf14595f9f5263..fe5ceb02336444f0fddad745ba0ad9a50de68634 100644
--- a/chapter_computer-vision/kaggle-gluon-cifar10.md
+++ b/chapter_computer-vision/kaggle-gluon-cifar10.md
@@ -13,9 +13,6 @@ CIFAR-10是计算机视觉领域的一个重要的数据集。本节中,我们
 首先,导入实验所需的包或模块。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
-
 import datetime
 import gluonbook as gb
 from mxnet import autograd, gluon, init, nd
diff --git a/chapter_computer-vision/kaggle-gluon-dog.md b/chapter_computer-vision/kaggle-gluon-dog.md
index 2087f1d78171d9e979becec8151d9385d6c4cbd3..709f823dc651b3261c5b89866f58a9c6c0b79c81 100644
--- a/chapter_computer-vision/kaggle-gluon-dog.md
+++ b/chapter_computer-vision/kaggle-gluon-dog.md
@@ -15,9 +15,6 @@
 首先,导入实验所需的包或模块。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
-
 import collections
 import datetime
 import gluonbook as gb
diff --git a/chapter_computer-vision/neural-style.md b/chapter_computer-vision/neural-style.md
index 4c25f92cdbebdfd2139764312a4ac1860b4a960c..2fe30f950778f1fa40dccea81b4b7564bba9d298 100644
--- a/chapter_computer-vision/neural-style.md
+++ b/chapter_computer-vision/neural-style.md
@@ -13,8 +13,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, image, nd
 from mxnet.gluon import model_zoo, nn
@@ -256,4 +254,4 @@ gb.plt.imsave('../img/neural-style-2.png', postprocess(z).asnumpy())
 
 ## 参考文献
 
-[1] Gatys, Leon A., Alexander S. Ecker, and Matthias Bethge. "Image style transfer using convolutional neural networks." CVPR. 2016.
+[1] Gatys, L. A., Ecker, A. S., & Bethge, M. (2016). Image style transfer using convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (pp. 2414-2423).
diff --git a/chapter_computer-vision/object-detection-dataset.md b/chapter_computer-vision/object-detection-dataset.md
index e76b29a819ab920faf77050a4e7dbb416c060d25..357c06d6de5a5e5981834a515cd06478fd7a6208 100644
--- a/chapter_computer-vision/object-detection-dataset.md
+++ b/chapter_computer-vision/object-detection-dataset.md
@@ -8,8 +8,6 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import gluon, image
 from mxnet.gluon import utils as gutils
diff --git a/chapter_computer-vision/semantic-segmentation-and-dataset.md b/chapter_computer-vision/semantic-segmentation-and-dataset.md
index c54a728ea177e898bc098381eb22bf82dffa265e..814e96cb806345ba72368b2ed826e75a4717e22d 100644
--- a/chapter_computer-vision/semantic-segmentation-and-dataset.md
+++ b/chapter_computer-vision/semantic-segmentation-and-dataset.md
@@ -15,12 +15,11 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, image, nd
 from mxnet.gluon import data as gdata, utils as gutils
 import os
+import sys
 import tarfile
 ```
 
diff --git a/chapter_computer-vision/ssd.md b/chapter_computer-vision/ssd.md
index 866995453d9500474635ba6bdb54e64b7fb71c48..5c8a1259a43a771e1680610fa21143f8afcd4a97 100644
--- a/chapter_computer-vision/ssd.md
+++ b/chapter_computer-vision/ssd.md
@@ -4,8 +4,6 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import autograd, contrib, gluon, image, init, nd
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/alexnet.md b/chapter_convolutional-neural-networks/alexnet.md
index 34f8233817b1b5dfc621b4aed6b6a3482f953e01..48b7c70fa52663813a8524fefcfd46ab78f9e3d2 100644
--- a/chapter_convolutional-neural-networks/alexnet.md
+++ b/chapter_convolutional-neural-networks/alexnet.md
@@ -51,12 +51,11 @@ AlextNet与LeNet的设计理念非常相似。但也有非常显著的区别。
 下面我们实现(稍微简化过的)Alexnet:
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd, init, gluon
 from mxnet.gluon import data as gdata, loss as gloss, nn
 import os
+import sys
 
 net = nn.Sequential()
 net.add(
diff --git a/chapter_convolutional-neural-networks/batch-norm-gluon.md b/chapter_convolutional-neural-networks/batch-norm-gluon.md
index 54343d369cfb2db30a267aadfd29a65a9774de23..55e7bd84f4377859f30af8acccae0480f93d3c70 100644
--- a/chapter_convolutional-neural-networks/batch-norm-gluon.md
+++ b/chapter_convolutional-neural-networks/batch-norm-gluon.md
@@ -3,8 +3,6 @@
 相比于前小节定义的BatchNorm类,`nn`模块定义的BatchNorm使用更加简单。它不需要指定输出数据的维度和特征维的大小,这些都将通过延后初始化来获取。我们实现同前小节一样的批量归一化的LeNet。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd, gluon, init
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/batch-norm.md b/chapter_convolutional-neural-networks/batch-norm.md
index 2d27f2355a57e33469e33003b4ed3bd9206f001d..df2d4fe5d20ad34713e5145a67d459ad4db2ebfa 100644
--- a/chapter_convolutional-neural-networks/batch-norm.md
+++ b/chapter_convolutional-neural-networks/batch-norm.md
@@ -30,8 +30,6 @@ $$y_i \leftarrow \gamma \hat{x}_i + \beta.$$
 下面我们通过NDArray来实现这个计算。
 
 ```{.python .input n=72}
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import nd, gluon, init, autograd
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/conv-layer.md b/chapter_convolutional-neural-networks/conv-layer.md
index bc5bd7ece36e843c34d4756edf4027bcedd6c189..ebca792deeda3464d6633c8fd606db01c59fbfb2 100644
--- a/chapter_convolutional-neural-networks/conv-layer.md
+++ b/chapter_convolutional-neural-networks/conv-layer.md
@@ -13,8 +13,6 @@
 下面我们将上述过程实现在`corr2d`函数里,它接受`X`和`K`,输出`Y`。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 from mxnet.gluon import nn
diff --git a/chapter_convolutional-neural-networks/densenet.md b/chapter_convolutional-neural-networks/densenet.md
index 55d670547059a2a002017b6bdf6cff0b5ffceafc..5eac898c709efc20208ad9e2a6ff2c9482fff6b8 100644
--- a/chapter_convolutional-neural-networks/densenet.md
+++ b/chapter_convolutional-neural-networks/densenet.md
@@ -13,8 +13,6 @@ DenseNet的主要构建模块是稠密块和过渡块,前者定义了输入和
 DeseNet使用了ResNet改良版的“批量归一化、激活和卷积”结构(参见上一节习题),我们首先在`conv_block`函数里实现这个结构。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd, gluon, init
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/googlenet.md b/chapter_convolutional-neural-networks/googlenet.md
index 99665c4acc2b2786be62b85973243161055dd642..2a737d0bb26e1d28da84f9d3dabc57597b1895d9 100644
--- a/chapter_convolutional-neural-networks/googlenet.md
+++ b/chapter_convolutional-neural-networks/googlenet.md
@@ -13,8 +13,6 @@ GoogLeNet中的基础卷积块叫做Inception,得名于同名电影《盗梦
 Inception块中可以自定义的超参数是每个层的输出通道数,我们以此来控制模型复杂度。
 
 ```{.python .input n=1}
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import nd, init, gluon
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/lenet.md b/chapter_convolutional-neural-networks/lenet.md
index 62de0b669d940a767fb214c597a5aa9556fd80b2..7864e36107c90614e7eab98f1a0d9ea2638ac933 100644
--- a/chapter_convolutional-neural-networks/lenet.md
+++ b/chapter_convolutional-neural-networks/lenet.md
@@ -16,8 +16,6 @@ LeNet分为卷积层块和全连接层块两个部分。卷积层块里的基本
 卷积层块把每个样本输出拉升成向量输入到全连接层块中。全连接层块由两个输出大小分别为120和84的全连接层,然后接上输出大小为10(因为数字的类别一共为10)的输出层构成。下面我们通过Sequential类来实现LeNet。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 import mxnet as mx
 from mxnet import autograd, nd, gluon, init
diff --git a/chapter_convolutional-neural-networks/nin.md b/chapter_convolutional-neural-networks/nin.md
index a73cb477cfbe4b01d33447d64c88efdac5ca4ff1..19a5129f42a0f154fd3595b5813eed89261616cb 100644
--- a/chapter_convolutional-neural-networks/nin.md
+++ b/chapter_convolutional-neural-networks/nin.md
@@ -11,8 +11,6 @@
 NiN中的一个基础块由一个卷积层外加两个充当全连接层的$1\times 1$卷积层构成。第一个卷积层我们可以设置它的超参数,而第二和第三卷积层则使用固定超参数。
 
 ```{.python .input n=2}
-import sys
-sys.path.insert(0, '..')
 import gluonbook as gb
 from mxnet import nd, gluon, init
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/resnet.md b/chapter_convolutional-neural-networks/resnet.md
index 6ab61722c3e45097686986195a20c8597e4971fc..2dc98fc34f4e1c2760d7efadd3c0a624b95b2d60 100644
--- a/chapter_convolutional-neural-networks/resnet.md
+++ b/chapter_convolutional-neural-networks/resnet.md
@@ -17,8 +17,6 @@ ResNet沿用了VGG全$3\times 3$卷积层设计。残差块里首先是两个有
 残差块的实现如下。它可以设定输出通道数,是否使用额外的卷积层来修改输入通道数,以及卷积层的步幅大小。我们将`Residual`类定义在`gluonbook`包中供后面章节调用。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd, gluon, init
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_convolutional-neural-networks/vgg.md b/chapter_convolutional-neural-networks/vgg.md
index baaeb468632485dbb2f0ac3cb7c5eb87b7d2903b..dab6244b52c72716073bed7f5349b5b9db07d550 100644
--- a/chapter_convolutional-neural-networks/vgg.md
+++ b/chapter_convolutional-neural-networks/vgg.md
@@ -9,8 +9,6 @@ AlexNet在LeNet的基础上增加了三个卷积层。但作者对它们的卷
 VGG模型的基础组成规律是:连续使用数个相同的填充为1的$3\times 3$卷积层后接上一个步幅为2的$2\times 2$最大池化层。卷积层保持输入高宽,而池化层则对其减半。我们使用`vgg_block`函数来实现这个基础块,它可以指定使用卷积层的数量和其输出通道数。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd, init, gluon
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_deep-learning-basics/dropout-gluon.md b/chapter_deep-learning-basics/dropout-gluon.md
index d5e8820180773fc67db0d16c2f6e6863e3f2dbe9..775e494d3110b7768a843e73eb6fc44eae2d12ce 100644
--- a/chapter_deep-learning-basics/dropout-gluon.md
+++ b/chapter_deep-learning-basics/dropout-gluon.md
@@ -8,8 +8,6 @@
 在多层感知机中Gluon实现的基础上,我们只需要在全连接层后添加Dropout层并指定丢弃概率。在训练模型时,Dropout层将以指定的丢弃概率随机丢弃上一层的输出元素;在测试模型时,Dropout层并不发挥作用。
 
 ```{.python .input n=5}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, init, nd
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_deep-learning-basics/dropout.md b/chapter_deep-learning-basics/dropout.md
index 505044968c44db9463203faea8d6f2f8723786bf..94c29c2071be580ade0d0fe82ea573c352faddde 100644
--- a/chapter_deep-learning-basics/dropout.md
+++ b/chapter_deep-learning-basics/dropout.md
@@ -28,8 +28,6 @@ $$h_i = \frac{\xi_i}{1-p} \phi(x_1 w_1^{(i)} + x_2 w_2^{(i)} + x_3 w_3^{(i)} + x
 根据丢弃法的定义,我们可以很容易地实现它。下面的`dropout`函数将以`drop_prob`的概率丢弃NDArray输入`X`中的元素。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, nd
 from mxnet.gluon import loss as gloss
diff --git a/chapter_deep-learning-basics/kaggle-house-price.md b/chapter_deep-learning-basics/kaggle-house-price.md
index e300f7000032d876b45de6a64b0135cd4d74af64..30f12c6131c2b57b386bdac89e6b686c88c8f57a 100644
--- a/chapter_deep-learning-basics/kaggle-house-price.md
+++ b/chapter_deep-learning-basics/kaggle-house-price.md
@@ -38,8 +38,6 @@ Kaggle(网站地址:https://www.kaggle.com )是一个著名的供机器学
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, init, gluon, nd
 from mxnet.gluon import data as gdata, loss as gloss, nn
diff --git a/chapter_deep-learning-basics/mlp-gluon.md b/chapter_deep-learning-basics/mlp-gluon.md
index d7b54640f7fe454663ab97ef2adf6f92379f2b98..e72ee74b0902c98056aaad1a0a99e263a6cc166f 100644
--- a/chapter_deep-learning-basics/mlp-gluon.md
+++ b/chapter_deep-learning-basics/mlp-gluon.md
@@ -3,8 +3,6 @@
 下面我们使用Gluon来实现上一节中的多层感知机。首先我们导入所需的包或模块。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, init, nd
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_deep-learning-basics/mlp-scratch.md b/chapter_deep-learning-basics/mlp-scratch.md
index b0f62fe2bdaf3e7384564b54de00a830c0bb1699..8c566aa59a2f92ff3a806130716318c65e695e24 100644
--- a/chapter_deep-learning-basics/mlp-scratch.md
+++ b/chapter_deep-learning-basics/mlp-scratch.md
@@ -3,8 +3,6 @@
 我们已经从上一章里了解了多层感知机的原理。下面,我们一起来动手实现一个多层感知机。首先导入实现所需的包或模块。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, nd
 from mxnet.gluon import loss as gloss
diff --git a/chapter_deep-learning-basics/mlp.md b/chapter_deep-learning-basics/mlp.md
index 419c0f22b1de994816ca65d78f2e067ee492cd5e..c2121d87a28134daebd546404c4a5a4d6ffc2931 100644
--- a/chapter_deep-learning-basics/mlp.md
+++ b/chapter_deep-learning-basics/mlp.md
@@ -45,8 +45,6 @@ $$\text{relu}(x) = \max(x, 0).$$
 ```{.python .input}
 # 将图打印在 Jupyter notebook 的文本之间。
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd
 
diff --git a/chapter_deep-learning-basics/softmax-regression-gluon.md b/chapter_deep-learning-basics/softmax-regression-gluon.md
index 7d6c83bf15b3e109cbd716a3eb237113b49c98f8..02a468cc41cf96d5f0af7e5fbf2a02cb43f2527a 100644
--- a/chapter_deep-learning-basics/softmax-regression-gluon.md
+++ b/chapter_deep-learning-basics/softmax-regression-gluon.md
@@ -5,8 +5,6 @@
 首先导入本节实现所需的包或模块。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, init, nd
 from mxnet.gluon import loss as gloss, nn
diff --git a/chapter_deep-learning-basics/softmax-regression-scratch.md b/chapter_deep-learning-basics/softmax-regression-scratch.md
index c31431147c432e988936e1b0aa9b2b586db2f70c..0d0513197d9f3148d1b49d77e2b7c6f384eb820d 100644
--- a/chapter_deep-learning-basics/softmax-regression-scratch.md
+++ b/chapter_deep-learning-basics/softmax-regression-scratch.md
@@ -3,11 +3,10 @@
 下面我们来动手实现Softmax回归。首先,导入实验所需的包或模块。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 from mxnet.gluon import data as gdata
+import sys
 ```
 
 ## 获取Fashion-MNIST数据集
diff --git a/chapter_deep-learning-basics/underfit-overfit.md b/chapter_deep-learning-basics/underfit-overfit.md
index 868d804bf2a30b4f888c681adf97842f971437e6..bd9fcf8cf282afe9b5ca5f3299bff28338da577b 100644
--- a/chapter_deep-learning-basics/underfit-overfit.md
+++ b/chapter_deep-learning-basics/underfit-overfit.md
@@ -41,8 +41,6 @@ $$\hat{y} = b + \sum_{k=1}^K x^k w_k$$
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, nd
 from mxnet.gluon import data as gdata, loss as gloss, nn
diff --git a/chapter_deep-learning-basics/weight-decay-gluon.md b/chapter_deep-learning-basics/weight-decay-gluon.md
index 9e2644b1866d8fdde0cc1722e8e0738521bff6d4..4eb1e397da63680c9ae36fd9115087242e89177e 100644
--- a/chapter_deep-learning-basics/weight-decay-gluon.md
+++ b/chapter_deep-learning-basics/weight-decay-gluon.md
@@ -4,8 +4,6 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, init, nd
 from mxnet.gluon import data as gdata, loss as gloss, nn
diff --git a/chapter_deep-learning-basics/weight-decay.md b/chapter_deep-learning-basics/weight-decay.md
index 515f33b4105097f8e23b35495c5ae849dde7326b..f44a61315089e68caf2c2c28b98479f5fc472f4d 100644
--- a/chapter_deep-learning-basics/weight-decay.md
+++ b/chapter_deep-learning-basics/weight-decay.md
@@ -38,8 +38,6 @@ $$y = 0.05 + \sum_{i = 1}^p 0.01x_i + \epsilon,$$
 
 ```{.python .input n=2}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, nd
 
diff --git a/chapter_natural-language-processing/sentiment-analysis-cnn.md b/chapter_natural-language-processing/sentiment-analysis-cnn.md
index 9122ea76e6c1e44c08622d1b5acd499dc6a53ef9..9377f9ed30d42a7eb9b84ec8dcabd55dfad362fb 100644
--- a/chapter_natural-language-processing/sentiment-analysis-cnn.md
+++ b/chapter_natural-language-processing/sentiment-analysis-cnn.md
@@ -1,5 +1,7 @@
 # 文本分类:情感分析
 
+TODO(@astonzhang): edits
+
 在之前的章节中介绍了卷积神经网络用于计算机视觉领域。
 在本节将介绍如何将卷积神经网络应用于自然语言处理领域。以及参考textCNN模型使用Gluon创建一个卷积神经网络用于文本情感分类。
 
@@ -24,8 +26,6 @@
 在实验开始前,导入所需的包或模块。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import collections
 import gluonbook as gb
 import mxnet as mx
diff --git a/chapter_natural-language-processing/sentiment-analysis.md b/chapter_natural-language-processing/sentiment-analysis.md
index aec84ace319222f0a39d277b16930c7b989f34a1..4079b4f8bc65471f8d55d67cf7dc875ad9fc34c4 100644
--- a/chapter_natural-language-processing/sentiment-analysis.md
+++ b/chapter_natural-language-processing/sentiment-analysis.md
@@ -11,8 +11,6 @@
 在实验开始前,导入所需的包或模块。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import collections
 import gluonbook as gb
 import mxnet as mx
diff --git a/chapter_optimization/adadelta-gluon.md b/chapter_optimization/adadelta-gluon.md
index cd2204e965128080c92c097ae4dd050a78b4d541..c0143bee730d5b9f8cf3e7e5d24c964d1945c23d 100644
--- a/chapter_optimization/adadelta-gluon.md
+++ b/chapter_optimization/adadelta-gluon.md
@@ -6,8 +6,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, init, nd
 from mxnet.gluon import nn
diff --git a/chapter_optimization/adadelta.md b/chapter_optimization/adadelta.md
index 96da07cc505c7dbcbf4a4a5fcb3fb1e059eaa94b..6ad208e2647acd8499813d5ae21a572fc398e1ad 100644
--- a/chapter_optimization/adadelta.md
+++ b/chapter_optimization/adadelta.md
@@ -47,8 +47,6 @@ def adadelta(params, sqrs, deltas, rho, batch_size):
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 import numpy as np
diff --git a/chapter_optimization/adagrad-gluon.md b/chapter_optimization/adagrad-gluon.md
index 2f3b695b844b0148ce6c0b5e37ba1fb33c7af82c..652ac8beb49c3eeb0d3a17385288d9104fe6b9b2 100644
--- a/chapter_optimization/adagrad-gluon.md
+++ b/chapter_optimization/adagrad-gluon.md
@@ -7,8 +7,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, init, nd
 from mxnet.gluon import nn
diff --git a/chapter_optimization/adagrad.md b/chapter_optimization/adagrad.md
index 080f702fbabc1c8ba22a3c5c495332755be7afcd..54ebd7cca3e1e4cddb07f010813ac415bd54a327 100644
--- a/chapter_optimization/adagrad.md
+++ b/chapter_optimization/adagrad.md
@@ -56,8 +56,6 @@ def adagrad(params, sqrs, lr, batch_size):
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 import numpy as np
diff --git a/chapter_optimization/adam-gluon.md b/chapter_optimization/adam-gluon.md
index 7e1a8b212668469da94c802475280b3abaa46f09..57ef1f59c1cb23206965a4d2fef07af1ec37d7cf 100644
--- a/chapter_optimization/adam-gluon.md
+++ b/chapter_optimization/adam-gluon.md
@@ -6,8 +6,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, init, nd
 from mxnet.gluon import nn
diff --git a/chapter_optimization/adam.md b/chapter_optimization/adam.md
index 707d12218553890a1de3b4f56189f4bcfc08836a..b27a5f085b40382201445eb168afb415c542b004 100644
--- a/chapter_optimization/adam.md
+++ b/chapter_optimization/adam.md
@@ -62,8 +62,6 @@ def adam(params, vs, sqrs, lr, batch_size, t):
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 import numpy as np
diff --git a/chapter_optimization/gd-sgd-gluon.md b/chapter_optimization/gd-sgd-gluon.md
index bf4d27c0f7e68405b8f3dc695c271ec0959cf5f4..d01a8a414a6a9d61a89091d9ebbd89f43c58ba67 100644
--- a/chapter_optimization/gd-sgd-gluon.md
+++ b/chapter_optimization/gd-sgd-gluon.md
@@ -6,8 +6,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, gluon, init, nd
 from mxnet.gluon import nn, data as gdata, loss as gloss
diff --git a/chapter_optimization/gd-sgd.md b/chapter_optimization/gd-sgd.md
index 2c5497afe4825a49a1ba4ac741f4d100bcdefd4f..fae75973dfd10d1f63272c8822bb54e7225a7c57 100644
--- a/chapter_optimization/gd-sgd.md
+++ b/chapter_optimization/gd-sgd.md
@@ -121,8 +121,6 @@ def sgd(params, lr, batch_size):
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 import numpy as np
diff --git a/chapter_optimization/momentum-gluon.md b/chapter_optimization/momentum-gluon.md
index 18e48f7ae1cf28dfdcc1e31fc02961fc7df4da79..b6b0e5d977271a7675e7fd93ff69ba4b11674efe 100644
--- a/chapter_optimization/momentum-gluon.md
+++ b/chapter_optimization/momentum-gluon.md
@@ -6,8 +6,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, init, nd
 from mxnet.gluon import nn
diff --git a/chapter_optimization/momentum.md b/chapter_optimization/momentum.md
index dd8eec96e11f2259fb73a852dab1c32975768248..ff04f968b5f5d8965e6dceb9275ed88812cbddaa 100644
--- a/chapter_optimization/momentum.md
+++ b/chapter_optimization/momentum.md
@@ -90,8 +90,6 @@ def sgd_momentum(params, vs, lr, mom, batch_size):
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 import numpy as np
diff --git a/chapter_optimization/optimization-intro.md b/chapter_optimization/optimization-intro.md
index 1b2f4379ae19d515861e7aceca664982776139d3..824a0a97473e44ea03242a950f85f2986b029a92 100644
--- a/chapter_optimization/optimization-intro.md
+++ b/chapter_optimization/optimization-intro.md
@@ -25,8 +25,6 @@
 
 ```{.python .input n=1}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mpl_toolkits import mplot3d
 import numpy as np
diff --git a/chapter_optimization/rmsprop-gluon.md b/chapter_optimization/rmsprop-gluon.md
index 7922290be0fe5286d4984be60187815b9d871962..e96a6f10fdb660d7dc7f000521e8cd937be5b6c3 100644
--- a/chapter_optimization/rmsprop-gluon.md
+++ b/chapter_optimization/rmsprop-gluon.md
@@ -7,8 +7,6 @@
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import gluon, init, nd
 from mxnet.gluon import nn
diff --git a/chapter_optimization/rmsprop.md b/chapter_optimization/rmsprop.md
index 415f575023c5407ee336f5c796109687a97154e4..d497545857cae06f1bcd73ddd980bbf8091a092a 100644
--- a/chapter_optimization/rmsprop.md
+++ b/chapter_optimization/rmsprop.md
@@ -45,8 +45,6 @@ def rmsprop(params, sqrs, lr, gamma, batch_size):
 
 ```{.python .input}
 %matplotlib inline
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 import numpy as np
diff --git a/chapter_recurrent-neural-networks/gru.md b/chapter_recurrent-neural-networks/gru.md
index 0d8b25edbbefbcda119476e71cd95434dd7e6e66..c7f09372a06337c3276a02b7caa14e23771bff85 100644
--- a/chapter_recurrent-neural-networks/gru.md
+++ b/chapter_recurrent-neural-networks/gru.md
@@ -63,8 +63,6 @@ $$\boldsymbol{H}_t = \boldsymbol{Z}_t \odot \boldsymbol{H}_{t-1} + (1 - \boldsy
 我们先读取并简单处理数据集。
 
 ```{.python .input n=2}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd
 import zipfile
diff --git a/chapter_recurrent-neural-networks/lstm.md b/chapter_recurrent-neural-networks/lstm.md
index 4b9125cf43751148565366e55b32fadcc3b690b7..ccda1cbcbe8323f546fe13781367e14dc3de3be6 100644
--- a/chapter_recurrent-neural-networks/lstm.md
+++ b/chapter_recurrent-neural-networks/lstm.md
@@ -69,8 +69,6 @@ $$\boldsymbol{H}_t = \boldsymbol{O}_t \odot \text{tanh}(\boldsymbol{C}_t).$$
 我们先读取并简单处理数据集。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import nd
 import zipfile
diff --git a/chapter_recurrent-neural-networks/rnn-gluon.md b/chapter_recurrent-neural-networks/rnn-gluon.md
index b1a71ccb94d3cee742a596bb8358d455be9951f4..c688de3090c7e4d7dcbda777a735d15c578d5627 100644
--- a/chapter_recurrent-neural-networks/rnn-gluon.md
+++ b/chapter_recurrent-neural-networks/rnn-gluon.md
@@ -10,8 +10,6 @@
 首先导入实验所需的包或模块,并抽取数据集。
 
 ```{.python .input n=1}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 import math
 import mxnet as mx
diff --git a/chapter_recurrent-neural-networks/rnn.md b/chapter_recurrent-neural-networks/rnn.md
index 8edfa783818a3bfade614f27e6c7be0759488eb5..a73836b2745d13b7ec84d47b32aafc4c088bc828 100644
--- a/chapter_recurrent-neural-networks/rnn.md
+++ b/chapter_recurrent-neural-networks/rnn.md
@@ -58,8 +58,6 @@ $$\hat{\boldsymbol{Y}}_t = \text{softmax}(\boldsymbol{O}_t).$$
 首先导入实现所需的包或模块。
 
 ```{.python .input}
-import sys
-sys.path.append('..')
 import gluonbook as gb
 from mxnet import autograd, nd
 from mxnet.gluon import loss as gloss
diff --git a/environment.yml b/environment.yml
index 91a8ea541148eda930def5f31a82c2b0e16dc103..d34a60fcfc903550a6e75945bceb38bc652453c7 100644
--- a/environment.yml
+++ b/environment.yml
@@ -7,3 +7,4 @@ dependencies:
 - pip:
   - requests==2.18.4
   - mxnet==1.2.0
+  - gluonbook==0.7.0
diff --git a/gluonbook/__init__.py b/gluonbook/__init__.py
index 7d8d718f740c8e0a5381d05c83046253ad86f75a..008ed79df76d8984c271427e6466310d843261a7 100644
--- a/gluonbook/__init__.py
+++ b/gluonbook/__init__.py
@@ -1,4 +1,4 @@
 from .utils import *
 
-__version__ = '0.6.11'
+__version__ = '0.7.0'
 