diff --git a/chapter_computer-vision/image-augmentation.md b/chapter_computer-vision/image-augmentation.md
index 6211a2f8445b6f9624c904a11996ed03dba7a6d3..5abba4f46cf8835787c528131aab2f36fc7e88ad 100644
--- a/chapter_computer-vision/image-augmentation.md
+++ b/chapter_computer-vision/image-augmentation.md
@@ -125,11 +125,10 @@ no_aug = gdata.vision.transforms.Compose([
 Next, we define an auxiliary function to make it easier to read images and apply image augmentation. The `transform_first` function provided by Gluon's datasets applies the image augmentation to the first element of each training example (image and label), that is, to the image. For a detailed introduction to `DataLoader`, refer to the earlier section ["Image Classification Dataset (Fashion-MNIST)"](../chapter_deep-learning-basics/fashion-mnist.md).
 
 ```{.python .input n=34}
-num_workers = 0 if sys.platform.startswith('win32') else 4
 def load_cifar10(is_train, augs, batch_size):
     return gdata.DataLoader(
         gdata.vision.CIFAR10(train=is_train).transform_first(augs),
-        batch_size=batch_size, shuffle=is_train, num_workers=num_workers)
+        batch_size=batch_size, shuffle=is_train, num_workers=4)
 ```
 
 ### Training the Model on Multiple GPUs
diff --git a/chapter_computer-vision/semantic-segmentation-and-dataset.md b/chapter_computer-vision/semantic-segmentation-and-dataset.md
index 881f58b6fba581425093a09b2a67ad32aa69bf11..40b923b33007f7f6eec36a8299780cbb306e9242 100644
--- a/chapter_computer-vision/semantic-segmentation-and-dataset.md
+++ b/chapter_computer-vision/semantic-segmentation-and-dataset.md
@@ -177,11 +177,10 @@ voc_test = VOCSegDataset(False, crop_size, voc_dir, colormap2label)
 
 ```{.python .input n=11}
 batch_size = 64
-num_workers = 0 if sys.platform.startswith('win32') else 4
 train_iter = gdata.DataLoader(voc_train, batch_size, shuffle=True,
-                              last_batch='discard', num_workers=num_workers)
+                              last_batch='discard', num_workers=4)
 test_iter = gdata.DataLoader(voc_test, batch_size, last_batch='discard',
-                             num_workers=num_workers)
+                             num_workers=4)
 ```
 
 Print the shape of the first minibatch. Unlike in image classification and object detection, the labels here are three-dimensional arrays.
diff --git a/chapter_computer-vision/ssd.md b/chapter_computer-vision/ssd.md
index f4cc765a27138bf9551dc2d6c85d9224e9ddd9e9..7d1b7a592d7fc52f7e5c2506ce8d4db212cf33f9 100644
--- a/chapter_computer-vision/ssd.md
+++ b/chapter_computer-vision/ssd.md
@@ -57,6 +57,19 @@ Y2 = forward(nd.zeros((2, 16, 10, 10)), cls_predictor(3, 10))
 (Y1.shape, Y2.shape)
 ```
 
+```{.json .output n=3}
+[
+  {
+    "data": {
+      "text/plain": "((2, 55, 20, 20), (2, 33, 10, 10))"
+    },
+    "execution_count": 3,
+    "metadata": {},
+    "output_type": "execute_result"
+  }
+]
+```
+
 The channel dimension contains the predictions for all anchor boxes with the same center. We first move the channel dimension to the final dimension. Since the batch size remains unchanged across scales, we can transform the prediction results into the two-dimensional format (batch size, height $\times$ width $\times$ number of channels) to facilitate the subsequent concatenation on dimension 1.
 
 ```{.python .input n=4}
@@ -69,15 +82,28 @@ def concat_preds(preds):
 
 This way, even though `Y1` and `Y2` have different shapes, we can still concatenate the prediction results at these two scales for the same minibatch.
 
-```{.python .input n=6}
+```{.python .input n=5}
 concat_preds([Y1, Y2]).shape
 ```
 
+```{.json .output n=5}
+[
+  {
+    "data": {
+      "text/plain": "(2, 25300)"
+    },
+    "execution_count": 5,
+    "metadata": {},
+    "output_type": "execute_result"
+  }
+]
+```
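As a quick sanity check of the concatenation shape above (a side calculation, not a cell from the chapter): the flatten step reshapes each (batch, channels, height, width) prediction to (batch, height × width × channels), so the 25300 follows directly from the printed shapes of `Y1` and `Y2`:

```python
# Per-example prediction counts after flattening each scale:
y1 = 55 * 20 * 20  # Y1: (2, 55, 20, 20) -> 22000 values per example
y2 = 33 * 10 * 10  # Y2: (2, 33, 10, 10) -> 3300 values per example
print(y1 + y2)     # 25300, matching concat_preds([Y1, Y2]).shape == (2, 25300)
```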
"execute_result" + } +] +``` + ### 基础网络块 基础网络块用来从原始图像抽取特征。为了计算简洁,我们在这里构造一个小的基础网络。该网络串联三个高和宽减半块,并逐步将通道数翻倍。当输入的原始图像的形状为$256\times256$时,基础网络块输出的特征图的形状为$32 \times 32$。 -```{.python .input n=9} +```{.python .input n=8} def base_net(): blk = nn.Sequential() for num_filters in [16, 32, 64]: @@ -108,11 +147,24 @@ def base_net(): forward(nd.zeros((2, 3, 256, 256)), base_net()).shape ``` +```{.json .output n=8} +[ + { + "data": { + "text/plain": "(2, 64, 32, 32)" + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } +] +``` + ### 完整的模型 SSD模型一共包含五个模块,每个模块输出的特征图既用来生成锚框,又用来预测这些锚框的类别和偏移量。第一模块为基础网络块,第二至第四模块为高和宽减半块,第五模块使用全局最大池化层将高和宽降到1。因此第二至第五模块均为图9.4中的多尺度特征块。 -```{.python .input n=10} +```{.python .input n=9} def get_blk(i): if i == 0: blk = base_net() @@ -125,7 +177,7 @@ def get_blk(i): 接下来我们定义每个模块如何进行前向计算。跟之前介绍的卷积神经网络不同,这里不仅返回卷积计算输出的特征图`Y`,还返回根据`Y`生成的当前尺度的锚框,以及基于`Y`预测的锚框类别和偏移量。 -```{.python .input n=11} +```{.python .input n=10} def blk_forward(X, blk, size, ratio, cls_predictor, bbox_predictor): Y = blk(X) anchors = contrib.ndarray.MultiBoxPrior(Y, sizes=size, ratios=ratio) @@ -136,7 +188,7 @@ def blk_forward(X, blk, size, ratio, cls_predictor, bbox_predictor): 我们提到,图9.4中较靠近顶部的多尺度特征块用来检测尺寸较大的目标,因此需要生成较大的锚框。我们在这里先将0.2到1.05之间均分五份,以确定不同尺度下锚框大小的较小值0.2、0.37、0.54等,再按$\sqrt{0.2 \times 0.37} = 0.272$、$\sqrt{0.37 \times 0.54} = 0.447$等确定不同尺度下锚框大小的较大值。 -```{.python .input n=12} +```{.python .input n=11} sizes = [[0.2, 0.272], [0.37, 0.447], [0.54, 0.619], [0.71, 0.79], [0.88, 0.961]] ratios = [[1, 2, 0.5]] * 5 @@ -145,7 +197,7 @@ num_anchors = len(sizes[0]) + len(ratios[0]) - 1 现在,我们可以定义出完整的模型`TinySSD`了。 -```{.python .input n=13} +```{.python .input n=12} class TinySSD(nn.Block): def __init__(self, num_classes, **kwargs): super(TinySSD, self).__init__(**kwargs) @@ -172,7 +224,7 @@ class TinySSD(nn.Block): 我们创建SSD模型实例并对一个高和宽均为256像素的小批量图像`X`做前向计算。我们在之前验证过,第一模块输出的特征图的形状为$32 \times 32$。由于第二至第四模块为高和宽减半块、第五模块为全局池化层,并且以特征图每个单元为中心生成4个锚框,每个图像在5个尺度下生成的锚框总数为$(32^2 + 16^2 + 8^2 + 4^2 + 1)\times 4 = 5444$。 -```{.python .input} +```{.python .input n=13} net = TinySSD(num_classes=1) net.initialize() X = nd.zeros((32, 3, 256, 256)) @@ -183,6 +235,16 @@ print('output class preds:', cls_preds.shape) print('output bbox preds:', bbox_preds.shape) ``` +```{.json .output n=13} +[ + { + "name": "stdout", + "output_type": "stream", + "text": "output anchors: (1, 5444, 4)\noutput class preds: (32, 5444, 2)\noutput bbox preds: (32, 21776)\n" + } +] +``` + ## 训练 下面我们描述如何一步步训练SSD模型来进行目标检测。 @@ -194,8 +256,6 @@ print('output bbox preds:', bbox_preds.shape) ```{.python .input n=14} batch_size = 32 train_data, test_data = gb.load_data_pikachu(batch_size) -# 为保证 GPU 计算效率,这里为每张训练图像填充了两个标签为 -1 的边界框。 -train_data.reshape(label_shape=(3, 5)) ``` 在皮卡丘数据集中,目标的类别数为1。定义好模型以后,我们需要初始化模型参数并定义优化算法。 @@ -223,7 +283,7 @@ def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks): 我们可以沿用准确率评价分类结果。因为使用了$L_1$范数损失,我们用平均绝对误差评价边界框的预测结果。 -```{.python .input n=18} +```{.python .input n=17} def cls_eval(cls_preds, cls_labels): # 由于类别预测结果放在最后一维,argmax 需要指定最后一维。 return (cls_preds.argmax(axis=-1) == cls_labels).mean().asscalar() @@ -236,7 +296,7 @@ def bbox_eval(bbox_preds, bbox_labels, bbox_masks): 在训练模型时,我们需要在模型的前向计算过程中生成多尺度的锚框`anchors`,并为每个锚框预测类别`cls_preds`和偏移量`bbox_preds`。之后,我们根据标签信息`Y`为生成的每个锚框标注类别`cls_labels`和偏移量`bbox_labels`。最后,我们根据类别和偏移量的预测和标注值计算损失函数。为了代码简洁,这里没有评价测试数据集。 -```{.python .input n=19} +```{.python .input n=18} for epoch in range(20): acc, mae = 0, 0 train_data.reset() # 从头读取数据。 @@ -262,11 +322,21 @@ for 
+
 ## Training
 
 Below, we describe step by step how to train the SSD model for object detection.
 
@@ -194,8 +256,6 @@
 
 ```{.python .input n=14}
 batch_size = 32
 train_data, test_data = gb.load_data_pikachu(batch_size)
-# To ensure GPU computing efficiency, each training image is padded with two bounding boxes labeled -1.
-train_data.reshape(label_shape=(3, 5))
 ```
 
 In the Pikachu dataset, the number of object categories is 1. After defining the model, we need to initialize its parameters and define the optimization algorithm.
 
@@ -223,7 +283,7 @@ def calc_loss(cls_preds, cls_labels, bbox_preds, bbox_labels, bbox_masks):
 
 We can continue to use accuracy to evaluate the classification results. Since we use the $L_1$ norm loss, we use the mean absolute error to evaluate the bounding box prediction results.
 
-```{.python .input n=18}
+```{.python .input n=17}
 def cls_eval(cls_preds, cls_labels):
     # The class predictions are on the final dimension, so argmax must specify it.
     return (cls_preds.argmax(axis=-1) == cls_labels).mean().asscalar()
@@ -236,7 +296,7 @@ def bbox_eval(bbox_preds, bbox_labels, bbox_masks):
 
 When training the model, we need to generate multiscale anchor boxes `anchors` during the model's forward computation and predict the category `cls_preds` and offset `bbox_preds` for each anchor box. Afterwards, we label the category `cls_labels` and offset `bbox_labels` of each generated anchor box based on the label information `Y`. Finally, we compute the loss function using the predicted and labeled values of the categories and offsets. To keep the code concise, we do not evaluate the model on the test dataset here.
 
-```{.python .input n=19}
+```{.python .input n=18}
 for epoch in range(20):
     acc, mae = 0, 0
     train_data.reset()  # Read the data from the start.
@@ -262,11 +322,21 @@ for epoch in range(20):
             epoch + 1, 1 - acc / (i + 1), mae / (i + 1), time.time() - start))
 ```
 
+```{.json .output n=18}
+[
+  {
+    "name": "stdout",
+    "output_type": "stream",
+    "text": "epoch 5, class err 3.02e-03, bbox mae 3.31e-03, time 8.9 sec\nepoch 10, class err 2.69e-03, bbox mae 2.90e-03, time 8.8 sec\nepoch 15, class err 2.68e-03, bbox mae 2.85e-03, time 8.9 sec\nepoch 20, class err 2.62e-03, bbox mae 2.65e-03, time 8.8 sec\n"
+  }
+]
+```
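The loop above skips test-set evaluation for brevity. As a sketch of how one might add it (not code from the chapter; it assumes `test_data` yields batches with the same layout as `train_data` and mirrors the `MultiBoxTarget` labeling step used during training), evaluation could reuse `cls_eval` and `bbox_eval` after each epoch:

```python
def evaluate(net, test_data, ctx):
    # Return (class error, bbox MAE) averaged over the test iterator.
    test_data.reset()  # read the test data from the start
    acc, mae, n = 0, 0, 0
    for batch in test_data:
        X = batch.data[0].as_in_context(ctx)
        Y = batch.label[0].as_in_context(ctx)
        anchors, cls_preds, bbox_preds = net(X)
        # Label the anchors the same way as in training.
        bbox_labels, bbox_masks, cls_labels = contrib.ndarray.MultiBoxTarget(
            anchors, Y, cls_preds.transpose((0, 2, 1)))
        acc += cls_eval(cls_preds, cls_labels)
        mae += bbox_eval(bbox_preds, bbox_labels, bbox_masks)
        n += 1
    return 1 - acc / n, mae / n
```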
" + }, + "metadata": {}, + "output_type": "display_data" + } +] +``` + ## 小结 * SSD是一个多尺度的目标检测模型。该模型基于基础网络块和各个多尺度特征块生成不同数量和不同大小的锚框,并通过预测锚框的类别和偏移量检测不同大小的目标。 @@ -329,7 +412,7 @@ $$ 当$\sigma$很大时该损失类似于$L_1$范数损失。当它较小时,损失函数较平滑。 -```{.python .input n=23} +```{.python .input n=22} sigmas = [10, 1, 0.5] lines = ['-', '--', '-.'] x = nd.arange(-2, 2, 0.1) @@ -341,13 +424,26 @@ for l, s in zip(lines, sigmas): gb.plt.legend(); ``` +```{.json .output n=22} +[ + { + "data": { + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "text/plain": "
" + }, + "metadata": {}, + "output_type": "display_data" + } +] +``` + 在类别预测时,实验中使用了交叉熵损失:设真实类别$j$的预测概率是$p_j$,交叉熵损失为$-\log p_j$。我们还可以使用焦点损失(focal loss)[2]:给定正的超参数$\gamma$和$\alpha$,该损失的定义为 $$ - \alpha (1-p_j)^{\gamma} \log p_j.$$ 可以看到,增大$\gamma$可以有效减小正类预测概率较大时的损失。 -```{.python .input n=24} +```{.python .input n=23} def focal_loss(gamma, x): return -(1 - x) ** gamma * x.log() @@ -358,6 +454,19 @@ for l, gamma in zip(lines, [0, 1, 5]): gb.plt.legend(); ``` +```{.json .output n=23} +[ + { + "data": { + "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", + "text/plain": "
" + }, + "metadata": {}, + "output_type": "display_data" + } +] +``` + ### 训练和预测 * 当目标在图像中占比较小时,模型通常会采用比较大的输入图像尺寸。 diff --git a/chapter_convolutional-neural-networks/alexnet.md b/chapter_convolutional-neural-networks/alexnet.md index 3d74b9c1993074597791b1e01837a3b528ecaf21..9a2a2b6d593d22df9cecac4ec0b3c6add37bf3d3 100644 --- a/chapter_convolutional-neural-networks/alexnet.md +++ b/chapter_convolutional-neural-networks/alexnet.md @@ -106,13 +106,12 @@ def load_data_fashion_mnist(batch_size, resize=None, root=os.path.join( transformer = gdata.vision.transforms.Compose(transformer) mnist_train = gdata.vision.FashionMNIST(root=root, train=True) mnist_test = gdata.vision.FashionMNIST(root=root, train=False) - num_workers = 0 if sys.platform.startswith('win32') else 4 train_iter = gdata.DataLoader( mnist_train.transform_first(transformer), batch_size, shuffle=True, - num_workers=num_workers) + num_workers=4) test_iter = gdata.DataLoader( mnist_test.transform_first(transformer), batch_size, shuffle=False, - num_workers=num_workers) + num_workers=4) return train_iter, test_iter batch_size = 128 diff --git a/chapter_deep-learning-basics/fashion-mnist.md b/chapter_deep-learning-basics/fashion-mnist.md index 1c0c344c69b70015e2f954b99017ee8f603906c2..81d44579c69cf3cc5158d16e878e6c0df71bd287 100644 --- a/chapter_deep-learning-basics/fashion-mnist.md +++ b/chapter_deep-learning-basics/fashion-mnist.md @@ -88,17 +88,12 @@ show_fashion_mnist(X, get_fashion_mnist_labels(y)) ```{.python .input n=28} batch_size = 256 transformer = gdata.vision.transforms.ToTensor() -if sys.platform.startswith('win'): - num_workers = 0 # 0 表示不用额外的进程来加速读取数据。 -else: - num_workers = 4 - train_iter = gdata.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, - num_workers=num_workers) + num_workers=4) test_iter = gdata.DataLoader(mnist_test.transform_first(transformer), batch_size, shuffle=False, - num_workers=num_workers) + num_workers=4) ``` 我们将获取并读取Fashion-MNIST数据集的逻辑封装在`gluonbook.load_data_fashion_mnist`函数中供后面章节调用。该函数将返回`train_iter`和`test_iter`两个变量。随着本书内容的不断深入,我们会进一步改进该函数。它的完整实现将在[“深度卷积神经网络(AlexNet)”](../chapter_convolutional-neural-networks/alexnet.md)一节中描述。 diff --git a/chapter_deep-learning-computation/read-write.md b/chapter_deep-learning-computation/read-write.md index 79e696f5efd6f9177f8400c2778de3d065a9a667..936015b2a617fdc4a8f4313ed80b7ebae5a4a189 100644 --- a/chapter_deep-learning-computation/read-write.md +++ b/chapter_deep-learning-computation/read-write.md @@ -64,14 +64,14 @@ y = net(x) ```{.python .input} filename = 'mlp.params' -net.save_params(filename) +net.save_parameters(filename) ``` 接下来,我们再实例化一次定义好的多层感知机。跟随机初始化模型参数不同,我们在这里直接读取保存在文件里的参数。 ```{.python .input n=8} net2 = MLP() -net2.load_params(filename) +net2.load_parameters(filename) ``` 因为这两个实例都有同样的模型参数,那么对同一个输入`x`的计算结果将会是一样。我们来验证一下。 diff --git a/chapter_natural-language-processing/word2vec-gluon.md b/chapter_natural-language-processing/word2vec-gluon.md index 602e58080fb815458fb4de5e53345ab268cb8276..e6ca0ea5037093661f687a2eae5df71673c5822b 100644 --- a/chapter_natural-language-processing/word2vec-gluon.md +++ b/chapter_natural-language-processing/word2vec-gluon.md @@ -6,6 +6,7 @@ ```{.python .input n=1} import collections +import gluonbook as gb import math from mxnet import autograd, gluon, nd from mxnet.gluon import data as gdata, loss as gloss, nn @@ -181,10 +182,9 @@ def batchify(data): ```{.python .input n=14} batch_size = 512 -num_workers = 0 if sys.platform.startswith('win32') else 4 dataset = 
@@ -285,13 +287,15 @@
 
 ```{.python .input n=23}
 def train(net, lr, num_epochs):
-    net.initialize(force_reinit=True)
+    ctx = gb.try_gpu()
+    net.initialize(ctx=ctx, force_reinit=True)
     trainer = gluon.Trainer(net.collect_params(), 'adam',
                             {'learning_rate': lr})
     for epoch in range(num_epochs):
         start_time, train_l_sum = time.time(), 0
         for batch in data_iter:
-            center, context_negative, mask, label = batch
+            center, context_negative, mask, label = [
+                data.as_in_context(ctx) for data in batch]
             with autograd.record():
                 pred = skip_gram(center, context_negative, net[0], net[1])
                 # Use the mask variable to avoid the effect of padding on the loss computation.
diff --git a/gluonbook/utils.py b/gluonbook/utils.py
index 22a31e9e7b64216c2604c598edf2da39b3325fab..667977f937626412ede6335b5f96c2916015128d 100644
--- a/gluonbook/utils.py
+++ b/gluonbook/utils.py
@@ -235,14 +235,13 @@ def load_data_fashion_mnist(batch_size, resize=None, root=os.path.join(
     mnist_train = gdata.vision.FashionMNIST(root=root, train=True)
     mnist_test = gdata.vision.FashionMNIST(root=root, train=False)
 
-    num_workers = 0 if sys.platform.startswith('win32') else 4
     train_iter = gdata.DataLoader(mnist_train.transform_first(transformer),
                                   batch_size, shuffle=True,
-                                  num_workers=num_workers)
+                                  num_workers=4)
    test_iter = gdata.DataLoader(mnist_test.transform_first(transformer),
                                 batch_size, shuffle=False,
-                                num_workers=num_workers)
+                                num_workers=4)
     return train_iter, test_iter
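A pattern worth noting across this patch: every file drops the same Windows guard and hardcodes `num_workers=4`. The removed idiom, shown below, set `num_workers` to 0 on Windows because MXNet's `DataLoader` did not support multiprocessing workers there at the time (the original comment notes that 0 disables the extra processes); readers on such a setup can restore it locally:

```python
import sys

# 0 disables the extra worker processes that speed up data loading;
# on CPython for Windows, sys.platform is always 'win32'.
num_workers = 0 if sys.platform.startswith('win32') else 4
```

Note that `fashion-mnist.md` checked `startswith('win')` while the other files checked `startswith('win32')`; since `sys.platform` is `'win32'` on both 32-bit and 64-bit Windows, the two guards were equivalent.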