diff --git a/docs/1.0/blitz_data_parallel_tutorial.md b/docs/1.0/blitz_data_parallel_tutorial.md
index b9f180f1c923fc3c5b3e44ffba88f1056ed16d8b..45f1e44be25311cc421e96d32924b16c14344c57
--- a/docs/1.0/blitz_data_parallel_tutorial.md
+++ b/docs/1.0/blitz_data_parallel_tutorial.md
@@ -1,7 +1,7 @@
 # Optional: Data Parallelism
 
 > Translator: [bat67](https://github.com/bat67)
->
+>
 > The latest version is synced first to the [translator's repository](https://github.com/bat67/Deep-Learning-with-PyTorch-A-60-Minute-Blitz-cn).
 
 In this tutorial, we will learn how to use multiple GPUs with data parallelism (`DataParallel`).
@@ -58,16 +58,16 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 To make a dummy (random) dataset, you only need to implement `__getitem__`.
 
 ```python
-class RandomDataset(Dataset):
+class RandomDataset(Dataset):
 
-    def __init__(self, size, length):
+    def __init__(self, size, length):
         self.len = length
         self.data = torch.randn(length, size)
 
-    def __getitem__(self, index):
+    def __getitem__(self, index):
         return self.data[index]
 
-    def __len__(self):
+    def __len__(self):
         return self.len
 
 rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size),
@@ -81,14 +81,14 @@ rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size),
 We placed a print statement inside the model to monitor the size of the input and output tensors. Pay attention to what is printed at batch rank 0.
 
 ```python
-class Model(nn.Module):
+class Model(nn.Module):
     # Our model
 
-    def __init__(self, input_size, output_size):
+    def __init__(self, input_size, output_size):
         super(Model, self).__init__()
         self.fc = nn.Linear(input_size, output_size)
 
-    def forward(self, input):
+    def forward(self, input):
         output = self.fc(input)
         print("\tIn Model: input size", input.size(),
               "output size", output.size())
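
For reference, below is a minimal, self-contained sketch of the pipeline this file builds around the two classes touched by the hunks above. The hyperparameter values and the `DataParallel` wrapping are not shown in these hunks; they are assumptions based on the upstream 60-Minute Blitz tutorial.

```python
# A runnable sketch of the tutorial's pipeline, assuming the upstream
# hyperparameter names and values (input_size, output_size, batch_size,
# data_size); the DataParallel wrapping and the loop are not part of the
# diff hunks above.
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader

input_size, output_size = 5, 2
batch_size, data_size = 30, 100

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class RandomDataset(Dataset):
    """Dummy dataset of random tensors; only __getitem__ (and __len__) needed."""
    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len

class Model(nn.Module):
    """Single linear layer that prints the per-replica input/output sizes."""
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        print("\tIn Model: input size", input.size(),
              "output size", output.size())
        return output

rand_loader = DataLoader(dataset=RandomDataset(input_size, data_size),
                         batch_size=batch_size, shuffle=True)

model = Model(input_size, output_size)
if torch.cuda.device_count() > 1:
    # DataParallel splits each incoming batch across the available GPUs.
    model = nn.DataParallel(model)
model.to(device)

for data in rand_loader:
    input = data.to(device)
    output = model(input)
    print("Outside: input size", input.size(),
          "output size", output.size())
```

With N GPUs, `DataParallel` splits each batch of 30 into chunks of roughly 30/N, so the sizes printed inside the model (one line per replica) are smaller than the full batch sizes printed outside the loop; on a single device the two match.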