diff --git a/ppgan/datasets/base_dataset.py b/ppgan/datasets/base_dataset.py
index 8ea7b8b0063b5f396575dfe1a7fe222a8267ec6a..229c8e23a9c4286cc9e1af9a223cf99c8db98922 100644
--- a/ppgan/datasets/base_dataset.py
+++ b/ppgan/datasets/base_dataset.py
@@ -13,10 +13,10 @@
 # limitations under the License.
 
 import os
+import copy
 from pathlib import Path
-from abc import ABCMeta, abstractmethod
-
 from paddle.io import Dataset
+from abc import ABCMeta, abstractmethod
 
 from .preprocess import build_preprocess
 
@@ -119,7 +119,7 @@ class BaseDataset(Dataset, metaclass=ABCMeta):
         return samples
 
     def __getitem__(self, idx):
-        datas = self.data_infos[idx]
+        datas = copy.deepcopy(self.data_infos[idx])
         if hasattr(self, 'preprocess') and self.preprocess:
             datas = self.preprocess(datas)
 
diff --git a/ppgan/datasets/builder.py b/ppgan/datasets/builder.py
index 2e1bfb9ba7307db64bb471b120016fc4d220b28a..9ee1f41c7597bebf7cdfef611f2cfba3f162edf7 100644
--- a/ppgan/datasets/builder.py
+++ b/ppgan/datasets/builder.py
@@ -62,7 +62,7 @@ def build_dataloader(cfg, is_train=True, distributed=True):
                                       batch_size=batch_size,
                                       shuffle=True if is_train else False,
                                       drop_last=True if is_train else False,
-                                      use_shared_memory=False,
+                                      use_shared_memory=use_shared_memory,
                                       num_workers=num_workers)
 
     return dataloader