# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.vision.transforms.functional import normalize

from .builder import DATASETS
from ppgan.utils.gfpgan_tools import *


@DATASETS.register()
class PairedImageDataset(paddle.io.Dataset):
    """Paired image dataset for image restoration.

    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc.) and
    GT image pairs.

    There are three modes:
    1. 'lmdb': Use lmdb files. If opt['io_backend'] == lmdb.
    2. 'meta_info_file': Use meta information file to generate paths.
       If opt['io_backend'] != lmdb and opt['meta_info_file'] is not None.
    3. 'folder': Scan folders to generate paths. The rest.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            meta_info_file (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwargs.
            filename_tmpl (str): Template for each filename. Note that the
                template excludes the file extension. Default: '{}'.
            gt_size (int): Cropped patch size for gt patches.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing
                h and w for implementation).
            scale (int): Scale factor, which will be added automatically.
            phase (str): 'train' or 'val'.
    """

    def __init__(self, **opt):
        super(PairedImageDataset, self).__init__()
        self.opt = opt
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
        if 'filename_tmpl' in opt:
            self.filename_tmpl = opt['filename_tmpl']
        else:
            self.filename_tmpl = '{}'

        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
            self.paths = paired_paths_from_lmdb(
                [self.lq_folder, self.gt_folder], ['lq', 'gt'])
        elif 'meta_info_file' in self.opt and self.opt[
                'meta_info_file'] is not None:
            self.paths = paired_paths_from_meta_info_file(
                [self.lq_folder, self.gt_folder], ['lq', 'gt'],
                self.opt['meta_info_file'], self.filename_tmpl)
        else:
            self.paths = paired_paths_from_folder(
                [self.lq_folder, self.gt_folder], ['lq', 'gt'],
                self.filename_tmpl)

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'),
                                          **self.io_backend_opt)

        scale = self.opt['scale']

        # Load gt and lq images. Dimension order: HWC; channel order: BGR;
        # image range: [0, 1], float32.
        gt_path = self.paths[index]['gt_path']
        img_bytes = self.file_client.get(gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)
        lq_path = self.paths[index]['lq_path']
        img_bytes = self.file_client.get(lq_path, 'lq')
        img_lq = imfrombytes(img_bytes, float32=True)

        # augmentation for training
        if self.opt['phase'] == 'train':
            gt_size = self.opt['gt_size']
            # random crop
            img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale,
                                                gt_path)
            # flip, rotation
            img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'],
                                     self.opt['use_rot'])

        # color space transform
        if 'color' in self.opt and self.opt['color'] == 'y':
            img_gt = bgr2ycbcr(img_gt, y_only=True)[..., None]
            img_lq = bgr2ycbcr(img_lq, y_only=True)[..., None]

        # crop the unmatched GT images during validation or testing,
        # especially for SR benchmark datasets
        # TODO: It is better to update the datasets, rather than force to crop
        if self.opt['phase'] != 'train':
            img_gt = img_gt[0:img_lq.shape[0] * scale,
                            0:img_lq.shape[1] * scale, :]

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq],
                                    bgr2rgb=True,
                                    float32=True)
        # normalize
        if self.mean is not None or self.std is not None:
            img_lq = normalize(img_lq, self.mean, self.std)
            img_gt = normalize(img_gt, self.mean, self.std)

        return {
            'lq': img_lq,
            'gt': img_gt,
            'lq_path': lq_path,
            'gt_path': gt_path
        }

    def __len__(self):
        return len(self.paths)
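

# Minimal usage sketch (illustrative only, not part of the original module).
# It builds the dataset in 'folder' mode and wraps it in a paddle.io.DataLoader.
# Assumptions: the directory paths, patch size, and scale below are hypothetical
# placeholders, and the 'disk' io_backend type is assumed to be provided by the
# FileClient imported from ppgan.utils.gfpgan_tools.
if __name__ == '__main__':
    dataset = PairedImageDataset(
        dataroot_gt='data/example/gt',   # hypothetical GT folder
        dataroot_lq='data/example/lq',   # hypothetical LQ folder
        io_backend={'type': 'disk'},     # assumed disk backend; scans folders
        gt_size=128,                     # GT patch size used for random crop
        use_hflip=True,
        use_rot=True,
        scale=1,
        phase='train')
    loader = paddle.io.DataLoader(dataset, batch_size=4, shuffle=True)
    batch = next(iter(loader))
    # Each batch is a dict with 'lq'/'gt' tensors and the source file paths.
    print(batch['lq'].shape, batch['gt'].shape)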