From 4760af1c34361ccdf36e46c21be9b1055f956b8c Mon Sep 17 00:00:00 2001
From: LielinJiang
Date: Tue, 29 Sep 2020 12:43:01 +0000
Subject: [PATCH] clean code

---
 ppgan/datasets/paired_dataset.py        | 11 +----------
 ppgan/datasets/transforms/builder.py    |  9 ---------
 ppgan/datasets/transforms/transforms.py | 15 ---------------
 ppgan/datasets/unpaired_dataset.py      |  3 +--
 4 files changed, 2 insertions(+), 36 deletions(-)

diff --git a/ppgan/datasets/paired_dataset.py b/ppgan/datasets/paired_dataset.py
index d15d43b..4a68bfa 100644
--- a/ppgan/datasets/paired_dataset.py
+++ b/ppgan/datasets/paired_dataset.py
@@ -23,7 +23,7 @@ class PairedDataset(BaseDataset):
                                    cfg.phase)  # get the image directory
         self.AB_paths = sorted(make_dataset(
             self.dir_AB, cfg.max_dataset_size))  # get image paths
-        # assert(self.cfg.transform.load_size >= self.cfg.transform.crop_size)  # crop_size should be smaller than the size of loaded image
+
         self.input_nc = self.cfg.output_nc if self.cfg.direction == 'BtoA' else self.cfg.input_nc
         self.output_nc = self.cfg.input_nc if self.cfg.direction == 'BtoA' else self.cfg.output_nc
         self.transforms = build_transforms(cfg.transforms)
@@ -53,15 +53,6 @@ class PairedDataset(BaseDataset):
         B = AB[:h, w2:, :]
 
         # apply the same transform to both A and B
-        # transform_params = get_params(self.opt, A.size)
-        # transform_params = get_params(self.cfg.transform, (w2, h))
-
-        # A_transform = get_transform(self.cfg.transform, transform_params, grayscale=(self.input_nc == 1))
-        # B_transform = get_transform(self.cfg.transform, transform_params, grayscale=(self.output_nc == 1))
-
-        # A = A_transform(A)
-        # B = B_transform(B)
-        # A, B = self.transforms((A, B))
         A, B = self.transforms((A, B))
 
         return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
diff --git a/ppgan/datasets/transforms/builder.py b/ppgan/datasets/transforms/builder.py
index 01e8f9c..6dfc88a 100644
--- a/ppgan/datasets/transforms/builder.py
+++ b/ppgan/datasets/transforms/builder.py
@@ -25,11 +25,6 @@ class Compose(object):
     def __call__(self, data):
         for f in self.transforms:
             try:
-                # multi-fileds in a sample
-                # if isinstance(data, Sequence):
-                #     data = f(*data)
-                # # single field in a sample, call transform directly
-                # else:
                 data = f(data)
             except Exception as e:
                 stack_info = traceback.format_exc()
@@ -39,10 +34,6 @@
         return data
 
 
-def build_transform(cfg):
-    pass
-
-
 def build_transforms(cfg):
     transforms = []
 
diff --git a/ppgan/datasets/transforms/transforms.py b/ppgan/datasets/transforms/transforms.py
index 97287aa..f2b2156 100644
--- a/ppgan/datasets/transforms/transforms.py
+++ b/ppgan/datasets/transforms/transforms.py
@@ -1,15 +1,11 @@
 import sys
-import types
 import random
 import numbers
-import warnings
-import traceback
 import collections
 import numpy as np
 
 from paddle.utils import try_import
 import paddle.vision.transforms.functional as F
-import paddle.vision.transforms.transforms as T
 
 from .builder import TRANSFORMS
 
@@ -31,7 +27,6 @@ class Transform():
         """
         if args:
             for k, v in args.items():
-                # print(k, v)
                 if k != "self" and not k.startswith("_"):
                     setattr(self, k, v)
 
@@ -39,7 +34,6 @@
         raise NotImplementedError
 
     def __call__(self, inputs):
-        # print('debug:', type(inputs), type(inputs[0]))
         if isinstance(inputs, tuple):
             inputs = list(inputs)
         if self.keys is not None:
@@ -177,10 +171,6 @@ class RandomHorizontalFlip(Transform):
         return img
 
 
-# import paddle
-# paddle.vision.transforms.RandomHorizontalFlip
-
-
 @TRANSFORMS.register()
 class PairedRandomHorizontalFlip(RandomHorizontalFlip):
     def __init__(self, prob=0.5, keys=None):
@@ -271,11 +261,6 @@ class Permute(Transform):
         return img
 
 
-# import paddle
-# paddle.vision.transforms.Normalize
-# TRANSFORMS.register(T.Normalize)
-
-
 class Crop():
     def __init__(self, pos, size):
         self.pos = pos
diff --git a/ppgan/datasets/unpaired_dataset.py b/ppgan/datasets/unpaired_dataset.py
index 45a7c4f..232f7bd 100644
--- a/ppgan/datasets/unpaired_dataset.py
+++ b/ppgan/datasets/unpaired_dataset.py
@@ -35,8 +35,7 @@ class UnpairedDataset(BaseDataset):
         btoA = self.cfg.direction == 'BtoA'
         input_nc = self.cfg.output_nc if btoA else self.cfg.input_nc  # get the number of channels of input image
         output_nc = self.cfg.input_nc if btoA else self.cfg.output_nc  # get the number of channels of output image
-        # self.transform_A = get_transform(self.cfg.transform, grayscale=(input_nc == 1))
-        # self.transform_B = get_transform(self.cfg.transform, grayscale=(output_nc == 1))
+
         self.transform_A = build_transforms(self.cfg.transforms)
         self.transform_B = build_transforms(self.cfg.transforms)
 
-- 
GitLab