提交 2e091477 编写于 作者: L LielinJiang

clean code

上级 42a44536
......@@ -61,7 +61,6 @@ class AlignedDataset(BaseDataset):
B = B_transform(B)
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
# return A, B, index #{'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
"""Return the total number of images in the dataset."""
......
......@@ -65,27 +65,24 @@ def get_transform(cfg, params=None, grayscale=False, method=cv2.INTER_CUBIC, con
transform_list = []
if grayscale:
print('grayscale not support for now!!!')
# transform_list.append(transforms.Grayscale(1))
pass
if 'resize' in cfg.preprocess:
osize = (cfg.load_size, cfg.load_size)
# print('os size:', osize)
transform_list.append(transforms.Resize(osize, method))
elif 'scale_width' in cfg.preprocess:
print('scale_width not support for now!!!')
# transform_list.append(transforms.Lambda(lambda img: __scale_width(img, cfg.load_size, cfg.crop_size, method)))
pass
if 'crop' in cfg.preprocess:
# print('crop not support for now!!!', cfg.crop_size)
# transform_list.append(T.RandomCrop(cfg.crop_size))
if params is None:
transform_list.append(T.RandomCrop(cfg.crop_size))
else:
# print('crop not support for now!!!')
transform_list.append(T.Crop(params['crop_pos'], cfg.crop_size))
if cfg.preprocess == 'none':
print('preprocess not support for now!!!')
# transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
pass
if not cfg.no_flip:
if params is None:
......@@ -97,48 +94,3 @@ def get_transform(cfg, params=None, grayscale=False, method=cv2.INTER_CUBIC, con
transform_list += [transforms.Permute(to_rgb=True)]
transform_list += [transforms.Normalize((127.5, 127.5, 127.5), (127.5, 127.5, 127.5))]
return transforms.Compose(transform_list)
def __make_power_2(img, base, method=Image.BICUBIC):
    """Resize a PIL image so both sides are multiples of ``base``.

    Rounds each dimension to the nearest multiple of ``base``; returns the
    image unchanged when it is already aligned, otherwise warns once (via
    ``__print_size_warning``) and resizes with ``method``.
    """
    width, height = img.size
    new_w = int(round(width / base) * base)
    new_h = int(round(height / base) * base)
    if (new_w, new_h) == (width, height):
        return img
    __print_size_warning(width, height, new_w, new_h)
    return img.resize((new_w, new_h), method)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
    """Scale a PIL image so its width equals ``target_size``, keeping aspect ratio.

    The resulting height is clamped to at least ``crop_size`` so a subsequent
    crop always fits. Returns the image unchanged when it already satisfies
    both constraints.
    """
    width, height = img.size
    if width == target_size and height >= crop_size:
        return img
    scaled_height = int(max(target_size * height / width, crop_size))
    return img.resize((target_size, scaled_height), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
......@@ -33,15 +33,9 @@ class SingleDataset(BaseDataset):
A_paths(str) - - the path of the image
"""
A_path = self.A_paths[index]
# A_img = Image.open(A_path).convert('RGB')
A_img = cv2.imread(A_path)
A = self.transform(A_img)
# items = {}
# if self.cfg.direction == 'AtoB':
# items = {'A': A, 'A_paths': A_path}
# else:
# items = {'B': A, 'B_paths': A_path}
# return items
return {'A': A, 'A_paths': A_path}
def __len__(self):
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册