提交 f4a61fdb — 作者: Tinazhang (commit f4a61fdb, authored by Tinazhang)

Add new unit-test (UT) cases and fill in missing test cases (TCs) for the Python-transform ops.

上级 c0d38e40 (parent commit: c0d38e40)
......@@ -21,7 +21,8 @@ import mindspore.dataset.transforms.vision.py_transforms as py_vision
import mindspore.dataset.transforms.vision.utils as mode
import mindspore.dataset as ds
from mindspore import log as logger
from util import save_and_check_md5, visualize
from util import save_and_check_md5, visualize, config_get_set_seed, \
config_get_set_num_parallel_workers
GENERATE_GOLDEN = False
......@@ -30,11 +31,11 @@ DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def test_random_crop_op(plot=False):
def test_random_crop_op_c(plot=False):
"""
Test RandomCrop Op
Test RandomCrop Op in c transforms
"""
logger.info("test_random_crop_op")
logger.info("test_random_crop_op_c")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -58,13 +59,47 @@ def test_random_crop_op(plot=False):
if plot:
visualize(image, image_cropped)
def test_random_crop_op_py(plot=False):
    """
    Test RandomCrop op in py transforms.

    Runs one pipeline with a padded RandomCrop and a second, decode-only
    pipeline over the same records so the cropped and original images can
    be compared (and optionally plotted) side by side.
    """
    logger.info("test_random_crop_op_py")

    # First dataset: the RandomCrop transform under test.
    crop_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
        py_vision.ToTensor()
    ])
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=crop_compose())

    # Second dataset for comparison: decode only, no cropping.
    plain_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.ToTensor()
    ])
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=plain_compose())

    crop_images = []
    original_images = []
    for cropped_item, plain_item in zip(data1.create_dict_iterator(),
                                        data2.create_dict_iterator()):
        # ToTensor yields CHW floats in [0, 1]; convert back to HWC uint8 for display.
        crop_images.append(
            (cropped_item["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        original_images.append(
            (plain_item["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
    if plot:
        visualize(original_images, crop_images)
def test_random_crop_01_c():
"""
Test RandomCrop op with c_transforms: size is a single integer, expected to pass
"""
logger.info("test_random_crop_01_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -77,13 +112,17 @@ def test_random_crop_01_c():
filename = "random_crop_01_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_01_py():
"""
Test RandomCrop op with py_transforms: size is a single integer, expected to pass
"""
logger.info("test_random_crop_01_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -99,13 +138,17 @@ def test_random_crop_01_py():
filename = "random_crop_01_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_02_c():
"""
Test RandomCrop op with c_transforms: size is a list/tuple with length 2, expected to pass
"""
logger.info("test_random_crop_02_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -118,13 +161,17 @@ def test_random_crop_02_c():
filename = "random_crop_02_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_02_py():
"""
Test RandomCrop op with py_transforms: size is a list/tuple with length 2, expected to pass
"""
logger.info("test_random_crop_02_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -140,13 +187,17 @@ def test_random_crop_02_py():
filename = "random_crop_02_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_03_c():
"""
Test RandomCrop op with c_transforms: input image size == crop size, expected to pass
"""
logger.info("test_random_crop_03_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -159,13 +210,17 @@ def test_random_crop_03_c():
filename = "random_crop_03_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_03_py():
"""
Test RandomCrop op with py_transforms: input image size == crop size, expected to pass
"""
logger.info("test_random_crop_03_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -181,27 +236,28 @@ def test_random_crop_03_py():
filename = "random_crop_03_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_04_c():
"""
Test RandomCrop op with c_transforms: input image size < crop size, expected to fail
"""
logger.info("test_random_crop_04_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268
random_crop_op = c_vision.RandomCrop([2268, 4033])
decode_op = c_vision.Decode()
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_op)
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268
random_crop_op = c_vision.RandomCrop([2268, 4033])
decode_op = c_vision.Decode()
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_op)
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except Exception as e:
data.create_dict_iterator().get_next()
except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Crop size is greater than the image dim" in str(e)
def test_random_crop_04_py():
"""
......@@ -209,25 +265,20 @@ def test_random_crop_04_py():
input image size < crop size, expected to fail
"""
logger.info("test_random_crop_04_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268
transforms = [
py_vision.Decode(),
py_vision.RandomCrop([2268, 4033]),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268
transforms = [
py_vision.Decode(),
py_vision.RandomCrop([2268, 4033]),
py_vision.ToTensor()
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
image_list = []
for item in data.create_dict_iterator():
image = (item["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_list.append(image.shape)
except Exception as e:
data.create_dict_iterator().get_next()
except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
def test_random_crop_05_c():
......@@ -237,8 +288,8 @@ def test_random_crop_05_c():
expected to pass
"""
logger.info("test_random_crop_05_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -251,6 +302,10 @@ def test_random_crop_05_c():
filename = "random_crop_05_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_05_py():
"""
Test RandomCrop op with py_transforms:
......@@ -258,8 +313,8 @@ def test_random_crop_05_py():
expected to pass
"""
logger.info("test_random_crop_05_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -275,30 +330,28 @@ def test_random_crop_05_py():
filename = "random_crop_05_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_06_c():
"""
Test RandomCrop op with c_transforms:
invalid size, expected to raise TypeError
"""
logger.info("test_random_crop_06_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: if size is neither an int nor a list of length 2, an exception will raise
random_crop_op = c_vision.RandomCrop([512, 512, 375])
decode_op = c_vision.Decode()
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_op)
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Size" in str(e)
assert "Size should be a single integer" in str(e)
def test_random_crop_06_py():
"""
......@@ -306,12 +359,10 @@ def test_random_crop_06_py():
invalid size, expected to raise TypeError
"""
logger.info("test_random_crop_06_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: if size is neither an int nor a list of length 2, an exception will raise
transforms = [
py_vision.Decode(),
......@@ -320,13 +371,9 @@ def test_random_crop_06_py():
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
image_list = []
for item in data.create_dict_iterator():
image = (item["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_list.append(image.shape)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Size" in str(e)
assert "Size should be a single integer" in str(e)
def test_random_crop_07_c():
"""
......@@ -335,8 +382,8 @@ def test_random_crop_07_c():
expected to pass
"""
logger.info("test_random_crop_07_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -349,6 +396,10 @@ def test_random_crop_07_c():
filename = "random_crop_07_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_07_py():
"""
Test RandomCrop op with py_transforms:
......@@ -356,8 +407,8 @@ def test_random_crop_07_py():
expected to pass
"""
logger.info("test_random_crop_07_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -373,14 +424,18 @@ def test_random_crop_07_py():
filename = "random_crop_07_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_08_c():
"""
Test RandomCrop op with c_transforms: padding_mode is Border.EDGE,
expected to pass
"""
logger.info("test_random_crop_08_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -393,14 +448,18 @@ def test_random_crop_08_c():
filename = "random_crop_08_c_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_08_py():
"""
Test RandomCrop op with py_transforms: padding_mode is Border.EDGE,
expected to pass
"""
logger.info("test_random_crop_08_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -416,13 +475,15 @@ def test_random_crop_08_py():
filename = "random_crop_08_py_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_09():
"""
Test RandomCrop op: invalid type of input image (not PIL), expected to raise TypeError
"""
logger.info("test_random_crop_09")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -433,13 +494,10 @@ def test_random_crop_09():
py_vision.RandomCrop(512)
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
try:
data = data.map(input_columns=["image"], operations=transform())
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except Exception as e:
data.create_dict_iterator().get_next()
except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "should be PIL Image" in str(e)
......@@ -448,8 +506,6 @@ def test_random_crop_comp(plot=False):
Test RandomCrop and compare between python and c image augmentation
"""
logger.info("Test RandomCrop with c_transform and py_transform comparison")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
cropped_size = 512
# First dataset
......@@ -479,6 +535,7 @@ def test_random_crop_comp(plot=False):
if plot:
visualize(image_c_cropped, image_py_cropped)
if __name__ == "__main__":
test_random_crop_01_c()
test_random_crop_02_c()
......@@ -497,5 +554,6 @@ if __name__ == "__main__":
test_random_crop_07_py()
test_random_crop_08_py()
test_random_crop_09()
test_random_crop_op(True)
test_random_crop_op_c(True)
test_random_crop_op_py(True)
test_random_crop_comp(True)
......@@ -23,7 +23,8 @@ import mindspore.dataset.transforms.vision.py_transforms as py_vision
import mindspore.dataset.transforms.vision.utils as mode
import mindspore.dataset as ds
from mindspore import log as logger
from util import diff_mse, save_and_check_md5, visualize
from util import diff_mse, save_and_check_md5, visualize, \
config_get_set_seed, config_get_set_num_parallel_workers
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
......@@ -31,11 +32,11 @@ SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
GENERATE_GOLDEN = False
def test_random_crop_and_resize_op(plot=False):
def test_random_crop_and_resize_op_c(plot=False):
"""
Test RandomCropAndResize op
Test RandomCropAndResize op in c transforms
"""
logger.info("test_random_crop_and_resize_op")
logger.info("test_random_crop_and_resize_op_c")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -64,13 +65,51 @@ def test_random_crop_and_resize_op(plot=False):
if plot:
visualize(original_images, crop_and_resize_images)
def test_random_crop_and_resize_op_py(plot=False):
    """
    Test RandomCropAndResize op in py transforms.

    With scale=(1, 1) and ratio=(0.5, 0.5) the crop is deterministic, so
    resizing the untouched reference image to the same target size lets the
    MSE against the transformed image act as a similarity check.
    """
    logger.info("test_random_crop_and_resize_op_py")

    # First dataset: RandomResizedCrop under test.
    resize_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomResizedCrop((256, 512), (1, 1), (0.5, 0.5)),
        py_vision.ToTensor()
    ])
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=resize_compose())

    # Second dataset for comparison: decode only.
    plain_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.ToTensor()
    ])
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=plain_compose())

    crop_and_resize_images = []
    original_images = []
    pairs = zip(data1.create_dict_iterator(), data2.create_dict_iterator())
    for idx, (item1, item2) in enumerate(pairs, start=1):
        # Convert CHW floats in [0, 1] back to HWC uint8.
        crop_and_resize = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        # cv2.resize takes (width, height), matching the (256, 512) HxW target.
        original = cv2.resize(original, (512, 256))
        mse = diff_mse(crop_and_resize, original)
        logger.info("random_crop_and_resize_op_{}, mse: {}".format(idx, mse))
        crop_and_resize_images.append(crop_and_resize)
        original_images.append(original)
    if plot:
        visualize(original_images, crop_and_resize_images)
def test_random_crop_and_resize_01():
"""
Test RandomCropAndResize with md5 check, expected to pass
"""
logger.info("test_random_crop_and_resize_01")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -94,14 +133,18 @@ def test_random_crop_and_resize_01():
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_02():
"""
Test RandomCropAndResize with md5 check:Image interpolation mode is Inter.NEAREST,
expected to pass
"""
logger.info("test_random_crop_and_resize_02")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -125,13 +168,17 @@ def test_random_crop_and_resize_02():
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_03():
"""
Test RandomCropAndResize with md5 check: max_attempts is 1, expected to pass
"""
logger.info("test_random_crop_and_resize_03")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
......@@ -155,27 +202,25 @@ def test_random_crop_and_resize_03():
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_crop_and_resize_04_c():
"""
Test RandomCropAndResize with c_tranforms: invalid range of scale (max<min),
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
# If input range of scale is not in the order of (min, max), ValueError will be raised.
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5))
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
......@@ -186,12 +231,10 @@ def test_random_crop_and_resize_04_py():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_04_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
# If input range of scale is not in the order of (min, max), ValueError will be raised.
......@@ -200,10 +243,6 @@ def test_random_crop_and_resize_04_py():
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
......@@ -214,21 +253,15 @@ def test_random_crop_and_resize_05_c():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_c")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5))
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
data = data.map(input_columns=["image"], operations=decode_op)
data = data.map(input_columns=["image"], operations=random_crop_and_resize_op)
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
......@@ -239,12 +272,10 @@ def test_random_crop_and_resize_05_py():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_py")
ds.config.set_seed(0)
ds.config.set_num_parallel_workers(1)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
......@@ -253,10 +284,6 @@ def test_random_crop_and_resize_05_py():
]
transform = py_vision.ComposeOp(transforms)
data = data.map(input_columns=["image"], operations=transform())
image_list = []
for item in data.create_dict_iterator():
image = item["image"]
image_list.append(image.shape)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input range is not valid" in str(e)
......@@ -295,7 +322,8 @@ def test_random_crop_and_resize_comp(plot=False):
visualize(image_c_cropped, image_py_cropped)
if __name__ == "__main__":
test_random_crop_and_resize_op(True)
test_random_crop_and_resize_op_c(True)
test_random_crop_and_resize_op_py(True)
test_random_crop_and_resize_01()
test_random_crop_and_resize_02()
test_random_crop_and_resize_03()
......
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomGrayscale op in DE
"""
import numpy as np
import mindspore.dataset.transforms.vision.py_transforms as py_vision
import mindspore.dataset as ds
from mindspore import log as logger
from util import save_and_check_md5, visualize, \
config_get_set_seed, config_get_set_num_parallel_workers
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def test_random_grayscale_valid_prob(plot=False):
    """
    Test RandomGrayscale Op: valid input, expect to pass.

    Uses prob=1 so every image in the first pipeline is converted to
    grayscale, while a decode-only second pipeline supplies the color
    originals for (optional) visual comparison.
    """
    logger.info("test_random_grayscale_valid_prob")

    # First dataset: prob is 1 so the output should always be grayscale images.
    gray_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomGrayscale(1),
        py_vision.ToTensor()
    ])
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=gray_compose())

    # Second dataset: untouched color images.
    color_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.ToTensor()
    ])
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=color_compose())

    image_gray = []
    image = []
    for gray_item, color_item in zip(data1.create_dict_iterator(),
                                     data2.create_dict_iterator()):
        # Convert CHW floats in [0, 1] back to HWC uint8 for plotting.
        image_gray.append(
            (gray_item["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
        image.append(
            (color_item["image"].transpose(1, 2, 0) * 255).astype(np.uint8))
    if plot:
        visualize(image, image_gray)
def test_random_grayscale_input_grayscale_images():
    """
    Test RandomGrayscale Op: valid parameter with grayscale images as input, expect to pass.

    Feeds single-channel (Grayscale(1)) images into RandomGrayscale and
    checks the channel counts of both pipelines stay as expected.
    """
    logger.info("test_random_grayscale_input_grayscale_images")
    # Pin seed/workers for determinism; originals restored at the end.
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # First dataset: RandomGrayscale applied to images that are already grayscale.
    gray_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.Grayscale(1),
        # Note: the input images here are grayscale images with 1 channel.
        py_vision.RandomGrayscale(0.5),
        py_vision.ToTensor()
    ])
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=gray_compose())

    # Second dataset: untouched 3-channel color images.
    color_compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.ToTensor()
    ])
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=color_compose())

    image_gray = []
    image = []
    for gray_item, color_item in zip(data1.create_dict_iterator(),
                                     data2.create_dict_iterator()):
        image1 = (gray_item["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (color_item["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_gray.append(image1)
        image.append(image2)
        # Grayscale pipeline stays single-channel; reference stays 3-channel.
        assert len(image1.shape) == 3
        assert image1.shape[2] == 1
        assert len(image2.shape) == 3
        assert image2.shape[2] == 3

    # Restore config
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_grayscale_md5_valid_input():
    """
    Test RandomGrayscale with md5 comparison: valid parameter, expect to pass.

    Seed and worker count are pinned so the pipeline output is
    deterministic and comparable against the stored golden md5.
    """
    logger.info("test_random_grayscale_md5_valid_input")
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Build the pipeline under test with prob=0.8.
    compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomGrayscale(0.8),
        py_vision.ToTensor()
    ])
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(input_columns=["image"], operations=compose())

    # Check output images with md5 comparison
    filename = "random_grayscale_01_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_grayscale_md5_no_param():
    """
    Test RandomGrayscale with md5 comparison: no parameter given, expect to pass.

    Exercises RandomGrayscale's default probability; seed and worker count
    are pinned so the output matches the stored golden md5.
    """
    logger.info("test_random_grayscale_md5_no_param")
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Build the pipeline under test using the default probability.
    compose = py_vision.ComposeOp([
        py_vision.Decode(),
        py_vision.RandomGrayscale(),
        py_vision.ToTensor()
    ])
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(input_columns=["image"], operations=compose())

    # Check output images with md5 comparison
    filename = "random_grayscale_02_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_grayscale_invalid_param():
    """
    Test RandomGrayscale: invalid parameter given, expect to raise error

    Bug fix: the original test silently passed when no exception was raised,
    so a regression that stopped validating the prob argument would go unnoticed.
    A flag now asserts that the ValueError actually occurred.
    """
    logger.info("test_random_grayscale_invalid_param")
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    raised = False
    try:
        # Note: Valid range of prob should be [0.0, 1.0]; 1.5 is out of range
        transforms = [
            py_vision.Decode(),
            py_vision.RandomGrayscale(1.5),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
    except ValueError as e:
        raised = True
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not within the required range" in str(e)
    # Fail the test if the invalid parameter did not trigger a ValueError
    assert raised, "RandomGrayscale(1.5) should have raised ValueError"
# Allow running the RandomGrayscale test cases directly as a script
# (visualization enabled for the first case only).
if __name__ == "__main__":
    test_random_grayscale_valid_prob(True)
    test_random_grayscale_input_grayscale_images()
    test_random_grayscale_md5_valid_input()
    test_random_grayscale_md5_no_param()
    test_random_grayscale_invalid_param()
......@@ -17,10 +17,14 @@ Testing the random horizontal flip op in DE
"""
import matplotlib.pyplot as plt
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as vision
import mindspore.dataset.transforms.vision.c_transforms as c_vision
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
from util import save_and_check_md5, visualize, diff_mse, \
config_get_set_seed, config_get_set_num_parallel_workers
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
......@@ -37,7 +41,7 @@ def h_flip(image):
return image
def visualize(image_de_random_horizontal, image_pil_random_horizontal, mse, image_original):
def visualize_mse(image_de_random_horizontal, image_pil_random_horizontal, mse, image_original):
"""
visualizes the image using DE op and Numpy op
"""
......@@ -61,14 +65,14 @@ def visualize(image_de_random_horizontal, image_pil_random_horizontal, mse, imag
def test_random_horizontal_op():
"""
Test random_horizontal
Test RandomHorizontalFlip op
"""
logger.info("Test random_horizontal")
logger.info("test_random_horizontal_op")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = vision.Decode()
random_horizontal_op = vision.RandomHorizontalFlip()
decode_op = c_vision.Decode()
random_horizontal_op = c_vision.RandomHorizontalFlip()
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_horizontal_op)
......@@ -84,17 +88,144 @@ def test_random_horizontal_op():
break
image_h_flipped = item1["image"]
image = item2["image"]
image_h_flipped_2 = h_flip(image)
diff = image_h_flipped - image_h_flipped_2
mse = np.sum(np.power(diff, 2))
mse = diff_mse(image_h_flipped, image_h_flipped_2)
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
# Uncomment below line if you want to visualize images
# visualize(image_h_flipped, image_h_flipped_2, mse, image)
# visualize_mse(image_h_flipped, image_h_flipped_2, mse, image)
num_iter += 1
def test_random_horizontal_valid_prob_c():
    """
    Test RandomHorizontalFlip op with c_transforms: valid non-default input, expect to pass
    """
    logger.info("test_random_horizontal_valid_prob_c")
    # Pin seed and worker count so the md5 golden comparison is deterministic
    seed_backup = config_get_set_seed(0)
    workers_backup = config_get_set_num_parallel_workers(1)

    # Build pipeline: decode, then flip horizontally with probability 0.8
    dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    dataset = dataset.map(input_columns=["image"], operations=c_vision.Decode())
    dataset = dataset.map(input_columns=["image"],
                          operations=c_vision.RandomHorizontalFlip(0.8))

    filename = "random_horizontal_01_c_result.npz"
    save_and_check_md5(dataset, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(seed_backup)
    ds.config.set_num_parallel_workers(workers_backup)
def test_random_horizontal_valid_prob_py():
    """
    Test RandomHorizontalFlip op with py_transforms: valid non-default input, expect to pass
    """
    logger.info("test_random_horizontal_valid_prob_py")
    # Pin seed and worker count so the md5 golden comparison is deterministic
    seed_backup = config_get_set_seed(0)
    workers_backup = config_get_set_num_parallel_workers(1)

    # Build pipeline: decode -> horizontal flip (p=0.8) -> tensor
    dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    op_list = [
        py_vision.Decode(),
        py_vision.RandomHorizontalFlip(0.8),
        py_vision.ToTensor()
    ]
    dataset = dataset.map(input_columns=["image"],
                          operations=py_vision.ComposeOp(op_list)())

    filename = "random_horizontal_01_py_result.npz"
    save_and_check_md5(dataset, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(seed_backup)
    ds.config.set_num_parallel_workers(workers_backup)
def test_random_horizontal_invalid_prob_c():
    """
    Test RandomHorizontalFlip op in c_transforms: invalid input, expect to raise error

    Bug fix: the original test silently passed when no exception was raised;
    a flag now asserts the ValueError actually occurred.
    """
    logger.info("test_random_horizontal_invalid_prob_c")
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    raised = False
    try:
        # Note: Valid range of prob should be [0.0, 1.0]
        random_horizontal_op = c_vision.RandomHorizontalFlip(1.5)
        data = data.map(input_columns=["image"], operations=decode_op)
        data = data.map(input_columns=["image"], operations=random_horizontal_op)
    except ValueError as e:
        raised = True
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not" in str(e)
    # Fail the test if the out-of-range prob did not trigger a ValueError
    assert raised, "RandomHorizontalFlip(1.5) should have raised ValueError"
def test_random_horizontal_invalid_prob_py():
    """
    Test RandomHorizontalFlip op in py_transforms: invalid input, expect to raise error

    Bug fix: the original test silently passed when no exception was raised;
    a flag now asserts the ValueError actually occurred.
    """
    logger.info("test_random_horizontal_invalid_prob_py")
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    raised = False
    try:
        transforms = [
            py_vision.Decode(),
            # Note: Valid range of prob should be [0.0, 1.0]
            py_vision.RandomHorizontalFlip(1.5),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
    except ValueError as e:
        raised = True
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not" in str(e)
    # Fail the test if the out-of-range prob did not trigger a ValueError
    assert raised, "RandomHorizontalFlip(1.5) should have raised ValueError"
def test_random_horizontal_comp(plot=False):
    """
    Test test_random_horizontal_flip and compare between python and c image augmentation ops
    """
    logger.info("test_random_horizontal_comp")
    # c_transforms pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    # Note: The image must be flipped if prob is set to be 1
    data1 = data1.map(input_columns=["image"], operations=c_vision.Decode())
    data1 = data1.map(input_columns=["image"],
                      operations=c_vision.RandomHorizontalFlip(1))

    # py_transforms pipeline for comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    compose = py_vision.ComposeOp([
        py_vision.Decode(),
        # Note: The image must be flipped if prob is set to be 1
        py_vision.RandomHorizontalFlip(1),
        py_vision.ToTensor()
    ])
    data2 = data2.map(input_columns=["image"], operations=compose())

    images_list_c = []
    images_list_py = []
    for item_c, item_py in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image_c = item_c["image"]
        # Convert the CHW float tensor back to HWC uint8 for comparison
        image_py = (item_py["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        images_list_c.append(image_c)
        images_list_py.append(image_py)

        # Check if the output images are the same
        mse = diff_mse(image_c, image_py)
        assert mse < 0.001
    if plot:
        visualize(images_list_c, images_list_py)
# Allow running the RandomHorizontalFlip test cases directly as a script
# (plotting enabled for the comparison case only).
if __name__ == "__main__":
    test_random_horizontal_op()
    test_random_horizontal_valid_prob_c()
    test_random_horizontal_valid_prob_py()
    test_random_horizontal_invalid_prob_c()
    test_random_horizontal_invalid_prob_py()
    test_random_horizontal_comp(True)
......@@ -19,8 +19,13 @@ import matplotlib.pyplot as plt
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as vision
import mindspore.dataset.transforms.vision.c_transforms as c_vision
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
from util import save_and_check_md5, visualize, diff_mse, \
config_get_set_seed, config_get_set_num_parallel_workers
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
......@@ -37,7 +42,7 @@ def v_flip(image):
return image
def visualize(image_de_random_vertical, image_pil_random_vertical, mse, image_original):
def visualize_with_mse(image_de_random_vertical, image_pil_random_vertical, mse, image_original):
"""
visualizes the image using DE op and Numpy op
"""
......@@ -61,14 +66,14 @@ def visualize(image_de_random_vertical, image_pil_random_vertical, mse, image_or
def test_random_vertical_op():
"""
Test random_vertical
Test random_vertical with default probability
"""
logger.info("Test random_vertical")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = vision.Decode()
random_vertical_op = vision.RandomVerticalFlip()
decode_op = c_vision.Decode()
random_vertical_op = c_vision.RandomVerticalFlip()
data1 = data1.map(input_columns=["image"], operations=decode_op)
data1 = data1.map(input_columns=["image"], operations=random_vertical_op)
......@@ -92,9 +97,139 @@ def test_random_vertical_op():
mse = np.sum(np.power(diff, 2))
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
# Uncomment below line if you want to visualize images
# visualize(image_v_flipped, image_v_flipped_2, mse, image)
# visualize_with_mse(image_v_flipped, image_v_flipped_2, mse, image)
num_iter += 1
def test_random_vertical_valid_prob_c():
    """
    Test RandomVerticalFlip op with c_transforms: valid non-default input, expect to pass

    Fix: the local op variable was misleadingly named random_horizontal_op
    (copy-paste from the horizontal-flip test); renamed to random_vertical_op.
    """
    logger.info("test_random_vertical_valid_prob_c")
    # Pin seed and worker count so the md5 golden comparison is deterministic
    original_seed = config_get_set_seed(0)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    random_vertical_op = c_vision.RandomVerticalFlip(0.8)
    data = data.map(input_columns=["image"], operations=decode_op)
    data = data.map(input_columns=["image"], operations=random_vertical_op)

    filename = "random_vertical_01_c_result.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_vertical_valid_prob_py():
    """
    Test RandomVerticalFlip op with py_transforms: valid non-default input, expect to pass
    """
    logger.info("test_random_vertical_valid_prob_py")
    # Pin seed and worker count so the md5 golden comparison is deterministic
    seed_backup = config_get_set_seed(0)
    workers_backup = config_get_set_num_parallel_workers(1)

    # Build pipeline: decode -> vertical flip (p=0.8) -> tensor
    dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    op_list = [
        py_vision.Decode(),
        py_vision.RandomVerticalFlip(0.8),
        py_vision.ToTensor()
    ]
    dataset = dataset.map(input_columns=["image"],
                          operations=py_vision.ComposeOp(op_list)())

    filename = "random_vertical_01_py_result.npz"
    save_and_check_md5(dataset, filename, generate_golden=GENERATE_GOLDEN)

    # Restore config setting
    ds.config.set_seed(seed_backup)
    ds.config.set_num_parallel_workers(workers_backup)
def test_random_vertical_invalid_prob_c():
    """
    Test RandomVerticalFlip op in c_transforms: invalid input, expect to raise error

    Fixes: (1) the test silently passed when no exception was raised; a flag now
    asserts the ValueError occurred. (2) the local op variable was misleadingly
    named random_horizontal_op in a vertical-flip test.
    """
    logger.info("test_random_vertical_invalid_prob_c")
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    raised = False
    try:
        # Note: Valid range of prob should be [0.0, 1.0]
        random_vertical_op = c_vision.RandomVerticalFlip(1.5)
        data = data.map(input_columns=["image"], operations=decode_op)
        data = data.map(input_columns=["image"], operations=random_vertical_op)
    except ValueError as e:
        raised = True
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not" in str(e)
    # Fail the test if the out-of-range prob did not trigger a ValueError
    assert raised, "RandomVerticalFlip(1.5) should have raised ValueError"
def test_random_vertical_invalid_prob_py():
    """
    Test RandomVerticalFlip op in py_transforms: invalid input, expect to raise error

    Bug fix: the original test silently passed when no exception was raised;
    a flag now asserts the ValueError actually occurred.
    """
    logger.info("test_random_vertical_invalid_prob_py")
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    raised = False
    try:
        transforms = [
            py_vision.Decode(),
            # Note: Valid range of prob should be [0.0, 1.0]
            py_vision.RandomVerticalFlip(1.5),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        data = data.map(input_columns=["image"], operations=transform())
    except ValueError as e:
        raised = True
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Input is not" in str(e)
    # Fail the test if the out-of-range prob did not trigger a ValueError
    assert raised, "RandomVerticalFlip(1.5) should have raised ValueError"
def test_random_vertical_comp(plot=False):
    """
    Test test_random_vertical_flip and compare between python and c image augmentation ops

    Fix: the local op variable was misleadingly named random_horizontal_op
    (copy-paste from the horizontal-flip test); renamed to random_vertical_op.
    """
    logger.info("test_random_vertical_comp")
    # First dataset: c_transforms pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    decode_op = c_vision.Decode()
    # Note: The image must be flipped if prob is set to be 1
    random_vertical_op = c_vision.RandomVerticalFlip(1)
    data1 = data1.map(input_columns=["image"], operations=decode_op)
    data1 = data1.map(input_columns=["image"], operations=random_vertical_op)

    # Second dataset: py_transforms pipeline for comparison
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    transforms = [
        py_vision.Decode(),
        # Note: The image must be flipped if prob is set to be 1
        py_vision.RandomVerticalFlip(1),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    data2 = data2.map(input_columns=["image"], operations=transform())

    images_list_c = []
    images_list_py = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image_c = item1["image"]
        # Convert the CHW float tensor back to HWC uint8 for comparison
        image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        images_list_c.append(image_c)
        images_list_py.append(image_py)

        # Check if the output images are the same
        mse = diff_mse(image_c, image_py)
        assert mse < 0.001
    if plot:
        visualize(images_list_c, images_list_py)
# Allow running the RandomVerticalFlip test cases directly as a script
# (plotting enabled for the comparison case only).
if __name__ == "__main__":
    test_random_vertical_op()
    test_random_vertical_valid_prob_c()
    test_random_vertical_valid_prob_py()
    test_random_vertical_invalid_prob_c()
    test_random_vertical_invalid_prob_py()
    test_random_vertical_comp(True)
......@@ -28,7 +28,7 @@ from mindspore import log as logger
from mindspore.dataset.transforms.vision import Inter
from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME
from util import config_get_set_num_parallel_workers
def test_imagefolder(remove_json_files=True):
"""
......@@ -176,6 +176,7 @@ def test_random_crop():
logger.info("test_random_crop")
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
......@@ -201,6 +202,9 @@ def test_random_crop():
assert np.array_equal(item1['image'], item1_1['image'])
_ = item2["image"]
# Restore configuration num_parallel_workers
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def validate_jsonfile(filepath):
try:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册