From c3de5c7a54b447a550f2fb9ecac5f2f2d273d0d8 Mon Sep 17 00:00:00 2001 From: Tinazhang Date: Mon, 1 Jun 2020 15:03:05 -0400 Subject: [PATCH] adding new TCs to six ops and skip md5 case in RandomPerspective --- .../dataset/golden/normalize_01_c_result.npz | Bin 0 -> 644 bytes .../dataset/golden/normalize_01_py_result.npz | Bin 0 -> 644 bytes .../dataset/golden/normalize_02_py_result.npz | Bin 0 -> 644 bytes .../dataset/golden/normalize_03_py_result.npz | Bin 0 -> 644 bytes .../dataset/golden/normalize_04_py_result.npz | Bin 0 -> 644 bytes .../golden/random_affine_01_result.npz | Bin 0 -> 644 bytes .../dataset/golden/random_apply_01_result.npz | Bin 0 -> 644 bytes .../dataset/golden/random_order_01_result.npz | Bin 0 -> 644 bytes .../golden/random_perspective_01_result.npz | Bin 0 -> 644 bytes tests/ut/python/dataset/test_center_crop.py | 4 +- tests/ut/python/dataset/test_normalizeOp.py | 308 +++++++++++++++--- tests/ut/python/dataset/test_random_affine.py | 207 ++++++++++++ tests/ut/python/dataset/test_random_apply.py | 133 ++++++++ tests/ut/python/dataset/test_random_choice.py | 136 ++++++++ tests/ut/python/dataset/test_random_order.py | 100 ++++++ .../python/dataset/test_random_perspective.py | 128 ++++++++ 16 files changed, 973 insertions(+), 43 deletions(-) create mode 100644 tests/ut/data/dataset/golden/normalize_01_c_result.npz create mode 100644 tests/ut/data/dataset/golden/normalize_01_py_result.npz create mode 100644 tests/ut/data/dataset/golden/normalize_02_py_result.npz create mode 100644 tests/ut/data/dataset/golden/normalize_03_py_result.npz create mode 100644 tests/ut/data/dataset/golden/normalize_04_py_result.npz create mode 100644 tests/ut/data/dataset/golden/random_affine_01_result.npz create mode 100644 tests/ut/data/dataset/golden/random_apply_01_result.npz create mode 100644 tests/ut/data/dataset/golden/random_order_01_result.npz create mode 100644 tests/ut/data/dataset/golden/random_perspective_01_result.npz create mode 100644 
tests/ut/python/dataset/test_random_affine.py create mode 100644 tests/ut/python/dataset/test_random_apply.py create mode 100644 tests/ut/python/dataset/test_random_choice.py create mode 100644 tests/ut/python/dataset/test_random_order.py create mode 100644 tests/ut/python/dataset/test_random_perspective.py diff --git a/tests/ut/data/dataset/golden/normalize_01_c_result.npz b/tests/ut/data/dataset/golden/normalize_01_c_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..d397435ece37f9e8b45b926746bafd030281b7bb GIT binary patch literal 644 zcmWIWW@Zs#fB;2?r#5T3{xC8yfG{V62t#5~QM`d(UO^=zg8*0%q!1(t0+anheFGvH z8Oj){)l*W7lZ(`?6x3_{)pZoq)AEZ-iW2kU^NUhaLBei{ImM|!@#2icf>a=1!%#=T zNK;3lR)KuL)xeybSDIT;sh6Bzl&Y6onp2VqbZ=rMSA0=wa(-TMNl|HX30ENlL={(F z3PiS$(VL;Qkja@bsgSw7kR_;)Rl}PR2$|Xn*`P{ON-7IdxeD1Mn1J5&wgkx7IZ gSHc1M9|{`4X$78)0=!w-Kr)O#Xbz;!K>^GF0E6kRIRF3v literal 0 HcmV?d00001 diff --git a/tests/ut/data/dataset/golden/normalize_01_py_result.npz b/tests/ut/data/dataset/golden/normalize_01_py_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..9d794aa5f2331aca3728595469f9b43619c0922d GIT binary patch literal 644 zcmWIWW@Zs#fB;2?-{M<^{xC8yfG{V62t#5~QM`d(UO^=zg8*0%q!1(t0+anheFGvH z8Oj){)l*W7lZ(`?6x3_{)pZoq)AEZ-iW2kU^NUhaLBei{ImM|!@#2icf>a=1!%#=T zNK;3lR)KuL)xeybSDIT;sh6Bzl&Y6onp2VqbZ=rMSA0=wa(-TMNl|HX30ENlL={(F z3PiS$(VL;Qkja@bsgSw7kR_;)Rl}PR2$|Xn*`P{ON-7IdxeD1Mn1J5&wQMpf(R7=7il6Q6Z(+C7;u9uC$rq}Y6sjeqrj`_{C+P)vGct)V g<4QO{|3g6oIIX~wQGhoq8%Tx`2+e`CIVgY`0B$O;SO5S3 literal 0 HcmV?d00001 diff --git a/tests/ut/data/dataset/golden/normalize_02_py_result.npz b/tests/ut/data/dataset/golden/normalize_02_py_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..82ab777c10b0f8d4276f955cbe17d58934ec7c0d GIT binary patch literal 644 zcmWIWW@Zs#fB;2?qrQLg{xC8yfG{V62t#5~QM`d(UO^=zg8*0%q!1(t0+anheFGvH z8Oj){)l*W7lZ(`?6x3_{)pZoq)AEZ-iW2kU^NUhaLBei{ImM|!@#2icf>a=1!%#=T 
zNK;3lR)KuL)xeybSDIT;sh6Bzl&Y6onp2VqbZ=rMSA0=wa(-TMNl|HX30ENlL={(F z3PiS$(VL;Qkja@bsgSw7kR_;)Rl}PR2$|Xn*`P{ON-7IdxeD1Mn1J5&wXeRSZw#yo(x{*+CtTmLbasS)RIE=B)tG{MkWzv gTnPv0e<)}GrxkcI3h-uS1IaJ~p*fH?2L&(#0C?7}x&QzG literal 0 HcmV?d00001 diff --git a/tests/ut/data/dataset/golden/normalize_03_py_result.npz b/tests/ut/data/dataset/golden/normalize_03_py_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..44db3250a6a7252627f329bee25882bccd2a08d5 GIT binary patch literal 644 zcmWIWW@Zs#fB;2?%^RL9{=>+?0K%LMA`FQ|MezoDc?Fe>3<6+5kV23o2u$`1^$mz* zWGG{(R!>PSPA*cnQc$n)SJzQcPs=YVDN4+X&o4?z1qr()<`ky_#fvi%3sQl64MQCT zBTXHJS_Sd}R|9i$UTJPYrCxG=QL0{UX--Kd(7lP3T=7Mz$@zK3B}JvlC0vCJ5LH}x zDG=F0MsJ4JLMCU%q(bKQLYAOHRt;}PAY^JQWP>VADXA<-VkKmaDZ?Mn)Ik~(7xc{L(ff%2KTiRr0@e6fZ68W{{3 zU@HYOfc6On6$*juV`(iE&VX2uW>P2uu|PDaPz+{)O`$ld1q>yH5=ouT0<#UY6QsQx zPj_FfHoq7A?0KPNNug8{%o1skB{D&UvQSIpGQ@y-rXPBmFg0uY>gQ#`xwhH4W`*)4 zg$i&ziXc5oL50dtJt|;5H^Ss*o2t%#b8E}dNwcSHolvM+QmB@cnp#q*o}?Gx&B!Fe hj4R;){SO5V;IslyMgiWeY#06mOuHS5V2wAOIEwDFjJ^z+}Hr-+)L) zhBAg~^_0}&LuqFrRwFD=9FXt-J4j+6Hq)$ literal 0 HcmV?d00001 diff --git a/tests/ut/data/dataset/golden/random_affine_01_result.npz b/tests/ut/data/dataset/golden/random_affine_01_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..85a887961d88bf0f1c60d8fba3c13fa6d80f3979 GIT binary patch literal 644 zcmWIWW@Zs#fB;2?_ix;%{9$BZ0AWrB5r)K~qId(nyn;$b1_7`jNFhiP1Sb21`UXTY zGL$h?tEZ$ECl{$(DX7=@tLrGJr{x!w6eZ@x=NF}8(>1*t&3hM|sv zk*1D9tpfRgtARN=uQa!yQZG5bC{-`FG^ZpJ=-$LiuK1$VQXzADAxlsptA;lt5HhtDvO$%mlvEa^auu>iFaf>kZ&AqM&EU=0 zUdS0#$fe=U90BrwO(D0RpP$!%AOI8I_9cZpNu9BUyc!X#K>5tv#PrlczSu&3jSPki zu$2NCK>GxP3WY%Sv9uNnXFx1SGbt2-SRfiyC;8ayse z<$rHoHcFhebzPxkNug8{%o1skB{D&UvQSIpGQ@y-eBOFFte(@}_$V&$OX{P~`wQhu z3Kigb6hV5Ff(n(PdQ`xASl_y3%#{(i$ag)2DbL7pa-nKTp;}UEYDuAbl3su}Ba;X- gu7m^hKNK{8(+WHp1$eWvfn*qg&>Tpcg94ZV0El|8r2qf` literal 0 HcmV?d00001 diff --git a/tests/ut/data/dataset/golden/random_apply_01_result.npz 
b/tests/ut/data/dataset/golden/random_apply_01_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..df6bda9cd07e851939b1773d8222381c22e82ed4 GIT binary patch literal 644 zcmWIWW@Zs#fB;2?GYe&0{xC8yfG{V62t#5~QM`d(UO^=zg8*0%q!1(t0+anheFGvH z8Oj){)l*W7lZ(`?6x3_{)pZoq)AEZ-iW2kU^NUhaLBei{ImM|!@#2icf>a=1!%#=T zNK;3lR)KuL)xeybSDIT;sh6Bzl&Y6onp2VqbZ=rMSA0=wa(-TMNl|HX30ENlL={(F z3PiS$(VL;Qkja@bsgSw7kR_;)Rl}PR2$|Xn*`P{ON-7IdxeD1Mn1J5&w8(>1*t&3hM|sv zk*1D9tpfRgtARN=uQa!yQZG5bC{-`FG^ZpJ=-$LiuK1$VQXzADAxlsptA;lt5HhtDvO$%mlvEa^auu>iFaf>kZ&AqM&EU=0 zUdS0#$fe=U90BrwO(D0RpP$!%AOI8I_9cZpNu9BUyc!X#K>5tv#PrlczSu&3jSPki zu$2NCK>GxP3WY%Sv9uNnXFx1SGbt2-SRfiyCw=d5R4&YUdHan3EKtWdS2P%SAnwWLrzNiV>gkx7IZ gSHc1M9|{`4X$78)0=!w-Kr)O#Xbz;!K>^GF02<=0a=1!%#=T zNK;3lR)KuL)xeybSDIT;sh6Bzl&Y6onp2VqbZ=rMSA0=wa(-TMNl|HX30ENlL={(F z3PiS$(VL;Qkja@bsgSw7kR_;)Rl}PR2$|Xn*`P{ON-7IdxeD1Mn1J5&w>Q*GKnzb fN;p9OLqP*Lt-zB}fHx}}NQMyz&4IKzD1aFNHt4RQ literal 0 HcmV?d00001 diff --git a/tests/ut/python/dataset/test_center_crop.py b/tests/ut/python/dataset/test_center_crop.py index 33ba89e73..e741a6d4a 100644 --- a/tests/ut/python/dataset/test_center_crop.py +++ b/tests/ut/python/dataset/test_center_crop.py @@ -141,9 +141,9 @@ def test_crop_grayscale(height=375, width=375): if __name__ == "__main__": - test_center_crop_op(600, 600, True) + test_center_crop_op(600, 600, plot=True) test_center_crop_op(300, 600) test_center_crop_op(600, 300) test_center_crop_md5() - test_center_crop_comp(True) + test_center_crop_comp(plot=True) test_crop_grayscale() diff --git a/tests/ut/python/dataset/test_normalizeOp.py b/tests/ut/python/dataset/test_normalizeOp.py index 5c9c7aff9..994f3cdb9 100644 --- a/tests/ut/python/dataset/test_normalizeOp.py +++ b/tests/ut/python/dataset/test_normalizeOp.py @@ -12,26 +12,54 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== - +""" +Testing Normalize op in DE +""" import numpy as np - +import matplotlib.pyplot as plt import mindspore.dataset as ds -import mindspore.dataset.transforms.vision.c_transforms as vision +import mindspore.dataset.transforms.vision.c_transforms as c_vision +import mindspore.dataset.transforms.vision.py_transforms as py_vision from mindspore import log as logger +from util import diff_mse, save_and_check_md5 DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False + + +def visualize_mse(image_de_normalized, image_np_normalized, mse, image_original): + """ + visualizes the image using DE op and Numpy op + """ + plt.subplot(141) + plt.imshow(image_original) + plt.title("Original image") + + plt.subplot(142) + plt.imshow(image_de_normalized) + plt.title("DE normalized image") + + plt.subplot(143) + plt.imshow(image_np_normalized) + plt.title("Numpy normalized image") + + plt.subplot(144) + plt.imshow(image_de_normalized - image_np_normalized) + plt.title("Difference image, mse : {}".format(mse)) + plt.show() + -def normalize_np(image): +def normalize_np(image, mean, std): """ Apply the normalization """ # DE decodes the image in RGB by deafult, hence # the values here are in RGB image = np.array(image, np.float32) - image = image - np.array([121.0, 115.0, 100.0]) - image = image * (1.0 / np.array([70.0, 68.0, 71.0])) + image = image - np.array(mean) + image = image * (1.0 / np.array(std)) return image @@ -41,7 +69,7 @@ def get_normalized(image_id): Reads the image using DE ops and then normalizes using Numpy """ data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) - decode_op = vision.Decode() + decode_op = c_vision.Decode() data1 = data1.map(input_columns=["image"], operations=decode_op) num_iter = 0 for item in 
data1.create_dict_iterator(): @@ -52,15 +80,61 @@ def get_normalized(image_id): return None -def test_normalize_op(): +def util_test_normalize(mean, std, op_type): """ - Test Normalize + Utility function for testing Normalize. Input arguments are given by other tests """ - logger.info("Test Normalize") + if op_type == "cpp": + # define map operations + decode_op = c_vision.Decode() + normalize_op = c_vision.Normalize(mean, std) + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data = data.map(input_columns=["image"], operations=decode_op) + data = data.map(input_columns=["image"], operations=normalize_op) + elif op_type == "python": + # define map operations + transforms = [ + py_vision.Decode(), + py_vision.ToTensor(), + py_vision.Normalize(mean, std) + ] + transform = py_vision.ComposeOp(transforms) + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data = data.map(input_columns=["image"], operations=transform()) + else: + raise ValueError("Wrong parameter value") + return data + +def util_test_normalize_grayscale(num_output_channels, mean, std): + """ + Utility function for testing Normalize. 
Input arguments are given by other tests + """ + transforms = [ + py_vision.Decode(), + py_vision.Grayscale(num_output_channels), + py_vision.ToTensor(), + py_vision.Normalize(mean, std) + ] + transform = py_vision.ComposeOp(transforms) + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data = data.map(input_columns=["image"], operations=transform()) + return data + + +def test_normalize_op_c(plot=False): + """ + Test Normalize in cpp transformations + """ + logger.info("Test Normalize in cpp") + mean = [121.0, 115.0, 100.0] + std = [70.0, 68.0, 71.0] # define map operations - decode_op = vision.Decode() - normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0]) + decode_op = c_vision.Decode() + normalize_op = c_vision.Normalize(mean, std) # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) @@ -74,36 +148,64 @@ def test_normalize_op(): num_iter = 0 for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): image_de_normalized = item1["image"] - image_np_normalized = normalize_np(item2["image"]) - diff = image_de_normalized - image_np_normalized - mse = np.sum(np.power(diff, 2)) + image_original = item2["image"] + image_np_normalized = normalize_np(image_original, mean, std) + mse = diff_mse(image_de_normalized, image_np_normalized) logger.info("image_{}, mse: {}".format(num_iter + 1, mse)) assert mse < 0.01 - # Uncomment these blocks to see visual results - # plt.subplot(131) - # plt.imshow(image_de_normalized) - # plt.title("DE normalize image") - # - # plt.subplot(132) - # plt.imshow(image_np_normalized) - # plt.title("Numpy normalized image") - # - # plt.subplot(133) - # plt.imshow(diff) - # plt.title("Difference image, mse : {}".format(mse)) - # - # plt.show() + if plot: + visualize_mse(image_de_normalized, image_np_normalized, mse, image_original) + num_iter += 1 + + +def test_normalize_op_py(plot=False): 
+ """ + Test Normalize in python transformations + """ + logger.info("Test Normalize in python") + mean = [0.475, 0.45, 0.392] + std = [0.275, 0.267, 0.278] + # define map operations + transforms = [ + py_vision.Decode(), + py_vision.ToTensor() + ] + transform = py_vision.ComposeOp(transforms) + normalize_op = py_vision.Normalize(mean, std) + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data1 = data1.map(input_columns=["image"], operations=transform()) + data1 = data1.map(input_columns=["image"], operations=normalize_op) + + # Second dataset + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data2 = data2.map(input_columns=["image"], operations=transform()) + + num_iter = 0 + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + image_de_normalized = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + image_np_normalized = (normalize_np(item2["image"].transpose(1, 2, 0), mean, std) * 255).astype(np.uint8) + image_original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + mse = diff_mse(image_de_normalized, image_np_normalized) + logger.info("image_{}, mse: {}".format(num_iter + 1, mse)) + assert mse < 0.01 + if plot: + visualize_mse(image_de_normalized, image_np_normalized, mse, image_original) num_iter += 1 def test_decode_op(): + """ + Test Decode op + """ logger.info("Test Decode") data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image", "label"], num_parallel_workers=1, shuffle=False) # define map operations - decode_op = vision.Decode() + decode_op = c_vision.Decode() # apply map operations on images data1 = data1.map(input_columns=["image"], operations=decode_op) @@ -112,22 +214,21 @@ def test_decode_op(): for item in data1.create_dict_iterator(): logger.info("Looping inside iterator {}".format(num_iter)) _ = item["image"] - # plt.subplot(131) - # plt.imshow(image) - # plt.title("DE 
image") - # plt.show() num_iter += 1 def test_decode_normalize_op(): + """ + Test Decode op followed by Normalize op + """ logger.info("Test [Decode, Normalize] in one Map") data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image", "label"], num_parallel_workers=1, shuffle=False) # define map operations - decode_op = vision.Decode() - normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0]) + decode_op = c_vision.Decode() + normalize_op = c_vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0]) # apply map operations on images data1 = data1.map(input_columns=["image"], operations=[decode_op, normalize_op]) @@ -136,14 +237,139 @@ def test_decode_normalize_op(): for item in data1.create_dict_iterator(): logger.info("Looping inside iterator {}".format(num_iter)) _ = item["image"] - # plt.subplot(131) - # plt.imshow(image) - # plt.title("DE image") - # plt.show() num_iter += 1 +def test_normalize_md5_01(): + """ + Test Normalize with md5 check: valid mean and std + expected to pass + """ + logger.info("test_normalize_md5_01") + data_c = util_test_normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], "cpp") + data_py = util_test_normalize([0.475, 0.45, 0.392], [0.275, 0.267, 0.278], "python") + + # check results with md5 comparison + filename1 = "normalize_01_c_result.npz" + filename2 = "normalize_01_py_result.npz" + save_and_check_md5(data_c, filename1, generate_golden=GENERATE_GOLDEN) + save_and_check_md5(data_py, filename2, generate_golden=GENERATE_GOLDEN) + + +def test_normalize_md5_02(): + """ + Test Normalize with md5 check: len(mean)=len(std)=1 with RGB images + expected to pass + """ + logger.info("test_normalize_md5_02") + data_py = util_test_normalize([0.475], [0.275], "python") + + # check results with md5 comparison + filename2 = "normalize_02_py_result.npz" + save_and_check_md5(data_py, filename2, generate_golden=GENERATE_GOLDEN) + + +def test_normalize_exception_unequal_size_c(): + """ + Test Normalize in c 
transformation: len(mean) != len(std) + expected to raise ValueError + """ + logger.info("test_normalize_exception_unequal_size_c") + try: + _ = c_vision.Normalize([100, 250, 125], [50, 50, 75, 75]) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "Length of mean and std must be equal" + + +def test_normalize_exception_unequal_size_py(): + """ + Test Normalize in python transformation: len(mean) != len(std) + expected to raise ValueError + """ + logger.info("test_normalize_exception_unequal_size_py") + try: + _ = py_vision.Normalize([0.50, 0.30, 0.75], [0.18, 0.32, 0.71, 0.72]) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "Length of mean and std must be equal" + + +def test_normalize_exception_invalid_size_py(): + """ + Test Normalize in python transformation: len(mean)=len(std)=2 + expected to raise RuntimeError + """ + logger.info("test_normalize_exception_invalid_size_py") + data = util_test_normalize([0.75, 0.25], [0.18, 0.32], "python") + try: + _ = data.create_dict_iterator().get_next() + except RuntimeError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert "Length of mean and std must both be 1 or" in str(e) + + +def test_normalize_exception_invalid_range_py(): + """ + Test Normalize in python transformation: value is not in range [0,1] + expected to raise ValueError + """ + logger.info("test_normalize_exception_invalid_range_py") + try: + _ = py_vision.Normalize([0.75, 1.25, 0.5], [0.1, 0.18, 1.32]) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert "Input is not within the required range" in str(e) + + +def test_normalize_grayscale_md5_01(): + """ + Test Normalize with md5 check: len(mean)=len(std)=1 with 1 channel grayscale images + expected to pass + """ + logger.info("test_normalize_grayscale_md5_01") + data = util_test_normalize_grayscale(1, [0.5], [0.175]) + # check results 
with md5 comparison + filename = "normalize_03_py_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + +def test_normalize_grayscale_md5_02(): + """ + Test Normalize with md5 check: len(mean)=len(std)=3 with 3 channel grayscale images + expected to pass + """ + logger.info("test_normalize_grayscale_md5_02") + data = util_test_normalize_grayscale(3, [0.5, 0.5, 0.5], [0.175, 0.235, 0.512]) + # check results with md5 comparison + filename = "normalize_04_py_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + +def test_normalize_grayscale_exception(): + """ + Test Normalize: len(mean)=len(std)=3 with 1 channel grayscale images + expected to raise RuntimeError + """ + logger.info("test_normalize_grayscale_exception") + try: + _ = util_test_normalize_grayscale(1, [0.5, 0.5, 0.5], [0.175, 0.235, 0.512]) + except RuntimeError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert "Input is not within the required range" in str(e) + + if __name__ == "__main__": test_decode_op() test_decode_normalize_op() - test_normalize_op() + test_normalize_op_c(plot=True) + test_normalize_op_py(plot=True) + test_normalize_md5_01() + test_normalize_md5_02() + test_normalize_exception_unequal_size_c() + test_normalize_exception_unequal_size_py() + test_normalize_exception_invalid_size_py() + test_normalize_exception_invalid_range_py() + test_normalize_grayscale_md5_01() + test_normalize_grayscale_md5_02() + test_normalize_grayscale_exception() diff --git a/tests/ut/python/dataset/test_random_affine.py b/tests/ut/python/dataset/test_random_affine.py new file mode 100644 index 000000000..807cd3cf4 --- /dev/null +++ b/tests/ut/python/dataset/test_random_affine.py @@ -0,0 +1,207 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +Testing RandomAffine op in DE +""" +import numpy as np +import mindspore.dataset as ds +import mindspore.dataset.transforms.vision.py_transforms as py_vision +from mindspore import log as logger +from util import visualize, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers + +GENERATE_GOLDEN = False + +DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] +SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" + + +def test_random_affine_op(plot=False): + """ + Test RandomAffine in python transformations + """ + logger.info("test_random_affine_op") + # define map operations + transforms1 = [ + py_vision.Decode(), + py_vision.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1)), + py_vision.ToTensor() + ] + transform1 = py_vision.ComposeOp(transforms1) + + transforms2 = [ + py_vision.Decode(), + py_vision.ToTensor() + ] + transform2 = py_vision.ComposeOp(transforms2) + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data1 = data1.map(input_columns=["image"], operations=transform1()) + # Second dataset + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data2 = data2.map(input_columns=["image"], operations=transform2()) + + image_affine = [] + image_original = [] + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + image1 = (item1["image"].transpose(1, 2, 
0) * 255).astype(np.uint8) + image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + image_affine.append(image1) + image_original.append(image2) + if plot: + visualize(image_original, image_affine) + + +def test_random_affine_md5(): + """ + Test RandomAffine with md5 comparison + """ + logger.info("test_random_affine_md5") + original_seed = config_get_set_seed(55) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + # define map operations + transforms = [ + py_vision.Decode(), + py_vision.RandomAffine(degrees=(-5, 15), translate=(0.1, 0.3), + scale=(0.9, 1.1), shear=(-10, 10, -5, 5)), + py_vision.ToTensor() + ] + transform = py_vision.ComposeOp(transforms) + + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data = data.map(input_columns=["image"], operations=transform()) + + # check results with md5 comparison + filename = "random_affine_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers((original_num_parallel_workers)) + + +def test_random_affine_exception_negative_degrees(): + """ + Test RandomAffine: input degrees in negative, expected to raise ValueError + """ + logger.info("test_random_affine_exception_negative_degrees") + try: + _ = py_vision.RandomAffine(degrees=-15) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "If degrees is a single number, it cannot be negative." 
+ + +def test_random_affine_exception_translation_range(): + """ + Test RandomAffine: translation value is not in [0, 1], expected to raise ValueError + """ + logger.info("test_random_affine_exception_translation_range") + try: + _ = py_vision.RandomAffine(degrees=15, translate=(0.1, 1.5)) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "translation values should be between 0 and 1" + + +def test_random_affine_exception_scale_value(): + """ + Test RandomAffine: scale is not positive, expected to raise ValueError + """ + logger.info("test_random_affine_exception_scale_value") + try: + _ = py_vision.RandomAffine(degrees=15, scale=(0.0, 1.1)) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "scale values should be positive" + + +def test_random_affine_exception_shear_value(): + """ + Test RandomAffine: shear is a number but is not positive, expected to raise ValueError + """ + logger.info("test_random_affine_exception_shear_value") + try: + _ = py_vision.RandomAffine(degrees=15, shear=-5) + except ValueError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "If shear is a single number, it must be positive." + + +def test_random_affine_exception_degrees_size(): + """ + Test RandomAffine: degrees is a list or tuple and its length is not 2, + expected to raise TypeError + """ + logger.info("test_random_affine_exception_degrees_size") + try: + _ = py_vision.RandomAffine(degrees=[15]) + except TypeError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "If degrees is a sequence, the length must be 2." 
+ + +def test_random_affine_exception_translate_size(): + """ + Test RandomAffine: translate is not list or a tuple of length 2, + expected to raise TypeError + """ + logger.info("test_random_affine_exception_translate_size") + try: + _ = py_vision.RandomAffine(degrees=15, translate=(0.1)) + except TypeError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "translate should be a list or tuple of length 2." + + +def test_random_affine_exception_scale_size(): + """ + Test RandomAffine: scale is not a list or tuple of length 2, + expected to raise TypeError + """ + logger.info("test_random_affine_exception_scale_size") + try: + _ = py_vision.RandomAffine(degrees=15, scale=(0.5)) + except TypeError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "scale should be a list or tuple of length 2." + + +def test_random_affine_exception_shear_size(): + """ + Test RandomAffine: shear is not a list or tuple of length 2 or 4, + expected to raise TypeError + """ + logger.info("test_random_affine_exception_shear_size") + try: + _ = py_vision.RandomAffine(degrees=15, shear=(-5, 5, 10)) + except TypeError as e: + logger.info("Got an exception in DE: {}".format(str(e))) + assert str(e) == "shear should be a list or tuple and it must be of length 2 or 4." 
+ + +if __name__ == "__main__": + test_random_affine_op(plot=True) + test_random_affine_md5() + test_random_affine_exception_negative_degrees() + test_random_affine_exception_translation_range() + test_random_affine_exception_scale_value() + test_random_affine_exception_shear_value() + test_random_affine_exception_degrees_size() + test_random_affine_exception_translate_size() + test_random_affine_exception_scale_size() + test_random_affine_exception_shear_size() diff --git a/tests/ut/python/dataset/test_random_apply.py b/tests/ut/python/dataset/test_random_apply.py new file mode 100644 index 000000000..b3795d509 --- /dev/null +++ b/tests/ut/python/dataset/test_random_apply.py @@ -0,0 +1,133 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""
Testing RandomApply op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
from util import visualize, config_get_set_seed, \
    config_get_set_num_parallel_workers, save_and_check_md5

GENERATE_GOLDEN = False

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"


def test_random_apply_op(plot=False):
    """
    Test RandomApply in python transformations.

    Runs the same images through a RandomApply pipeline and a decode-only
    baseline pipeline, and optionally visualizes both side by side.
    """
    logger.info("test_random_apply_op")
    # define map operations
    transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomApply(transforms_list, prob=0.6),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    # First dataset: RandomApply pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset: decode-only baseline
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_apply = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        # CHW float tensor in [0, 1] -> HWC uint8 image for visualization
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_apply.append(image1)
        image_original.append(image2)
    if plot:
        visualize(image_original, image_apply)


def test_random_apply_md5():
    """
    Test RandomApply op with md5 check against the golden result file.
    Seed and worker count are pinned so the pipeline output is deterministic.
    """
    logger.info("test_random_apply_md5")
    original_seed = config_get_set_seed(10)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    try:
        # define map operations
        transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
        transforms = [
            py_vision.Decode(),
            # Note: using default value "prob=0.5"
            py_vision.RandomApply(transforms_list),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)

        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        data = data.map(input_columns=["image"], operations=transform())

        # check results with md5 comparison
        filename = "random_apply_01_result.npz"
        save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
    finally:
        # Restore configuration even if the md5 comparison fails,
        # so later tests are not affected by the pinned seed/workers.
        ds.config.set_seed(original_seed)
        ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_random_apply_exception_random_crop_badinput():
    """
    Test RandomApply: invalid input for one of the transform functions
    (RandomCrop size larger than the image), expected to raise RuntimeError.
    """
    logger.info("test_random_apply_exception_random_crop_badinput")
    original_seed = config_get_set_seed(200)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    try:
        # define map operations
        transforms_list = [py_vision.Resize([32, 32]),
                           py_vision.RandomCrop(100),  # crop size > image size
                           py_vision.RandomRotation(30)]
        transforms = [
            py_vision.Decode(),
            py_vision.RandomApply(transforms_list, prob=0.6),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)
        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        data = data.map(input_columns=["image"], operations=transform())
        try:
            _ = data.create_dict_iterator().get_next()
            # Fix: previously the test passed silently when no exception was
            # raised (e.g. if RandomApply skipped the bad transform).
            # Seed 200 is expected to make RandomApply take the transform path.
            raise AssertionError("RandomCrop larger than the image should raise RuntimeError")
        except RuntimeError as e:
            logger.info("Got an exception in DE: {}".format(str(e)))
            assert "Crop size" in str(e)
    finally:
        # Restore configuration regardless of test outcome
        ds.config.set_seed(original_seed)
        ds.config.set_num_parallel_workers(original_num_parallel_workers)


if __name__ == '__main__':
    test_random_apply_op(plot=True)
    test_random_apply_md5()
    test_random_apply_exception_random_crop_badinput()
# ==============================================================================
"""
Testing RandomChoice op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
from util import visualize, diff_mse

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"


def test_random_choice_op(plot=False):
    """
    Test RandomChoice in python transformations.

    Runs the images through a RandomChoice pipeline and a decode-only
    baseline pipeline, and optionally visualizes both side by side.
    """
    logger.info("test_random_choice_op")
    # define map operations
    transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomChoice(transforms_list),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    # First dataset: RandomChoice pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset: decode-only baseline
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_choice = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        # CHW float tensor in [0, 1] -> HWC uint8 image for visualization
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_choice.append(image1)
        image_original.append(image2)
    if plot:
        visualize(image_original, image_choice)


def test_random_choice_comp(plot=False):
    """
    Test RandomChoice with a single-transform list and compare with a plain
    CenterCrop pipeline: with only one choice, RandomChoice must always apply
    it, so the two pipelines must produce identical images (mse == 0).
    """
    logger.info("test_random_choice_comp")
    # define map operations
    transforms_list = [py_vision.CenterCrop(64)]
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomChoice(transforms_list),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.CenterCrop(64),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    # First dataset: RandomChoice with a single CenterCrop
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset: plain CenterCrop
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_choice = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_choice.append(image1)
        image_original.append(image2)

        # Fix: compare every image pair, not only the last one after the loop.
        mse = diff_mse(image1, image2)
        assert mse == 0
    if plot:
        visualize(image_original, image_choice)


def test_random_choice_exception_random_crop_badinput():
    """
    Test RandomChoice: hit error in RandomCrop with greater crop size,
    expected to raise RuntimeError.
    """
    logger.info("test_random_choice_exception_random_crop_badinput")
    # define map operations
    # note: crop size[5000, 5000] > image size[4032, 2268]
    transforms_list = [py_vision.RandomCrop(5000)]
    transforms = [
        py_vision.Decode(),
        py_vision.RandomChoice(transforms_list),
        py_vision.ToTensor()
    ]
    transform = py_vision.ComposeOp(transforms)
    # Generate dataset
    data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data = data.map(input_columns=["image"], operations=transform())
    try:
        _ = data.create_dict_iterator().get_next()
        # Fix: previously the test passed silently when no exception was
        # raised. With a single choice the bad RandomCrop always runs.
        raise AssertionError("RandomCrop larger than the image should raise RuntimeError")
    except RuntimeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "Crop size" in str(e)


if __name__ == '__main__':
    test_random_choice_op(plot=True)
    test_random_choice_comp(plot=True)
    test_random_choice_exception_random_crop_badinput()
# ==============================================================================
"""
Testing RandomOrder op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore import log as logger
from util import visualize, diff_mse, config_get_set_seed, \
    config_get_set_num_parallel_workers, save_and_check_md5

GENERATE_GOLDEN = False

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"


def test_random_order_op(plot=False):
    """
    Test RandomOrder in python transformations.

    Runs the images through a RandomOrder pipeline and a decode-only
    baseline pipeline, and optionally visualizes both side by side.
    """
    logger.info("test_random_order_op")
    # define map operations
    transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomOrder(transforms_list),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    # First dataset: RandomOrder pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset: decode-only baseline
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_order = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        # CHW float tensor in [0, 1] -> HWC uint8 image for visualization
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_order.append(image1)
        image_original.append(image2)
    if plot:
        visualize(image_original, image_order)


def test_random_order_md5():
    """
    Test RandomOrder op with md5 check against the golden result file.
    Seed and worker count are pinned so the pipeline output is deterministic.
    """
    logger.info("test_random_order_md5")
    original_seed = config_get_set_seed(8)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    try:
        # define map operations
        transforms_list = [py_vision.RandomCrop(64), py_vision.RandomRotation(30)]
        transforms = [
            py_vision.Decode(),
            py_vision.RandomOrder(transforms_list),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)

        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        data = data.map(input_columns=["image"], operations=transform())

        # check results with md5 comparison
        filename = "random_order_01_result.npz"
        save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
    finally:
        # Restore configuration even if the md5 comparison fails.
        # (Also drops the redundant doubled parentheses from the original call.)
        ds.config.set_seed(original_seed)
        ds.config.set_num_parallel_workers(original_num_parallel_workers)


if __name__ == '__main__':
    test_random_order_op(plot=True)
    test_random_order_md5()
# ==============================================================================
"""
Testing RandomPerspective op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as py_vision
from mindspore.dataset.transforms.vision.utils import Inter
from mindspore import log as logger
from util import visualize, save_and_check_md5, \
    config_get_set_seed, config_get_set_num_parallel_workers


GENERATE_GOLDEN = False

DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"


def test_random_perspective_op(plot=False):
    """
    Test RandomPerspective in python transformations.

    Runs the images through a RandomPerspective pipeline and a decode-only
    baseline pipeline, and optionally visualizes both side by side.
    """
    logger.info("test_random_perspective_op")
    # define map operations
    transforms1 = [
        py_vision.Decode(),
        py_vision.RandomPerspective(),
        py_vision.ToTensor()
    ]
    transform1 = py_vision.ComposeOp(transforms1)

    transforms2 = [
        py_vision.Decode(),
        py_vision.ToTensor()
    ]
    transform2 = py_vision.ComposeOp(transforms2)

    # First dataset: RandomPerspective pipeline
    data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data1 = data1.map(input_columns=["image"], operations=transform1())
    # Second dataset: decode-only baseline
    data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
    data2 = data2.map(input_columns=["image"], operations=transform2())

    image_perspective = []
    image_original = []
    for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()):
        # CHW float tensor in [0, 1] -> HWC uint8 image for visualization
        image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
        image_perspective.append(image1)
        image_original.append(image2)
    if plot:
        visualize(image_original, image_perspective)


def skip_test_random_perspective_md5():
    """
    Test RandomPerspective with md5 comparison.
    NOTE: prefixed with skip_ so pytest does not collect it (md5 result is
    currently unstable for this op per the commit that added it).
    """
    logger.info("test_random_perspective_md5")
    original_seed = config_get_set_seed(5)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)
    try:
        # define map operations
        transforms = [
            py_vision.Decode(),
            py_vision.RandomPerspective(distortion_scale=0.3, prob=0.7,
                                        interpolation=Inter.BILINEAR),
            py_vision.ToTensor()
        ]
        transform = py_vision.ComposeOp(transforms)

        # Generate dataset
        data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
        data = data.map(input_columns=["image"], operations=transform())

        # check results with md5 comparison
        filename = "random_perspective_01_result.npz"
        save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
    finally:
        # Restore configuration even if the md5 comparison fails
        ds.config.set_seed(original_seed)
        ds.config.set_num_parallel_workers(original_num_parallel_workers)


def test_random_perspective_exception_distortion_scale_range():
    """
    Test RandomPerspective: distortion_scale is not in [0, 1],
    expected to raise ValueError.
    """
    logger.info("test_random_perspective_exception_distortion_scale_range")
    try:
        _ = py_vision.RandomPerspective(distortion_scale=1.5)
        # Fix: previously the test passed silently when no exception was raised.
        raise AssertionError("distortion_scale=1.5 should raise ValueError")
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert str(e) == "Input is not within the required range"


def test_random_perspective_exception_prob_range():
    """
    Test RandomPerspective: prob is not in [0, 1], expected to raise ValueError.
    """
    logger.info("test_random_perspective_exception_prob_range")
    try:
        _ = py_vision.RandomPerspective(prob=1.2)
        # Fix: previously the test passed silently when no exception was raised.
        raise AssertionError("prob=1.2 should raise ValueError")
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert str(e) == "Input is not within the required range"


if __name__ == "__main__":
    test_random_perspective_op(plot=True)
    skip_test_random_perspective_md5()
    test_random_perspective_exception_distortion_scale_range()
    test_random_perspective_exception_prob_range()