From 5cd313635583ab9bfa034e41126c0da387b17c26 Mon Sep 17 00:00:00 2001
From: tinazhang66
Date: Thu, 25 Jun 2020 11:31:35 -0400
Subject: [PATCH] remove locally defined mse and add missing mse/md5 validation

---
 .../dataset/golden/cut_out_01_c_result.npz     | Bin 0 -> 644 bytes
 .../dataset/golden/cut_out_01_py_result.npz    | Bin 0 -> 644 bytes
 .../dataset/golden/equalize_01_result.npz      | Bin 0 -> 713 bytes
 .../dataset/golden/five_crop_01_result.npz     | Bin 0 -> 644 bytes
 .../data/dataset/golden/invert_01_result.npz   | Bin 0 -> 713 bytes
 .../data/dataset/golden/pad_01_c_result.npz    | Bin 0 -> 644 bytes
 .../data/dataset/golden/pad_01_py_result.npz   | Bin 0 -> 644 bytes
 .../dataset/golden/random_color_01_result.npz  | Bin 0 -> 713 bytes
 .../random_color_adjust_01_c_result.npz        | Bin 0 -> 644 bytes
 .../random_color_adjust_01_py_result.npz       | Bin 0 -> 644 bytes
 .../random_crop_decode_resize_01_result.npz    | Bin 0 -> 644 bytes
 .../golden/random_erasing_01_result.npz        | Bin 0 -> 644 bytes
 .../golden/random_resize_01_result.npz         | Bin 0 -> 644 bytes
 .../golden/random_rotation_01_c_result.npz     | Bin 0 -> 644 bytes
 .../golden/random_rotation_01_py_result.npz    | Bin 0 -> 817 bytes
 .../golden/random_sharpness_01_result.npz      | Bin 0 -> 713 bytes
 .../data/dataset/golden/rescale_01_result.npz  | Bin 0 -> 644 bytes
 tests/ut/python/dataset/test_autocontrast.py   |   4 +-
 tests/ut/python/dataset/test_cut_out.py        | 105 ++++++++++++++--
 tests/ut/python/dataset/test_equalize.py       |  24 +++-
 tests/ut/python/dataset/test_five_crop.py      |  26 +++-
 tests/ut/python/dataset/test_invert.py         |  23 +++-
 .../dataset/test_linear_transformation.py      |  32 ++---
 tests/ut/python/dataset/test_pad.py            |  36 +++++-
 tests/ut/python/dataset/test_random_color.py   |  45 +++++--
 .../dataset/test_random_color_adjust.py        |  40 +++++-
 .../dataset/test_random_crop_and_resize.py     |   2 +
 .../dataset/test_random_crop_decode_resize.py  |  42 +++++--
 .../ut/python/dataset/test_random_erasing.py   |  31 ++++-
 .../dataset/test_random_horizontal_flip.py     |   3 +-
 tests/ut/python/dataset/test_random_resize.py  |  30 ++++-
 .../ut/python/dataset/test_random_rotation.py  | 116 ++++++++++++++++--
 .../python/dataset/test_random_sharpness.py    |  52 ++++++--
 .../dataset/test_random_vertical_flip.py       |   7 +-
 tests/ut/python/dataset/test_rescale_op.py     |  25 +++-
 .../ut/python/dataset/test_uniform_augment.py  |   6 +-
 36 files changed, 565 insertions(+), 84 deletions(-)
 create mode 100644 tests/ut/data/dataset/golden/cut_out_01_c_result.npz
 create mode 100644 tests/ut/data/dataset/golden/cut_out_01_py_result.npz
 create mode 100644 tests/ut/data/dataset/golden/equalize_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/five_crop_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/invert_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/pad_01_c_result.npz
 create mode 100644 tests/ut/data/dataset/golden/pad_01_py_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_color_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_color_adjust_01_c_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_color_adjust_01_py_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_crop_decode_resize_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_erasing_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_resize_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_rotation_01_c_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_rotation_01_py_result.npz
 create mode 100644 tests/ut/data/dataset/golden/random_sharpness_01_result.npz
 create mode 100644 tests/ut/data/dataset/golden/rescale_01_result.npz

[The 17 "GIT binary patch" hunks that add the new golden .npz files listed above are omitted here: they contain only base85-encoded binary literals of 644 to 817 bytes each and no human-readable content.]

diff --git a/tests/ut/python/dataset/test_autocontrast.py b/tests/ut/python/dataset/test_autocontrast.py
index 648ecf578..d212994e6 100644
--- a/tests/ut/python/dataset/test_autocontrast.py
+++ b/tests/ut/python/dataset/test_autocontrast.py
@@ -20,7 +20,7 @@ import numpy as np
 import mindspore.dataset.engine as de
 import mindspore.dataset.transforms.vision.py_transforms as F
 from mindspore import log as logger
-from util import visualize_list
+from util import visualize_list, diff_mse
 
 DATA_DIR = "../data/dataset/testImageNetData/train/"
 
@@ -75,7 +75,7 @@ def test_auto_contrast(plot=False):
     num_samples = images_original.shape[0]
     mse = np.zeros(num_samples)
     for i in range(num_samples):
-        mse[i] = np.mean((images_auto_contrast[i] - images_original[i]) ** 2)
+        mse[i] = diff_mse(images_auto_contrast[i], images_original[i])
     logger.info("MSE= {}".format(str(np.mean(mse))))
 
     if plot:
diff --git a/tests/ut/python/dataset/test_cut_out.py
b/tests/ut/python/dataset/test_cut_out.py index 483a939f6..862b1f33c 100644 --- a/tests/ut/python/dataset/test_cut_out.py +++ b/tests/ut/python/dataset/test_cut_out.py @@ -21,11 +21,13 @@ import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as c import mindspore.dataset.transforms.vision.py_transforms as f from mindspore import log as logger -from util import visualize_image, diff_mse +from util import visualize_image, visualize_list, diff_mse, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def test_cut_out_op(plot=False): """ @@ -34,7 +36,7 @@ def test_cut_out_op(plot=False): logger.info("test_cut_out") # First dataset - data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"]) + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms_1 = [ f.Decode(), @@ -45,7 +47,7 @@ def test_cut_out_op(plot=False): data1 = data1.map(input_columns=["image"], operations=transform_1()) # Second dataset - data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"]) + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c.Decode() cut_out_op = c.CutOut(80) @@ -74,25 +76,24 @@ def test_cut_out_op(plot=False): visualize_image(image_1, image_2, mse) -def test_cut_out_op_multicut(): +def test_cut_out_op_multicut(plot=False): """ Test Cutout """ logger.info("test_cut_out") # First dataset - data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"]) + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) transforms_1 = [ f.Decode(), f.ToTensor(), - f.RandomErasing(value='random') ] transform_1 = f.ComposeOp(transforms_1) data1 = data1.map(input_columns=["image"], operations=transform_1()) # Second dataset - data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"]) + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c.Decode() cut_out_op = c.CutOut(80, num_patches=10) @@ -104,19 +105,107 @@ def test_cut_out_op_multicut(): data2 = data2.map(input_columns=["image"], operations=transforms_2) num_iter = 0 + image_list_1, image_list_2 = [], [] + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + num_iter += 1 + image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + # C image doesn't require transpose + image_2 = item2["image"] + image_list_1.append(image_1) + image_list_2.append(image_2) + + logger.info("shape of image_1: {}".format(image_1.shape)) + logger.info("shape of image_2: {}".format(image_2.shape)) + + logger.info("dtype of image_1: {}".format(image_1.dtype)) + logger.info("dtype of image_2: {}".format(image_2.dtype)) + if plot: + visualize_list(image_list_1, image_list_2) + + +def test_cut_out_md5(): + """ + Test Cutout with md5 check + """ + logger.info("test_cut_out_md5") + original_seed = config_get_set_seed(2) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + decode_op = c.Decode() + cut_out_op = c.CutOut(100) + data1 = data1.map(input_columns=["image"], operations=decode_op) + data1 = data1.map(input_columns=["image"], operations=cut_out_op) + + data2 = 
ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + transforms = [ + f.Decode(), + f.ToTensor(), + f.Cutout(100) + ] + transform = f.ComposeOp(transforms) + data2 = data2.map(input_columns=["image"], operations=transform()) + + # Compare with expected md5 from images + filename1 = "cut_out_01_c_result.npz" + save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN) + filename2 = "cut_out_01_py_result.npz" + save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN) + + # Restore config + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers(original_num_parallel_workers) + + +def test_cut_out_comp(plot=False): + """ + Test Cutout with c++ and python op comparison + """ + logger.info("test_cut_out_comp") + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + + transforms_1 = [ + f.Decode(), + f.ToTensor(), + f.Cutout(200) + ] + transform_1 = f.ComposeOp(transforms_1) + data1 = data1.map(input_columns=["image"], operations=transform_1()) + + # Second dataset + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + + transforms_2 = [ + c.Decode(), + c.CutOut(200) + ] + + data2 = data2.map(input_columns=["image"], operations=transforms_2) + + num_iter = 0 + image_list_1, image_list_2 = [], [] for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): num_iter += 1 image_1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8) # C image doesn't require transpose image_2 = item2["image"] + image_list_1.append(image_1) + image_list_2.append(image_2) logger.info("shape of image_1: {}".format(image_1.shape)) logger.info("shape of image_2: {}".format(image_2.shape)) logger.info("dtype of image_1: {}".format(image_1.dtype)) logger.info("dtype of image_2: {}".format(image_2.dtype)) + if plot: + visualize_list(image_list_1, image_list_2, visualize_mode=2) if __name__ == "__main__": test_cut_out_op(plot=True) - test_cut_out_op_multicut() + test_cut_out_op_multicut(plot=True) + test_cut_out_md5() + test_cut_out_comp(plot=True) diff --git a/tests/ut/python/dataset/test_equalize.py b/tests/ut/python/dataset/test_equalize.py index 85cb25d9e..0a5f2f93d 100644 --- a/tests/ut/python/dataset/test_equalize.py +++ b/tests/ut/python/dataset/test_equalize.py @@ -20,10 +20,11 @@ import numpy as np import mindspore.dataset.engine as de import mindspore.dataset.transforms.vision.py_transforms as F from mindspore import log as logger -from util import visualize_list +from util import visualize_list, diff_mse, save_and_check_md5 DATA_DIR = "../data/dataset/testImageNetData/train/" +GENERATE_GOLDEN = False def test_equalize(plot=False): """ @@ -75,12 +76,31 @@ def test_equalize(plot=False): num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_equalize[i] - images_original[i]) ** 2) + mse[i] = diff_mse(images_equalize[i], images_original[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_equalize) +def test_equalize_md5(): + """ + Test Equalize with md5 check + """ + logger.info("Test Equalize") + + # First dataset + data1 = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + transforms = F.ComposeOp([F.Decode(), + F.Equalize(), + F.ToTensor()]) + + data1 = data1.map(input_columns="image", operations=transforms()) + # Compare with expected md5 from images + filename = "equalize_01_result.npz" + 
save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN) + + if __name__ == "__main__": test_equalize(plot=True) + test_equalize_md5() diff --git a/tests/ut/python/dataset/test_five_crop.py b/tests/ut/python/dataset/test_five_crop.py index 61632e398..ef2e376c0 100644 --- a/tests/ut/python/dataset/test_five_crop.py +++ b/tests/ut/python/dataset/test_five_crop.py @@ -20,11 +20,12 @@ import numpy as np import mindspore.dataset as ds import mindspore.dataset.transforms.vision.py_transforms as vision from mindspore import log as logger -from util import visualize_list +from util import visualize_list, save_and_check_md5 DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def test_five_crop_op(plot=False): """ @@ -63,7 +64,7 @@ def test_five_crop_op(plot=False): logger.info("dtype of image_1: {}".format(image_1.dtype)) logger.info("dtype of image_2: {}".format(image_2.dtype)) if plot: - visualize_list(np.array([image_1]*10), (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1)) + visualize_list(np.array([image_1]*5), (image_2 * 255).astype(np.uint8).transpose(0, 2, 3, 1)) # The output data should be of a 4D tensor shape, a stack of 5 images. assert len(image_2.shape) == 4 @@ -93,6 +94,27 @@ def test_five_crop_error_msg(): assert error_msg in str(info.value) +def test_five_crop_md5(): + """ + Test FiveCrop with md5 check + """ + logger.info("test_five_crop_md5") + + # First dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + transforms = [ + vision.Decode(), + vision.FiveCrop(100), + lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 5 images + ] + transform = vision.ComposeOp(transforms) + data = data.map(input_columns=["image"], operations=transform()) + # Compare with expected md5 from images + filename = "five_crop_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + if __name__ == "__main__": test_five_crop_op(plot=True) test_five_crop_error_msg() + test_five_crop_md5() diff --git a/tests/ut/python/dataset/test_invert.py b/tests/ut/python/dataset/test_invert.py index 8bdf63dd7..f366553c6 100644 --- a/tests/ut/python/dataset/test_invert.py +++ b/tests/ut/python/dataset/test_invert.py @@ -20,10 +20,11 @@ import numpy as np import mindspore.dataset.engine as de import mindspore.dataset.transforms.vision.py_transforms as F from mindspore import log as logger -from util import visualize_list +from util import visualize_list, save_and_check_md5 DATA_DIR = "../data/dataset/testImageNetData/train/" +GENERATE_GOLDEN = False def test_invert(plot=False): """ @@ -82,5 +83,25 @@ def test_invert(plot=False): visualize_list(images_original, images_invert) +def test_invert_md5(): + """ + Test Invert with md5 check + """ + logger.info("Test Invert with md5 check") + + # Generate dataset + ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + + transforms_invert = F.ComposeOp([F.Decode(), + F.Invert(), + F.ToTensor()]) + + data = ds.map(input_columns="image", operations=transforms_invert()) + # Compare with expected md5 from images + filename = "invert_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + if __name__ == "__main__": test_invert(plot=True) + test_invert_md5() diff --git a/tests/ut/python/dataset/test_linear_transformation.py b/tests/ut/python/dataset/test_linear_transformation.py index 
80153902a..0dd25a4da 100644 --- a/tests/ut/python/dataset/test_linear_transformation.py +++ b/tests/ut/python/dataset/test_linear_transformation.py @@ -73,12 +73,12 @@ def test_linear_transformation_op(plot=False): if plot: visualize_list(image, image_transformed) -def test_linear_transformation_md5_01(): +def test_linear_transformation_md5(): """ Test LinearTransformation op: valid params (transformation_matrix, mean_vector) Expected to pass """ - logger.info("test_linear_transformation_md5_01") + logger.info("test_linear_transformation_md5") # Initialize parameters height = 50 @@ -102,12 +102,12 @@ def test_linear_transformation_md5_01(): filename = "linear_transformation_01_result.npz" save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN) -def test_linear_transformation_md5_02(): +def test_linear_transformation_exception_01(): """ Test LinearTransformation op: transformation_matrix is not provided Expected to raise ValueError """ - logger.info("test_linear_transformation_md5_02") + logger.info("test_linear_transformation_exception_01") # Initialize parameters height = 50 @@ -130,12 +130,12 @@ def test_linear_transformation_md5_02(): logger.info("Got an exception in DE: {}".format(str(e))) assert "not provided" in str(e) -def test_linear_transformation_md5_03(): +def test_linear_transformation_exception_02(): """ Test LinearTransformation op: mean_vector is not provided Expected to raise ValueError """ - logger.info("test_linear_transformation_md5_03") + logger.info("test_linear_transformation_exception_02") # Initialize parameters height = 50 @@ -158,12 +158,12 @@ def test_linear_transformation_md5_03(): logger.info("Got an exception in DE: {}".format(str(e))) assert "not provided" in str(e) -def test_linear_transformation_md5_04(): +def test_linear_transformation_exception_03(): """ Test LinearTransformation op: transformation_matrix is not a square matrix Expected to raise ValueError """ - logger.info("test_linear_transformation_md5_04") + logger.info("test_linear_transformation_exception_03") # Initialize parameters height = 50 @@ -187,12 +187,12 @@ def test_linear_transformation_md5_04(): logger.info("Got an exception in DE: {}".format(str(e))) assert "square matrix" in str(e) -def test_linear_transformation_md5_05(): +def test_linear_transformation_exception_04(): """ Test LinearTransformation op: mean_vector does not match dimension of transformation_matrix Expected to raise ValueError """ - logger.info("test_linear_transformation_md5_05") + logger.info("test_linear_transformation_exception_04") # Initialize parameters height = 50 @@ -217,9 +217,9 @@ def test_linear_transformation_md5_05(): assert "should match" in str(e) if __name__ == '__main__': - test_linear_transformation_op(True) - test_linear_transformation_md5_01() - test_linear_transformation_md5_02() - test_linear_transformation_md5_03() - test_linear_transformation_md5_04() - test_linear_transformation_md5_05() + test_linear_transformation_op(plot=True) + test_linear_transformation_md5() + test_linear_transformation_exception_01() + test_linear_transformation_exception_02() + test_linear_transformation_exception_03() + test_linear_transformation_exception_04() diff --git a/tests/ut/python/dataset/test_pad.py b/tests/ut/python/dataset/test_pad.py index 7b66b6b36..a3038a4b9 100644 --- a/tests/ut/python/dataset/test_pad.py +++ b/tests/ut/python/dataset/test_pad.py @@ -21,11 +21,12 @@ import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as c_vision import 
mindspore.dataset.transforms.vision.py_transforms as py_vision from mindspore import log as logger -from util import diff_mse +from util import diff_mse, save_and_check_md5 DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def test_pad_op(): """ @@ -116,6 +117,39 @@ def test_pad_grayscale(): assert shape1[0:1] == shape2[0:1] +def test_pad_md5(): + """ + Test Pad with md5 check + """ + logger.info("test_pad_md5") + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + decode_op = c_vision.Decode() + pad_op = c_vision.Pad(150) + ctrans = [decode_op, + pad_op, + ] + + data1 = data1.map(input_columns=["image"], operations=ctrans) + + # Second dataset + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + pytrans = [ + py_vision.Decode(), + py_vision.Pad(150), + py_vision.ToTensor(), + ] + transform = py_vision.ComposeOp(pytrans) + data2 = data2.map(input_columns=["image"], operations=transform()) + # Compare with expected md5 from images + filename1 = "pad_01_c_result.npz" + save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN) + filename2 = "pad_01_py_result.npz" + save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN) + + if __name__ == "__main__": test_pad_op() test_pad_grayscale() + test_pad_md5() diff --git a/tests/ut/python/dataset/test_random_color.py b/tests/ut/python/dataset/test_random_color.py index 8ca0071e4..45847ba65 100644 --- a/tests/ut/python/dataset/test_random_color.py +++ b/tests/ut/python/dataset/test_random_color.py @@ -17,13 +17,16 @@ Testing RandomColor op in DE """ import numpy as np +import mindspore.dataset as ds import mindspore.dataset.engine as de import mindspore.dataset.transforms.vision.py_transforms as F from mindspore import log as logger -from util import visualize_list +from util import visualize_list, diff_mse, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = "../data/dataset/testImageNetData/train/" +GENERATE_GOLDEN = False def test_random_color(degrees=(0.1, 1.9), plot=False): """ @@ -32,14 +35,14 @@ def test_random_color(degrees=(0.1, 1.9), plot=False): logger.info("Test RandomColor") # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) transforms_original = F.ComposeOp([F.Decode(), F.Resize((224, 224)), F.ToTensor()]) - ds_original = ds.map(input_columns="image", - operations=transforms_original()) + ds_original = data.map(input_columns="image", + operations=transforms_original()) ds_original = ds_original.batch(512) @@ -52,15 +55,15 @@ def test_random_color(degrees=(0.1, 1.9), plot=False): axis=0) # Random Color Adjusted Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) transforms_random_color = F.ComposeOp([F.Decode(), F.Resize((224, 224)), F.RandomColor(degrees=degrees), F.ToTensor()]) - ds_random_color = ds.map(input_columns="image", - operations=transforms_random_color()) + ds_random_color = data.map(input_columns="image", + operations=transforms_random_color()) ds_random_color = ds_random_color.batch(512) @@ -75,14 +78,40 @@ def test_random_color(degrees=(0.1, 1.9), plot=False): num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in 
range(num_samples): - mse[i] = np.mean((images_random_color[i] - images_original[i]) ** 2) + mse[i] = diff_mse(images_random_color[i], images_original[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_random_color) +def test_random_color_md5(): + """ + Test RandomColor with md5 check + """ + logger.info("Test RandomColor with md5 check") + original_seed = config_get_set_seed(10) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # Generate dataset + data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + + transforms = F.ComposeOp([F.Decode(), + F.RandomColor((0.5, 1.5)), + F.ToTensor()]) + + data = data.map(input_columns="image", operations=transforms()) + # Compare with expected md5 from images + filename = "random_color_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers((original_num_parallel_workers)) + + if __name__ == "__main__": test_random_color() test_random_color(plot=True) test_random_color(degrees=(0.5, 1.5), plot=True) + test_random_color_md5() diff --git a/tests/ut/python/dataset/test_random_color_adjust.py b/tests/ut/python/dataset/test_random_color_adjust.py index f79137e14..3eb55043b 100644 --- a/tests/ut/python/dataset/test_random_color_adjust.py +++ b/tests/ut/python/dataset/test_random_color_adjust.py @@ -22,11 +22,13 @@ import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as c_vision import mindspore.dataset.transforms.vision.py_transforms as py_vision from mindspore import log as logger -from util import diff_mse, visualize_image +from util import diff_mse, visualize_image, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def util_test_random_color_adjust_error(brightness=(1, 1), contrast=(1, 1), saturation=(1, 1), hue=(0, 0)): """ @@ -188,6 +190,41 @@ def test_random_color_adjust_op_hue_error(): util_test_random_color_adjust_error(hue=(0.5, 0.5)) +def test_random_color_adjust_md5(): + """ + Test RandomColorAdjust with md5 check + """ + logger.info("Test RandomColorAdjust with md5 check") + original_seed = config_get_set_seed(10) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + decode_op = c_vision.Decode() + random_adjust_op = c_vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1) + data1 = data1.map(input_columns=["image"], operations=decode_op) + data1 = data1.map(input_columns=["image"], operations=random_adjust_op) + + # Second dataset + transforms = [ + py_vision.Decode(), + py_vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1), + py_vision.ToTensor() + ] + transform = py_vision.ComposeOp(transforms) + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + data2 = data2.map(input_columns=["image"], operations=transform()) + # Compare with expected md5 from images + filename = "random_color_adjust_01_c_result.npz" + save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN) + filename = "random_color_adjust_01_py_result.npz" + save_and_check_md5(data2, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + 
ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers(original_num_parallel_workers) + + if __name__ == "__main__": test_random_color_adjust_op_brightness(plot=True) test_random_color_adjust_op_brightness_error() @@ -197,3 +234,4 @@ if __name__ == "__main__": test_random_color_adjust_op_saturation_error() test_random_color_adjust_op_hue(plot=True) test_random_color_adjust_op_hue_error() + test_random_color_adjust_md5() diff --git a/tests/ut/python/dataset/test_random_crop_and_resize.py b/tests/ut/python/dataset/test_random_crop_and_resize.py index 8ccbb98c2..de039e6d8 100644 --- a/tests/ut/python/dataset/test_random_crop_and_resize.py +++ b/tests/ut/python/dataset/test_random_crop_and_resize.py @@ -331,6 +331,8 @@ def test_random_crop_and_resize_comp(plot=False): py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) image_c_cropped.append(c_image) image_py_cropped.append(py_image) + mse = diff_mse(c_image, py_image) + assert mse < 0.02 # rounding error if plot: visualize_list(image_c_cropped, image_py_cropped, visualize_mode=2) diff --git a/tests/ut/python/dataset/test_random_crop_decode_resize.py b/tests/ut/python/dataset/test_random_crop_decode_resize.py index 4a46851f9..c6125d4b6 100644 --- a/tests/ut/python/dataset/test_random_crop_decode_resize.py +++ b/tests/ut/python/dataset/test_random_crop_decode_resize.py @@ -15,16 +15,16 @@ """ Testing RandomCropDecodeResize op in DE """ -import cv2 - import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore import log as logger -from util import diff_mse, visualize_image +from util import diff_mse, visualize_image, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def test_random_crop_decode_resize_op(plot=False): """ @@ -40,22 +40,46 @@ def test_random_crop_decode_resize_op(plot=False): # Second dataset data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + random_crop_resize_op = vision.RandomResizedCrop((256, 512), (1, 1), (0.5, 0.5)) data2 = data2.map(input_columns=["image"], operations=decode_op) + data2 = data2.map(input_columns=["image"], operations=random_crop_resize_op) + num_iter = 0 for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): - if num_iter > 0: break - crop_and_resize_de = item1["image"] - original = item2["image"] - crop_and_resize_cv = cv2.resize(original, (512, 256)) - mse = diff_mse(crop_and_resize_de, crop_and_resize_cv) + image1 = item1["image"] + image2 = item2["image"] + mse = diff_mse(image1, image2) + assert mse == 0 logger.info("random_crop_decode_resize_op_{}, mse: {}".format(num_iter + 1, mse)) if plot: - visualize_image(original, crop_and_resize_de, mse, crop_and_resize_cv) + visualize_image(image1, image2, mse) num_iter += 1 +def test_random_crop_decode_resize_md5(): + """ + Test RandomCropDecodeResize with md5 check + """ + logger.info("Test RandomCropDecodeResize with md5 check") + original_seed = config_get_set_seed(10) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + random_crop_decode_resize_op = vision.RandomCropDecodeResize((256, 512), (1, 1), (0.5, 0.5)) + data = data.map(input_columns=["image"], 
operations=random_crop_decode_resize_op) + # Compare with expected md5 from images + filename = "random_crop_decode_resize_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers((original_num_parallel_workers)) + + if __name__ == "__main__": test_random_crop_decode_resize_op(plot=True) + test_random_crop_decode_resize_md5() diff --git a/tests/ut/python/dataset/test_random_erasing.py b/tests/ut/python/dataset/test_random_erasing.py index 842b4a15c..3265ac2a6 100644 --- a/tests/ut/python/dataset/test_random_erasing.py +++ b/tests/ut/python/dataset/test_random_erasing.py @@ -20,11 +20,13 @@ import numpy as np import mindspore.dataset as ds import mindspore.dataset.transforms.vision.py_transforms as vision from mindspore import log as logger -from util import diff_mse, visualize_image +from util import diff_mse, visualize_image, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def test_random_erasing_op(plot=False): """ @@ -69,5 +71,32 @@ def test_random_erasing_op(plot=False): visualize_image(image_1, image_2, mse) +def test_random_erasing_md5(): + """ + Test RandomErasing with md5 check + """ + logger.info("Test RandomErasing with md5 check") + original_seed = config_get_set_seed(5) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + transforms_1 = [ + vision.Decode(), + vision.ToTensor(), + vision.RandomErasing(value='random') + ] + transform_1 = vision.ComposeOp(transforms_1) + data = data.map(input_columns=["image"], operations=transform_1()) + # Compare with expected md5 from images + filename = "random_erasing_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers((original_num_parallel_workers)) + + if __name__ == "__main__": test_random_erasing_op(plot=True) + test_random_erasing_md5() diff --git a/tests/ut/python/dataset/test_random_horizontal_flip.py b/tests/ut/python/dataset/test_random_horizontal_flip.py index b6a4fef00..1272148e4 100644 --- a/tests/ut/python/dataset/test_random_horizontal_flip.py +++ b/tests/ut/python/dataset/test_random_horizontal_flip.py @@ -49,7 +49,7 @@ def test_random_horizontal_op(plot=False): # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() - random_horizontal_op = c_vision.RandomHorizontalFlip() + random_horizontal_op = c_vision.RandomHorizontalFlip(1.0) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_horizontal_op) @@ -69,6 +69,7 @@ def test_random_horizontal_op(plot=False): image_h_flipped_2 = h_flip(image) mse = diff_mse(image_h_flipped, image_h_flipped_2) + assert mse == 0 logger.info("image_{}, mse: {}".format(num_iter + 1, mse)) num_iter += 1 if plot: diff --git a/tests/ut/python/dataset/test_random_resize.py b/tests/ut/python/dataset/test_random_resize.py index c581712ac..1ac790ed1 100644 --- a/tests/ut/python/dataset/test_random_resize.py +++ b/tests/ut/python/dataset/test_random_resize.py @@ 
-13,16 +13,18 @@ # limitations under the License. # ============================================================================== """ -Testing the resize op in DE +Testing RandomResize op in DE """ import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore import log as logger -from util import visualize_list +from util import visualize_list, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def test_random_resize_op(plot=False): """ @@ -52,5 +54,29 @@ def test_random_resize_op(plot=False): visualize_list(image_original, image_resized) +def test_random_resize_md5(): + """ + Test RandomResize with md5 check + """ + logger.info("Test RandomResize with md5 check") + original_seed = config_get_set_seed(5) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # Generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + decode_op = vision.Decode() + resize_op = vision.RandomResize(10) + data = data.map(input_columns=["image"], operations=decode_op) + data = data.map(input_columns=["image"], operations=resize_op) + # Compare with expected md5 from images + filename = "random_resize_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers(original_num_parallel_workers) + + if __name__ == "__main__": test_random_resize_op(plot=True) + test_random_resize_md5() diff --git a/tests/ut/python/dataset/test_random_rotation.py b/tests/ut/python/dataset/test_random_rotation.py index d399dee00..a6efd3cce 100644 --- a/tests/ut/python/dataset/test_random_rotation.py +++ b/tests/ut/python/dataset/test_random_rotation.py @@ -21,18 +21,21 @@ import cv2 import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as c_vision import mindspore.dataset.transforms.vision.py_transforms as py_vision +from mindspore.dataset.transforms.vision.utils import Inter from mindspore import log as logger -from util import visualize_image, diff_mse +from util import visualize_image, visualize_list, diff_mse, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False -def test_random_rotation_op(plot=False): +def test_random_rotation_op_c(plot=False): """ - Test RandomRotation op + Test RandomRotation in c++ transformations op """ - logger.info("test_random_rotation_op") + logger.info("test_random_rotation_op_c") # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) @@ -58,8 +61,44 @@ def test_random_rotation_op(plot=False): logger.info("random_rotation_op_{}, mse: {}".format(num_iter + 1, mse)) assert mse == 0 num_iter += 1 - if plot: - visualize_image(original, rotation_de, mse, rotation_cv) + if plot: + visualize_image(original, rotation_de, mse, rotation_cv) + + +def test_random_rotation_op_py(plot=False): + """ + Test RandomRotation in python transformations op + """ + logger.info("test_random_rotation_op_py") + + # First dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) + # use [90, 90] to force rotate 90 
degrees, expand is set to be True to match output size + transform1 = py_vision.ComposeOp([py_vision.Decode(), + py_vision.RandomRotation((90, 90), expand=True), + py_vision.ToTensor()]) + data1 = data1.map(input_columns=["image"], operations=transform1()) + + # Second dataset + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + transform2 = py_vision.ComposeOp([py_vision.Decode(), + py_vision.ToTensor()]) + data2 = data2.map(input_columns=["image"], operations=transform2()) + + num_iter = 0 + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + if num_iter > 0: + break + rotation_de = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + logger.info("shape before rotate: {}".format(original.shape)) + rotation_cv = cv2.rotate(original, cv2.ROTATE_90_COUNTERCLOCKWISE) + mse = diff_mse(rotation_de, rotation_cv) + logger.info("random_rotation_op_{}, mse: {}".format(num_iter + 1, mse)) + assert mse == 0 + num_iter += 1 + if plot: + visualize_image(original, rotation_de, mse, rotation_cv) def test_random_rotation_expand(): @@ -71,7 +110,7 @@ def test_random_rotation_expand(): # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() - # use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size + # expand is set to be True to match output size random_rotation_op = c_vision.RandomRotation((0, 90), expand=True) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_rotation_op) @@ -83,9 +122,50 @@ def test_random_rotation_expand(): num_iter += 1 -def test_rotation_diff(): +def test_random_rotation_md5(): + """ + Test RandomRotation with md5 check + """ + logger.info("Test RandomRotation with md5 check") + original_seed = config_get_set_seed(5) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # Fisrt dataset + data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + decode_op = c_vision.Decode() + resize_op = c_vision.RandomRotation((0, 90), + expand=True, + resample=Inter.BILINEAR, + center=(50, 50), + fill_value=150) + data1 = data1.map(input_columns=["image"], operations=decode_op) + data1 = data1.map(input_columns=["image"], operations=resize_op) + + # Second dataset + data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) + transform2 = py_vision.ComposeOp([py_vision.Decode(), + py_vision.RandomRotation((0, 90), + expand=True, + resample=Inter.BILINEAR, + center=(50, 50), + fill_value=150), + py_vision.ToTensor()]) + data2 = data2.map(input_columns=["image"], operations=transform2()) + + # Compare with expected md5 from images + filename1 = "random_rotation_01_c_result.npz" + save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN) + filename2 = "random_rotation_01_py_result.npz" + save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers(original_num_parallel_workers) + + +def test_rotation_diff(plot=False): """ - Test Rotation op + Test RandomRotation op """ logger.info("test_random_rotation_op") @@ -93,7 +173,7 @@ def test_rotation_diff(): data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() - rotation_op = 
c_vision.RandomRotation((45, 45), expand=True) + rotation_op = c_vision.RandomRotation((45, 45)) ctrans = [decode_op, rotation_op ] @@ -103,7 +183,7 @@ def test_rotation_diff(): # Second dataset transforms = [ py_vision.Decode(), - py_vision.RandomRotation((45, 45), expand=True), + py_vision.RandomRotation((45, 45)), py_vision.ToTensor(), ] transform = py_vision.ComposeOp(transforms) @@ -111,10 +191,13 @@ def test_rotation_diff(): data2 = data2.map(input_columns=["image"], operations=transform()) num_iter = 0 + image_list_c, image_list_py = [], [] for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): num_iter += 1 c_image = item1["image"] py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) + image_list_c.append(c_image) + image_list_py.append(py_image) logger.info("shape of c_image: {}".format(c_image.shape)) logger.info("shape of py_image: {}".format(py_image.shape)) @@ -122,8 +205,15 @@ def test_rotation_diff(): logger.info("dtype of c_image: {}".format(c_image.dtype)) logger.info("dtype of py_image: {}".format(py_image.dtype)) + mse = diff_mse(c_image, py_image) + assert mse < 0.001 # Rounding error + if plot: + visualize_list(image_list_c, image_list_py, visualize_mode=2) + if __name__ == "__main__": - test_random_rotation_op(True) + test_random_rotation_op_c(plot=True) + test_random_rotation_op_py(plot=True) test_random_rotation_expand() - test_rotation_diff() + test_random_rotation_md5() + test_rotation_diff(plot=True) diff --git a/tests/ut/python/dataset/test_random_sharpness.py b/tests/ut/python/dataset/test_random_sharpness.py index 8689ae8ff..d8207ff09 100644 --- a/tests/ut/python/dataset/test_random_sharpness.py +++ b/tests/ut/python/dataset/test_random_sharpness.py @@ -16,14 +16,17 @@ Testing RandomSharpness op in DE """ import numpy as np - +import mindspore.dataset as ds import mindspore.dataset.engine as de import mindspore.dataset.transforms.vision.py_transforms as F from mindspore import log as logger -from util import visualize_list +from util import visualize_list, diff_mse, save_and_check_md5, \ + config_get_set_seed, config_get_set_num_parallel_workers DATA_DIR = "../data/dataset/testImageNetData/train/" +GENERATE_GOLDEN = False + def test_random_sharpness(degrees=(0.1, 1.9), plot=False): """ @@ -32,14 +35,14 @@ def test_random_sharpness(degrees=(0.1, 1.9), plot=False): logger.info("Test RandomSharpness") # Original Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) transforms_original = F.ComposeOp([F.Decode(), F.Resize((224, 224)), F.ToTensor()]) - ds_original = ds.map(input_columns="image", - operations=transforms_original()) + ds_original = data.map(input_columns="image", + operations=transforms_original()) ds_original = ds_original.batch(512) @@ -52,15 +55,15 @@ def test_random_sharpness(degrees=(0.1, 1.9), plot=False): axis=0) # Random Sharpness Adjusted Images - ds = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) transforms_random_sharpness = F.ComposeOp([F.Decode(), F.Resize((224, 224)), F.RandomSharpness(degrees=degrees), F.ToTensor()]) - ds_random_sharpness = ds.map(input_columns="image", - operations=transforms_random_sharpness()) + ds_random_sharpness = data.map(input_columns="image", + operations=transforms_random_sharpness()) ds_random_sharpness = ds_random_sharpness.batch(512) @@ -75,14 +78,45 @@ def 
test_random_sharpness(degrees=(0.1, 1.9), plot=False): num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_random_sharpness[i] - images_original[i]) ** 2) + mse[i] = diff_mse(images_random_sharpness[i], images_original[i]) + logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_random_sharpness) +def test_random_sharpness_md5(): + """ + Test RandomSharpness with md5 comparison + """ + logger.info("Test RandomSharpness with md5 comparison") + original_seed = config_get_set_seed(5) + original_num_parallel_workers = config_get_set_num_parallel_workers(1) + + # define map operations + transforms = [ + F.Decode(), + F.RandomSharpness((0.5, 1.5)), + F.ToTensor() + ] + transform = F.ComposeOp(transforms) + + # Generate dataset + data = de.ImageFolderDatasetV2(dataset_dir=DATA_DIR, shuffle=False) + data = data.map(input_columns=["image"], operations=transform()) + + # check results with md5 comparison + filename = "random_sharpness_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + # Restore configuration + ds.config.set_seed(original_seed) + ds.config.set_num_parallel_workers(original_num_parallel_workers) + + if __name__ == "__main__": test_random_sharpness() test_random_sharpness(plot=True) test_random_sharpness(degrees=(0.5, 1.5), plot=True) + test_random_sharpness_md5() diff --git a/tests/ut/python/dataset/test_random_vertical_flip.py b/tests/ut/python/dataset/test_random_vertical_flip.py index c09d9df22..2fc9b1277 100644 --- a/tests/ut/python/dataset/test_random_vertical_flip.py +++ b/tests/ut/python/dataset/test_random_vertical_flip.py @@ -49,7 +49,7 @@ def test_random_vertical_op(plot=False): # First dataset data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) decode_op = c_vision.Decode() - random_vertical_op = c_vision.RandomVerticalFlip() + random_vertical_op = c_vision.RandomVerticalFlip(1.0) data1 = data1.map(input_columns=["image"], operations=decode_op) data1 = data1.map(input_columns=["image"], operations=random_vertical_op) @@ -65,12 +65,11 @@ def test_random_vertical_op(plot=False): break image_v_flipped = item1["image"] - image = item2["image"] image_v_flipped_2 = v_flip(image) - diff = image_v_flipped - image_v_flipped_2 - mse = np.sum(np.power(diff, 2)) + mse = diff_mse(image_v_flipped, image_v_flipped_2) + assert mse == 0 logger.info("image_{}, mse: {}".format(num_iter + 1, mse)) num_iter += 1 if plot: diff --git a/tests/ut/python/dataset/test_rescale_op.py b/tests/ut/python/dataset/test_rescale_op.py index e6ccf17e8..a26f9a50f 100644 --- a/tests/ut/python/dataset/test_rescale_op.py +++ b/tests/ut/python/dataset/test_rescale_op.py @@ -18,11 +18,12 @@ Testing the rescale op in DE import mindspore.dataset as ds import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore import log as logger -from util import visualize_image, diff_mse +from util import visualize_image, diff_mse, save_and_check_md5 DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" +GENERATE_GOLDEN = False def rescale_np(image): """ @@ -72,11 +73,33 @@ def test_rescale_op(plot=False): image_de_rescaled = item2["image"] image_np_rescaled = get_rescaled(num_iter) mse = diff_mse(image_de_rescaled, image_np_rescaled) + assert mse < 0.001 # rounding error logger.info("image_{}, mse: {}".format(num_iter + 1, mse)) 
num_iter += 1 if plot: visualize_image(image_original, image_de_rescaled, mse, image_np_rescaled) +def test_rescale_md5(): + """ + Test Rescale with md5 comparison + """ + logger.info("Test Rescale with md5 comparison") + + # generate dataset + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) + decode_op = vision.Decode() + rescale_op = vision.Rescale(1.0 / 255.0, -1.0) + + # apply map operations on images + data = data.map(input_columns=["image"], operations=decode_op) + data = data.map(input_columns=["image"], operations=rescale_op) + + # check results with md5 comparison + filename = "rescale_01_result.npz" + save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) + + if __name__ == "__main__": test_rescale_op(plot=True) + test_rescale_md5() diff --git a/tests/ut/python/dataset/test_uniform_augment.py b/tests/ut/python/dataset/test_uniform_augment.py index 26bca3bd0..a26b64726 100644 --- a/tests/ut/python/dataset/test_uniform_augment.py +++ b/tests/ut/python/dataset/test_uniform_augment.py @@ -21,7 +21,7 @@ import mindspore.dataset.engine as de import mindspore.dataset.transforms.vision.c_transforms as C import mindspore.dataset.transforms.vision.py_transforms as F from mindspore import log as logger -from util import visualize_list +from util import visualize_list, diff_mse DATA_DIR = "../data/dataset/testImageNetData/train/" @@ -83,7 +83,7 @@ def test_uniform_augment(plot=False, num_ops=2): num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_ua[i] - images_original[i]) ** 2) + mse[i] = diff_mse(images_ua[i], images_original[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: @@ -147,7 +147,7 @@ def test_cpp_uniform_augment(plot=False, num_ops=2): num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): - mse[i] = np.mean((images_ua[i] - images_original[i]) ** 2) + mse[i] = diff_mse(images_ua[i], images_original[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) -- GitLab
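
The tests changed in this patch all go through a small set of shared helpers imported from util: diff_mse, save_and_check_md5, config_get_set_seed and config_get_set_num_parallel_workers. The sketch below illustrates what those helpers are assumed to do, inferred only from how the patch calls them; the helper names and signatures come from the patch itself, but the bodies, the golden-file path and the npz key name are assumptions, not the actual MindSpore util.py implementation.

# Illustrative sketch of the util helpers used by this patch, inferred from
# their call sites above. Bodies are assumptions, not the MindSpore originals.
import hashlib
import os

import numpy as np
import mindspore.dataset as ds


def diff_mse(in1, in2):
    # Mean squared error between two images; the patch uses this in place of
    # the locally defined np.mean((a - b) ** 2) expressions it removes.
    in1 = np.asarray(in1, dtype=np.float64)
    in2 = np.asarray(in2, dtype=np.float64)
    return np.mean((in1 - in2) ** 2)


def config_get_set_seed(new_seed):
    # Return the current global seed and install a new one, so a test can
    # restore the original value with ds.config.set_seed() afterwards.
    original_seed = ds.config.get_seed()
    ds.config.set_seed(new_seed)
    return original_seed


def config_get_set_num_parallel_workers(new_num):
    # Same get-then-set pattern for the number of parallel workers.
    original_num = ds.config.get_num_parallel_workers()
    ds.config.set_num_parallel_workers(new_num)
    return original_num


def save_and_check_md5(data, filename, generate_golden=False):
    # Iterate the pipeline, hash every produced tensor, and compare the
    # digests against a golden .npz file, or regenerate the golden file
    # when generate_golden is True. Path and key name here are assumptions.
    golden_path = os.path.join("../data/dataset/golden", filename)
    digests = []
    for item in data.create_dict_iterator():
        for column in item.values():
            digests.append(hashlib.md5(np.asarray(column).tobytes()).hexdigest())
    digests = np.array(digests)
    if generate_golden:
        np.savez(golden_path, md5=digests)
    golden = np.load(golden_path)["md5"]
    np.testing.assert_array_equal(digests, golden)

With this layout, each test module keeps a module-level GENERATE_GOLDEN = False flag and passes it through as generate_golden, so a golden .npz can be regenerated after an intentional behaviour change by flipping that flag, running the affected test once, and flipping it back; the patch checks in the flag as False so that CI always compares against the committed golden files.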