From 6112bd386c50163f39068ae36f67c8933db93545 Mon Sep 17 00:00:00 2001
From: haoyuying <35907364+haoyuying@users.noreply.github.com>
Date: Tue, 6 Apr 2021 11:35:46 +0800
Subject: [PATCH] Add deeplabv3 and hrnetw18.

---
 demo/semantic_segmentation/N0007.jpg          | Bin 0 -> 42230 bytes
 demo/semantic_segmentation/README.md          | 164 +++++
 demo/semantic_segmentation/predict.py         |   6 +
 demo/semantic_segmentation/train.py           |  16 +
 docs/docs_ch/reference/datasets.md            |  15 +
 .../deeplabv3p_resnet50_voc/layers.py         | 345 ++++++++++
 .../deeplabv3p_resnet50_voc/module.py         | 186 ++++++
 .../deeplabv3p_resnet50_voc/resnet.py         | 137 ++++
 .../ocrnet_hrnetw18_voc/hrnet.py              | 612 ++++++++++++++++++
 .../ocrnet_hrnetw18_voc/layers.py             | 345 ++++++++++
 .../ocrnet_hrnetw18_voc/module.py             | 243 +++++++
 paddlehub/datasets/__init__.py                |   2 +
 paddlehub/datasets/base_seg_dataset.py        | 141 ++++
 paddlehub/datasets/opticdiscseg.py            |  78 +++
 paddlehub/module/cv_module.py                 | 113 +++-
 paddlehub/vision/segmentation_transforms.py   | 307 +++++++++
 paddlehub/vision/utils.py                     | 120 +++-
 17 files changed, 2822 insertions(+), 8 deletions(-)
 create mode 100644 demo/semantic_segmentation/N0007.jpg
 create mode 100644 demo/semantic_segmentation/README.md
 create mode 100644 demo/semantic_segmentation/predict.py
 create mode 100644 demo/semantic_segmentation/train.py
 create mode 100644 modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/layers.py
 create mode 100644 modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/module.py
 create mode 100644 modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/resnet.py
 create mode 100644 modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/hrnet.py
 create mode 100644 modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/layers.py
 create mode 100644 modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/module.py
 create mode 100644 paddlehub/datasets/base_seg_dataset.py
 create mode 100644 paddlehub/datasets/opticdiscseg.py
 create mode 100644 paddlehub/vision/segmentation_transforms.py

diff --git a/demo/semantic_segmentation/N0007.jpg b/demo/semantic_segmentation/N0007.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..106a6939c20632c444ffec485f00d5a9553d57cd
GIT binary patch
literal 42230
[42230 bytes of binary JPEG payload omitted]
literal 0
HcmV?d00001

z@IL8SW2^Mqf#B(vVZ;@m)d==9j#v4SDo$ClVe)gC5`zI$W~`a=w09xd3U1r^;?(u6 z3N0sSYkY(#3bVavWUNpgSZ%zO&8cOotcWL&P2FEc zaKT7id%s`!H9LFvxgIRsFBNV+C*JcuYRj(V^qR@Li%P|D;7AW+ePMME{JQPrjxx=- zS!2scipHM3HL^`r8h#woI!PNc7SPs!cTZWi*0P-J5L}-v^c&y$(i8m9K>5t9?&EtI zIx8)p(XX9TIn#NhqClCMI@2o!ZRi50--l)g+K12$n%nL0{>iy-rrl>ros=f6$FMmY zV*9kgc8v*Bu0|@^wdq`PBhcDv&%NQ~tX7*91~O9v z{gh+Z|I)Kz^SINTJ95*^+J5vw?~jq~D)Zl)^V+~3uE;g;=iqq5XAWZCsQhDJVVoe( PK4>TSzuAdm|4jTpX;hbJ literal 0 HcmV?d00001 diff --git a/demo/semantic_segmentation/README.md b/demo/semantic_segmentation/README.md new file mode 100644 index 00000000..b490236a --- /dev/null +++ b/demo/semantic_segmentation/README.md @@ -0,0 +1,164 @@ +# PaddleHub 图像分割 + +本示例将展示如何使用PaddleHub对预训练模型进行finetune并完成预测任务。 + + +## 如何开始Fine-tune + +在完成安装PaddlePaddle与PaddleHub后,通过执行`python train.py`即可开始使用ocrnet_hrnetw18_voc模型对OpticDiscSeg等数据集进行Fine-tune。 + +## 代码步骤o + +使用PaddleHub Fine-tune API进行Fine-tune可以分为4个步骤。 + +### Step1: 定义数据预处理方式 +```python +from paddlehub.vision.segmentation_transforms import Compose, Resize, Normalize + +transform = Compose([Resize(target_size=(512, 512)), Normalize()]) +``` + +`segmentation_transforms` 数据增强模块定义了丰富的针对图像分割数据的预处理方式,用户可按照需求替换自己需要的数据预处理方式。 + +### Step2: 下载数据集并使用 +```python +from paddlehub.datasets import OpticDiscSeg + +train_reader = OpticDiscSeg(transform, mode='train') + +``` +* `transform`: 数据预处理方式。 +* `mode`: 选择数据模式,可选项有 `train`, `test`, `val`, 默认为`train`。 + +数据集的准备代码可以参考 [opticdiscseg.py](../../paddlehub/datasets/opticdiscseg.py)。`hub.datasets.OpticDiscSeg()`会自动从网络下载数据集并解压到用户目录下`$HOME/.paddlehub/dataset`目录。 + +### Step3: 加载预训练模型 + +```python +model = hub.Module(name='ocrnet_hrnetw18_voc', num_classes=2, pretrained=None) +``` +* `name`: 选择预训练模型的名字。 +* `num_classes`: 分割模型的类别数目。 +* `pretrained`: 是否加载自己训练的模型,若为None,则加载提供的模型默认参数。 + +### Step4: 选择优化策略和运行配置 + +```python +scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.01, decay_steps=1000, power=0.9, end_lr=0.0001) +optimizer = paddle.optimizer.Adam(learning_rate=scheduler, parameters=model.parameters()) +trainer = Trainer(model, optimizer, checkpoint_dir='test_ckpt_img_ocr', use_gpu=True) +``` + +#### 优化策略 + +Paddle2.0rc提供了多种优化器选择,如`SGD`, `Adam`, `Adamax`等,详细参见[策略](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-rc/api/paddle/optimizer/optimizer/Optimizer_cn.html)。 + +其中`Adam`: + +* `learning_rate`: 全局学习率。 +* `parameters`: 待优化模型参数。 + +#### 运行配置 +`Trainer` 主要控制Fine-tune的训练,包含以下可控制的参数: + +* `model`: 被优化模型; +* `optimizer`: 优化器选择; +* `use_gpu`: 是否使用gpu,默认为False; +* `use_vdl`: 是否使用vdl可视化训练过程; +* `checkpoint_dir`: 保存模型参数的地址; +* `compare_metrics`: 保存最优模型的衡量指标; + +`trainer.train` 主要控制具体的训练过程,包含以下可控制的参数: + +* `train_dataset`: 训练时所用的数据集; +* `epochs`: 训练轮数; +* `batch_size`: 训练的批大小,如果使用GPU,请根据实际情况调整batch_size; +* `num_workers`: works的数量,默认为0; +* `eval_dataset`: 验证集; +* `log_interval`: 打印日志的间隔, 单位为执行批训练的次数。 +* `save_interval`: 保存模型的间隔频次,单位为执行训练的轮数。 + +## 模型预测 + +当完成Fine-tune后,Fine-tune过程在验证集上表现最优的模型会被保存在`${CHECKPOINT_DIR}/best_model`目录下,其中`${CHECKPOINT_DIR}`目录为Fine-tune时所选择的保存checkpoint的目录。 + +我们使用该模型来进行预测。predict.py脚本如下: + +```python +import paddle +import cv2 +import paddlehub as hub + +if __name__ == '__main__': + model = hub.Module(name='ocrnet_hrnetw18_voc', pretrained='/PATH/TO/CHECKPOINT') + img = cv2.imread("/PATH/TO/IMAGE") + model.predict(images=[img], visualization=True) +``` + +参数配置正确后,请执行脚本`python predict.py`。 +**Args** +* `images`:原始图像路径或BGR格式图片; +* `visualization`: 是否可视化,默认为True; +* `save_path`: 保存结果的路径,默认保存路径为'seg_result'。 + +**NOTE:** 
+
+## Model Prediction
+
+When fine-tuning finishes, the model that performed best on the validation set is saved under `${CHECKPOINT_DIR}/best_model`, where `${CHECKPOINT_DIR}` is the checkpoint directory chosen for fine-tuning.
+
+We use this model for prediction. The predict.py script looks like this:
+
+```python
+import paddle
+import cv2
+import paddlehub as hub
+
+if __name__ == '__main__':
+    model = hub.Module(name='ocrnet_hrnetw18_voc', pretrained='/PATH/TO/CHECKPOINT')
+    img = cv2.imread("/PATH/TO/IMAGE")
+    model.predict(images=[img], visualization=True)
+```
+
+Once the parameters are configured, run the script with `python predict.py`.
+**Args**
+* `images`: paths to the input images, or images in BGR format;
+* `visualization`: whether to save visualized results; defaults to True;
+* `save_path`: the directory in which results are saved; defaults to 'seg_result'.
+
+**NOTE:** at prediction time, the module, checkpoint_dir and dataset must match the ones used for fine-tuning.
+
+## Serving Deployment
+
+PaddleHub Serving can deploy an online image segmentation service.
+
+### Step 1: Start PaddleHub Serving
+
+Run the start command:
+
+```shell
+$ hub serving start -m ocrnet_hrnetw18_voc
+```
+
+This deploys an image segmentation service API, listening on port 8866 by default.
+
+**NOTE:** to predict on the GPU, set the CUDA_VISIBLE_DEVICES environment variable before starting the service; otherwise it does not need to be set.
+
+### Step 2: Send a prediction request
+
+With the server configured, the following few lines of code send a prediction request and retrieve the result:
+
+```python
+import requests
+import json
+import cv2
+import base64
+
+import numpy as np
+
+
+def cv2_to_base64(image):
+    data = cv2.imencode('.jpg', image)[1]
+    # tobytes() replaces the deprecated ndarray.tostring()
+    return base64.b64encode(data.tobytes()).decode('utf8')
+
+def base64_to_cv2(b64str):
+    data = base64.b64decode(b64str.encode('utf8'))
+    # np.frombuffer() replaces the deprecated np.fromstring()
+    data = np.frombuffer(data, np.uint8)
+    data = cv2.imdecode(data, cv2.IMREAD_COLOR)
+    return data
+
+# send the HTTP request
+org_im = cv2.imread('/PATH/TO/IMAGE')
+data = {'images': [cv2_to_base64(org_im)]}
+headers = {"Content-type": "application/json"}
+url = "http://127.0.0.1:8866/predict/ocrnet_hrnetw18_voc"
+r = requests.post(url=url, headers=headers, data=json.dumps(data))
+mask = base64_to_cv2(r.json()["results"][0])
+```
+
+### Source Code
+
+https://github.com/PaddlePaddle/PaddleSeg
+
+### Dependencies
+
+paddlepaddle >= 2.0.0rc
+
+paddlehub >= 2.0.0
+
+
diff --git a/demo/semantic_segmentation/predict.py b/demo/semantic_segmentation/predict.py
new file mode 100644
index 00000000..a991f48c
--- /dev/null
+++ b/demo/semantic_segmentation/predict.py
@@ -0,0 +1,6 @@
+import paddle
+import paddlehub as hub
+
+if __name__ == '__main__':
+    model = hub.Module(name='ocrnet_hrnetw18_voc', num_classes=2, pretrained='/PATH/TO/CHECKPOINT')
+    model.predict(images=["N0007.jpg"], visualization=True)
\ No newline at end of file
diff --git a/demo/semantic_segmentation/train.py b/demo/semantic_segmentation/train.py
new file mode 100644
index 00000000..55f3ba59
--- /dev/null
+++ b/demo/semantic_segmentation/train.py
@@ -0,0 +1,16 @@
+import paddle
+import paddlehub as hub
+from paddlehub.finetune.trainer import Trainer
+
+from paddlehub.datasets import OpticDiscSeg
+from paddlehub.vision.segmentation_transforms import Compose, Resize, Normalize
+
+if __name__ == "__main__":
+    transform = Compose([Resize(target_size=(512, 512)), Normalize()])
+    train_reader = OpticDiscSeg(transform)
+
+    model = hub.Module(name='ocrnet_hrnetw18_voc', num_classes=2)
+    scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.01, decay_steps=1000, power=0.9, end_lr=0.0001)
+    optimizer = paddle.optimizer.Adam(learning_rate=scheduler, parameters=model.parameters())
+    trainer = Trainer(model, optimizer, checkpoint_dir='test_ckpt_img_ocr', use_gpu=True)
+    trainer.train(train_reader, epochs=20, batch_size=4, eval_dataset=train_reader, log_interval=10, save_interval=4)
\ No newline at end of file
diff --git a/docs/docs_ch/reference/datasets.md b/docs/docs_ch/reference/datasets.md
index cdbe0e0d..c4409fc4 100644
--- a/docs/docs_ch/reference/datasets.md
+++ b/docs/docs_ch/reference/datasets.md
@@ -39,3 +39,18 @@ Dataset for Style transfer. The dataset contains 2001 images for training set an
 **Args**
 * transforms(callmethod) : The method of preprocess images.
 * mode(str): The mode for preparing dataset.
+
+# Class `hub.datasets.OpticDiscSeg`
+
+```python
+hub.datasets.OpticDiscSeg(
+    transforms: Callable,
+    mode: str = 'train')
+```
+
+Dataset for semantic segmentation. The dataset contains 267 images for training set, 76 images for validation set and 38 images for testing set.
+
+**Args**
+* transforms(callmethod) : The method of preprocess images.
+* mode(str): The mode for preparing dataset. + diff --git a/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/layers.py b/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/layers.py new file mode 100644 index 00000000..dd958e19 --- /dev/null +++ b/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/layers.py @@ -0,0 +1,345 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.layer import activation +from paddle.nn import Conv2D, AvgPool2D + + +def SyncBatchNorm(*args, **kwargs): + """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead""" + if paddle.get_device() == 'cpu': + return nn.BatchNorm2D(*args, **kwargs) + else: + return nn.SyncBatchNorm(*args, **kwargs) + + +class ConvBNLayer(nn.Layer): + """Basic conv bn relu layer.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + groups: int = 1, + is_vd_mode: bool = False, + act: str = None, + name: str = None): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2 if dilation == 1 else 0, + dilation=dilation, + groups=groups, + bias_attr=False) + + self._batch_norm = SyncBatchNorm(out_channels) + self._act_op = Activation(act=act) + + def forward(self, inputs: paddle.Tensor) -> paddle.Tensor: + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + y = self._act_op(y) + + return y + + +class BottleneckBlock(nn.Layer): + """Residual bottleneck block""" + + def __init__(self, + in_channels: int, + out_channels: int, + stride: int, + shortcut: bool = True, + if_first: bool = False, + dilation: int = 1, + name: str = None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + + self.dilation = dilation + + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + dilation=dilation, + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first or stride == 1 else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs: paddle.Tensor) -> paddle.Tensor: + y = self.conv0(inputs) + if self.dilation > 1: + padding = self.dilation + y = 
F.pad(y, [padding, padding, padding, padding]) + + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class SeparableConvBNReLU(nn.Layer): + """Depthwise Separable Convolution.""" + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + padding: str = 'same', + **kwargs: dict): + super(SeparableConvBNReLU, self).__init__() + self.depthwise_conv = ConvBN( + in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + padding=padding, + groups=in_channels, + **kwargs) + self.piontwise_conv = ConvBNReLU( + in_channels, out_channels, kernel_size=1, groups=1) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self.depthwise_conv(x) + x = self.piontwise_conv(x) + return x + + +class ConvBN(nn.Layer): + """Basic conv bn layer""" + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + padding: str = 'same', + **kwargs: dict): + super(ConvBN, self).__init__() + self._conv = Conv2D( + in_channels, out_channels, kernel_size, padding=padding, **kwargs) + self._batch_norm = SyncBatchNorm(out_channels) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self._conv(x) + x = self._batch_norm(x) + return x + + +class ConvBNReLU(nn.Layer): + """Basic conv bn relu layer.""" + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + padding: str = 'same', + **kwargs: dict): + super(ConvBNReLU, self).__init__() + + self._conv = Conv2D( + in_channels, out_channels, kernel_size, padding=padding, **kwargs) + self._batch_norm = SyncBatchNorm(out_channels) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self._conv(x) + x = self._batch_norm(x) + x = F.relu(x) + return x + + +class Activation(nn.Layer): + """ + The wrapper of activations. + + Args: + act (str, optional): The activation name in lowercase. It must be one of ['elu', 'gelu', + 'hardshrink', 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', + 'softmax', 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', + 'hsigmoid']. Default: None, means identical transformation. + + Returns: + A callable object of Activation. + + Raises: + KeyError: When parameter `act` is not in the optional range. 
+ + Examples: + + from paddleseg.models.common.activation import Activation + + relu = Activation("relu") + print(relu) + # + + sigmoid = Activation("sigmoid") + print(sigmoid) + # + + not_exit_one = Activation("not_exit_one") + # KeyError: "not_exit_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink', + # 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax', + # 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])" + """ + + def __init__(self, act: str = None): + super(Activation, self).__init__() + + self._act = act + upper_act_names = activation.__all__ + lower_act_names = [act.lower() for act in upper_act_names] + act_dict = dict(zip(lower_act_names, upper_act_names)) + + if act is not None: + if act in act_dict.keys(): + act_name = act_dict[act] + self.act_func = eval("activation.{}()".format(act_name)) + else: + raise KeyError("{} does not exist in the current {}".format( + act, act_dict.keys())) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + + if self._act is not None: + return self.act_func(x) + else: + return x + + +class ASPPModule(nn.Layer): + """ + Atrous Spatial Pyramid Pooling. + + Args: + aspp_ratios (tuple): The dilation rate using in ASSP module. + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. + use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False. + image_pooling (bool, optional): If augmented with image-level features. Default: False + """ + + def __init__(self, + aspp_ratios: tuple, + in_channels: int, + out_channels: int, + align_corners: bool, + use_sep_conv: bool= False, + image_pooling: bool = False): + super().__init__() + + self.align_corners = align_corners + self.aspp_blocks = nn.LayerList() + + for ratio in aspp_ratios: + if use_sep_conv and ratio > 1: + conv_func = SeparableConvBNReLU + else: + conv_func = ConvBNReLU + + block = conv_func( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1 if ratio == 1 else 3, + dilation=ratio, + padding=0 if ratio == 1 else ratio) + self.aspp_blocks.append(block) + + out_size = len(self.aspp_blocks) + + if image_pooling: + self.global_avg_pool = nn.Sequential( + nn.AdaptiveAvgPool2D(output_size=(1, 1)), + ConvBNReLU(in_channels, out_channels, kernel_size=1, bias_attr=False)) + out_size += 1 + self.image_pooling = image_pooling + + self.conv_bn_relu = ConvBNReLU( + in_channels=out_channels * out_size, + out_channels=out_channels, + kernel_size=1) + + self.dropout = nn.Dropout(p=0.1) # drop rate + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + outputs = [] + for block in self.aspp_blocks: + y = block(x) + y = F.interpolate( + y, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + outputs.append(y) + + if self.image_pooling: + img_avg = self.global_avg_pool(x) + img_avg = F.interpolate( + img_avg, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + outputs.append(img_avg) + + x = paddle.concat(outputs, axis=1) + x = self.conv_bn_relu(x) + x = self.dropout(x) + + return x diff --git a/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/module.py b/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/module.py new file mode 100644 index 00000000..38cfd429 --- /dev/null +++ 
b/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/module.py @@ -0,0 +1,186 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Union, List, Tuple + +import paddle +from paddle import nn +import paddle.nn.functional as F +import numpy as np +from paddlehub.module.module import moduleinfo +import paddlehub.vision.segmentation_transforms as T +from paddlehub.module.cv_module import ImageSegmentationModule + +from deeplabv3p_resnet50_voc.resnet import ResNet50_vd +import deeplabv3p_resnet50_voc.layers as L + + + +@moduleinfo( + name="deeplabv3p_resnet50_voc", + type="CV/semantic_segmentation", + author="paddlepaddle", + author_email="", + summary="DeepLabV3PResnet50 is a segmentation model.", + version="1.0.0", + meta=ImageSegmentationModule) +class DeepLabV3PResnet50(nn.Layer): + """ + The DeepLabV3PResnet50 implementation based on PaddlePaddle. + + The original article refers to + Liang-Chieh Chen, et, al. "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" + (https://arxiv.org/abs/1802.02611) + + Args: + num_classes (int): the unique number of target classes. + backbone_indices (tuple): two values in the tuple indicate the indices of output of backbone. + the first index will be taken as a low-level feature in Decoder component; + the second one will be taken as input of ASPP component. + Usually backbone consists of four downsampling stage, and return an output of + each stage, so we set default (0, 3), which means taking feature map of the first + stage in backbone as low-level feature used in Decoder, and feature map of the fourth + stage as input of ASPP. + aspp_ratios (tuple): the dilation rate using in ASSP module. + if output_stride=16, aspp_ratios should be set as (1, 6, 12, 18). + if output_stride=8, aspp_ratios is (1, 12, 24, 36). + aspp_out_channels (int): the output channels of ASPP module. + align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, + e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. + pretrained (str): the path of pretrained model. Default to None. 
+ """ + + def __init__(self, + num_classes: int = 21, + backbone_indices: Tuple[int] = (0, 3), + aspp_ratios: Tuple[int] = (1, 12, 24, 36), + aspp_out_channels: int = 256, + align_corners=False, + pretrained: str = None): + super(DeepLabV3PResnet50, self).__init__() + self.backbone = ResNet50_vd() + backbone_channels = [self.backbone.feat_channels[i] for i in backbone_indices] + self.head = DeepLabV3PHead(num_classes, backbone_indices, + backbone_channels, aspp_ratios, + aspp_out_channels, align_corners) + self.align_corners = align_corners + self.transforms = T.Compose([T.Padding(target_size=(512, 512)), T.Normalize()]) + + if pretrained is not None: + model_dict = paddle.load(pretrained) + self.set_dict(model_dict) + print("load custom parameters success") + + else: + checkpoint = os.path.join(self.directory, 'deeplabv3p_model.pdparams') + model_dict = paddle.load(checkpoint) + self.set_dict(model_dict) + print("load pretrained parameters success") + + def transform(self, img: Union[np.ndarray, str]) -> Union[np.ndarray, str]: + return self.transforms(img) + + def forward(self, x: paddle.Tensor) -> List[paddle.Tensor]: + feat_list = self.backbone(x) + logit_list = self.head(feat_list) + return [ + F.interpolate( + logit, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) for logit in logit_list] + + +class DeepLabV3PHead(nn.Layer): + """ + The DeepLabV3PHead implementation based on PaddlePaddle. + + Args: + num_classes (int): The unique number of target classes. + backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone. + the first index will be taken as a low-level feature in Decoder component; + the second one will be taken as input of ASPP component. + Usually backbone consists of four downsampling stage, and return an output of + each stage. If we set it as (0, 3), it means taking feature map of the first + stage in backbone as low-level feature used in Decoder, and feature map of the fourth + stage as input of ASPP. + backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index. + aspp_ratios (tuple): The dilation rates using in ASSP module. + aspp_out_channels (int): The output channels of ASPP module. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. + """ + + def __init__(self, num_classes: int, backbone_indices: Tuple[paddle.Tensor], backbone_channels: Tuple[paddle.Tensor], + aspp_ratios: Tuple[float], aspp_out_channels: int, align_corners: bool): + super().__init__() + + self.aspp = L.ASPPModule( + aspp_ratios, + backbone_channels[1], + aspp_out_channels, + align_corners, + use_sep_conv=True, + image_pooling=True) + self.decoder = Decoder(num_classes, backbone_channels[0], align_corners) + self.backbone_indices = backbone_indices + + def forward(self, feat_list: List[paddle.Tensor]) -> List[paddle.Tensor]: + logit_list = [] + low_level_feat = feat_list[self.backbone_indices[0]] + x = feat_list[self.backbone_indices[1]] + x = self.aspp(x) + logit = self.decoder(x, low_level_feat) + logit_list.append(logit) + return logit_list + + +class Decoder(nn.Layer): + """ + Decoder module of DeepLabV3P model + + Args: + num_classes (int): The number of classes. + in_channels (int): The number of input channels in decoder module. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature is even, e.g. 
1024x512, otherwise it is True, e.g. 769x769. + """ + + def __init__(self, num_classes: int, in_channels: int, align_corners: bool): + super(Decoder, self).__init__() + + self.conv_bn_relu1 = L.ConvBNReLU( + in_channels=in_channels, out_channels=48, kernel_size=1) + + self.conv_bn_relu2 = L.SeparableConvBNReLU( + in_channels=304, out_channels=256, kernel_size=3, padding=1) + self.conv_bn_relu3 = L.SeparableConvBNReLU( + in_channels=256, out_channels=256, kernel_size=3, padding=1) + self.conv = nn.Conv2D( + in_channels=256, out_channels=num_classes, kernel_size=1) + + self.align_corners = align_corners + + def forward(self, x: paddle.Tensor, low_level_feat: paddle.Tensor) -> paddle.Tensor: + low_level_feat = self.conv_bn_relu1(low_level_feat) + x = F.interpolate( + x, + low_level_feat.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + x = paddle.concat([x, low_level_feat], axis=1) + x = self.conv_bn_relu2(x) + x = self.conv_bn_relu3(x) + x = self.conv(x) + return x \ No newline at end of file diff --git a/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/resnet.py b/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/resnet.py new file mode 100644 index 00000000..2fa6fa2f --- /dev/null +++ b/modules/image/semantic_segmentation/deeplabv3p_resnet50_voc/resnet.py @@ -0,0 +1,137 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
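+
+# ResNet50-vd backbone for DeepLabV3+: a stack of three 3x3 convolutions
+# (conv1_1/conv1_2/conv1_3) replaces the usual 7x7 stem, and downsampling
+# shortcuts use average pooling (the "vd" variant; see is_vd_mode in layers.py).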
+
+from typing import List
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+import deeplabv3p_resnet50_voc.layers as L
+
+
+class BasicBlock(nn.Layer):
+    def __init__(self,
+                 in_channels: int,
+                 out_channels: int,
+                 stride: int,
+                 shortcut: bool = True,
+                 if_first: bool = False,
+                 name: str = None):
+        super(BasicBlock, self).__init__()
+        self.stride = stride
+        self.conv0 = L.ConvBNLayer(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=3,
+            stride=stride,
+            act='relu',
+            name=name + "_branch2a")
+        self.conv1 = L.ConvBNLayer(
+            in_channels=out_channels,
+            out_channels=out_channels,
+            kernel_size=3,
+            act=None,
+            name=name + "_branch2b")
+
+        if not shortcut:
+            self.short = L.ConvBNLayer(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=1,
+                stride=1,
+                is_vd_mode=False if if_first else True,
+                name=name + "_branch1")
+
+        self.shortcut = shortcut
+
+    def forward(self, inputs: paddle.Tensor) -> paddle.Tensor:
+        y = self.conv0(inputs)
+        conv1 = self.conv1(y)
+
+        if self.shortcut:
+            short = inputs
+        else:
+            short = self.short(inputs)
+        # Residual addition followed by ReLU; paddle.elementwise_add with an
+        # `act` argument is a Paddle 1.x API and is unavailable in Paddle 2.x.
+        y = paddle.add(x=short, y=conv1)
+        y = F.relu(y)
+
+        return y
+
+
+class ResNet50_vd(nn.Layer):
+    def __init__(self,
+                 multi_grid: tuple = (1, 2, 4)):
+        super(ResNet50_vd, self).__init__()
+        depth = [3, 4, 6, 3]
+        num_channels = [64, 256, 512, 1024]
+        num_filters = [64, 128, 256, 512]
+        self.feat_channels = [c * 4 for c in num_filters]
+        dilation_dict = {2: 2, 3: 4}
+        self.conv1_1 = L.ConvBNLayer(
+            in_channels=3,
+            out_channels=32,
+            kernel_size=3,
+            stride=2,
+            act='relu',
+            name="conv1_1")
+        self.conv1_2 = L.ConvBNLayer(
+            in_channels=32,
+            out_channels=32,
+            kernel_size=3,
+            stride=1,
+            act='relu',
+            name="conv1_2")
+        self.conv1_3 = L.ConvBNLayer(
+            in_channels=32,
+            out_channels=64,
+            kernel_size=3,
+            stride=1,
+            act='relu',
+            name="conv1_3")
+        self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
+        self.stage_list = []
+
+        for block in range(len(depth)):
+            shortcut = False
+            block_list = []
+            for i in range(depth[block]):
+                conv_name = "res" + str(block + 2) + chr(97 + i)
+                dilation_rate = dilation_dict[
+                    block] if dilation_dict and block in dilation_dict else 1
+                if block == 3:
+                    dilation_rate = dilation_rate * multi_grid[i]
+                bottleneck_block = self.add_sublayer(
+                    'bb_%d_%d' % (block, i),
+                    L.BottleneckBlock(
+                        in_channels=num_channels[block]
+                        if i == 0 else num_filters[block] * 4,
+                        out_channels=num_filters[block],
+                        stride=2 if i == 0 and block != 0
+                        and dilation_rate == 1 else 1,
+                        shortcut=shortcut,
+                        if_first=block == i == 0,
+                        name=conv_name,
+                        dilation=dilation_rate))
+                block_list.append(bottleneck_block)
+                shortcut = True
+            self.stage_list.append(block_list)
+
+    def forward(self, inputs: paddle.Tensor) -> List[paddle.Tensor]:
+        y = self.conv1_1(inputs)
+        y = self.conv1_2(y)
+        y = self.conv1_3(y)
+        y = self.pool2d_max(y)
+        feat_list = []
+        for stage in self.stage_list:
+            for block in stage:
+                y = block(y)
+            feat_list.append(y)
+        return feat_list
\ No newline at end of file
diff --git a/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/hrnet.py b/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/hrnet.py
new file mode 100644
index 00000000..2e9ea789
--- /dev/null
+++ b/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/hrnet.py
@@ -0,0 +1,612 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+import ocrnet_hrnetw18_voc.layers as L
+
+
+class HRNet_W18(nn.Layer):
+    """
+    The HRNet implementation based on PaddlePaddle.
+
+    The original article refers to
+    Jingdong Wang, et al. "HRNet: Deep High-Resolution Representation Learning for Visual Recognition"
+    (https://arxiv.org/pdf/1908.07919.pdf).
+
+    Args:
+        pretrained (str, optional): The path of the pretrained model.
+        stage1_num_modules (int, optional): Number of modules for stage1. Default: 1.
+        stage1_num_blocks (tuple, optional): Number of blocks per module for stage1. Default: (4,).
+        stage1_num_channels (tuple, optional): Number of channels per branch for stage1. Default: (64,).
+        stage2_num_modules (int, optional): Number of modules for stage2. Default: 1.
+        stage2_num_blocks (tuple, optional): Number of blocks per module for stage2. Default: (4, 4).
+        stage2_num_channels (tuple, optional): Number of channels per branch for stage2. Default: (18, 36).
+        stage3_num_modules (int, optional): Number of modules for stage3. Default: 4.
+        stage3_num_blocks (tuple, optional): Number of blocks per module for stage3. Default: (4, 4, 4).
+        stage3_num_channels (tuple, optional): Number of channels per branch for stage3. Default: (18, 36, 72).
+        stage4_num_modules (int, optional): Number of modules for stage4. Default: 3.
+        stage4_num_blocks (tuple, optional): Number of blocks per module for stage4. Default: (4, 4, 4, 4).
+        stage4_num_channels (tuple, optional): Number of channels per branch for stage4. Default: (18, 36, 72, 144).
+        has_se (bool, optional): Whether to use the Squeeze-and-Excitation module. Default: False.
+        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
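+
+    Examples:
+        A minimal forward sketch (illustrative only):
+
+        .. code-block:: python
+
+            import paddle
+
+            net = HRNet_W18()
+            feat, = net(paddle.rand([1, 3, 512, 512]))
+            # The four branch outputs are fused at 1/4 input resolution, so
+            # the single feature map has 18 + 36 + 72 + 144 = 270 channels:
+            print(feat.shape)  # [1, 270, 128, 128]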
+ """ + + def __init__(self, + pretrained: str = None, + stage1_num_modules: int = 1, + stage1_num_blocks: tuple = (4,), + stage1_num_channels: tuple = (64,), + stage2_num_modules: int = 1, + stage2_num_blocks: tuple = (4, 4), + stage2_num_channels: tuple = (18, 36), + stage3_num_modules: int = 4, + stage3_num_blocks: tuple = (4, 4, 4), + stage3_num_channels: tuple = (18, 36, 72), + stage4_num_modules: int = 3, + stage4_num_blocks: tuple = (4, 4, 4, 4), + stage4_num_channels: tuple = (18, 36, 72, 144), + has_se: bool = False, + align_corners: bool = False): + super(HRNet_W18, self).__init__() + self.pretrained = pretrained + self.stage1_num_modules = stage1_num_modules + self.stage1_num_blocks = stage1_num_blocks + self.stage1_num_channels = stage1_num_channels + self.stage2_num_modules = stage2_num_modules + self.stage2_num_blocks = stage2_num_blocks + self.stage2_num_channels = stage2_num_channels + self.stage3_num_modules = stage3_num_modules + self.stage3_num_blocks = stage3_num_blocks + self.stage3_num_channels = stage3_num_channels + self.stage4_num_modules = stage4_num_modules + self.stage4_num_blocks = stage4_num_blocks + self.stage4_num_channels = stage4_num_channels + self.has_se = has_se + self.align_corners = align_corners + self.feat_channels = [sum(stage4_num_channels)] + + self.conv_layer1_1 = L.ConvBNReLU( + in_channels=3, + out_channels=64, + kernel_size=3, + stride=2, + padding='same', + bias_attr=False) + + self.conv_layer1_2 = L.ConvBNReLU( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=2, + padding='same', + bias_attr=False) + + self.la1 = Layer1( + num_channels=64, + num_blocks=self.stage1_num_blocks[0], + num_filters=self.stage1_num_channels[0], + has_se=has_se, + name="layer2") + + self.tr1 = TransitionLayer( + in_channels=[self.stage1_num_channels[0] * 4], + out_channels=self.stage2_num_channels, + name="tr1") + + self.st2 = Stage( + num_channels=self.stage2_num_channels, + num_modules=self.stage2_num_modules, + num_blocks=self.stage2_num_blocks, + num_filters=self.stage2_num_channels, + has_se=self.has_se, + name="st2", + align_corners=align_corners) + + self.tr2 = TransitionLayer( + in_channels=self.stage2_num_channels, + out_channels=self.stage3_num_channels, + name="tr2") + self.st3 = Stage( + num_channels=self.stage3_num_channels, + num_modules=self.stage3_num_modules, + num_blocks=self.stage3_num_blocks, + num_filters=self.stage3_num_channels, + has_se=self.has_se, + name="st3", + align_corners=align_corners) + + self.tr3 = TransitionLayer( + in_channels=self.stage3_num_channels, + out_channels=self.stage4_num_channels, + name="tr3") + self.st4 = Stage( + num_channels=self.stage4_num_channels, + num_modules=self.stage4_num_modules, + num_blocks=self.stage4_num_blocks, + num_filters=self.stage4_num_channels, + has_se=self.has_se, + name="st4", + align_corners=align_corners) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + conv1 = self.conv_layer1_1(x) + conv2 = self.conv_layer1_2(conv1) + + la1 = self.la1(conv2) + + tr1 = self.tr1([la1]) + st2 = self.st2(tr1) + + tr2 = self.tr2(st2) + st3 = self.st3(tr2) + + tr3 = self.tr3(st3) + st4 = self.st4(tr3) + + x0_h, x0_w = st4[0].shape[2:] + x1 = F.interpolate( + st4[1], (x0_h, x0_w), + mode='bilinear', + align_corners=self.align_corners) + x2 = F.interpolate( + st4[2], (x0_h, x0_w), + mode='bilinear', + align_corners=self.align_corners) + x3 = F.interpolate( + st4[3], (x0_h, x0_w), + mode='bilinear', + align_corners=self.align_corners) + x = paddle.concat([st4[0], x1, x2, x3], axis=1) + + 
return [x] + + +class Layer1(nn.Layer): + def __init__(self, + num_channels: int, + num_filters: int, + num_blocks: int, + has_se: bool = False, + name: str = None): + super(Layer1, self).__init__() + + self.bottleneck_block_list = [] + + for i in range(num_blocks): + bottleneck_block = self.add_sublayer( + "bb_{}_{}".format(name, i + 1), + BottleneckBlock( + num_channels=num_channels if i == 0 else num_filters * 4, + num_filters=num_filters, + has_se=has_se, + stride=1, + downsample=True if i == 0 else False, + name=name + '_' + str(i + 1))) + self.bottleneck_block_list.append(bottleneck_block) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + conv = x + for block_func in self.bottleneck_block_list: + conv = block_func(conv) + return conv + + +class TransitionLayer(nn.Layer): + def __init__(self, in_channels: int, out_channels: int, name=None): + super(TransitionLayer, self).__init__() + + num_in = len(in_channels) + num_out = len(out_channels) + self.conv_bn_func_list = [] + for i in range(num_out): + residual = None + if i < num_in: + if in_channels[i] != out_channels[i]: + residual = self.add_sublayer( + "transition_{}_layer_{}".format(name, i + 1), + L.ConvBNReLU( + in_channels=in_channels[i], + out_channels=out_channels[i], + kernel_size=3, + padding='same', + bias_attr=False)) + else: + residual = self.add_sublayer( + "transition_{}_layer_{}".format(name, i + 1), + L.ConvBNReLU( + in_channels=in_channels[-1], + out_channels=out_channels[i], + kernel_size=3, + stride=2, + padding='same', + bias_attr=False)) + self.conv_bn_func_list.append(residual) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + outs = [] + for idx, conv_bn_func in enumerate(self.conv_bn_func_list): + if conv_bn_func is None: + outs.append(x[idx]) + else: + if idx < len(x): + outs.append(conv_bn_func(x[idx])) + else: + outs.append(conv_bn_func(x[-1])) + return outs + + +class Branches(nn.Layer): + def __init__(self, + num_blocks: int, + in_channels: int, + out_channels: int, + has_se: bool = False, + name: str = None): + super(Branches, self).__init__() + + self.basic_block_list = [] + + for i in range(len(out_channels)): + self.basic_block_list.append([]) + for j in range(num_blocks[i]): + in_ch = in_channels[i] if j == 0 else out_channels[i] + basic_block_func = self.add_sublayer( + "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1), + BasicBlock( + num_channels=in_ch, + num_filters=out_channels[i], + has_se=has_se, + name=name + '_branch_layer_' + str(i + 1) + '_' + + str(j + 1))) + self.basic_block_list[i].append(basic_block_func) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + outs = [] + for idx, input in enumerate(x): + conv = input + for basic_block_func in self.basic_block_list[idx]: + conv = basic_block_func(conv) + outs.append(conv) + return outs + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels: int, + num_filters: int, + has_se: bool, + stride: int = 1, + downsample: bool = False, + name: str = None): + super(BottleneckBlock, self).__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = L.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + padding='same', + bias_attr=False) + + self.conv2 = L.ConvBNReLU( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + stride=stride, + padding='same', + bias_attr=False) + + self.conv3 = L.ConvBN( + in_channels=num_filters, + out_channels=num_filters * 4, + kernel_size=1, + padding='same', + bias_attr=False) + + if 
self.downsample: + self.conv_down = L.ConvBN( + in_channels=num_channels, + out_channels=num_filters * 4, + kernel_size=1, + padding='same', + bias_attr=False) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=16, + name=name + '_fc') + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + residual = x + conv1 = self.conv1(x) + conv2 = self.conv2(conv1) + conv3 = self.conv3(conv2) + + if self.downsample: + residual = self.conv_down(x) + + if self.has_se: + conv3 = self.se(conv3) + + y = conv3 + residual + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels: int, + num_filters: int, + stride: int = 1, + has_se: bool = False, + downsample: bool = False, + name: str = None): + super(BasicBlock, self).__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = L.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=3, + stride=stride, + padding='same', + bias_attr=False) + self.conv2 = L.ConvBN( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + padding='same', + bias_attr=False) + + if self.downsample: + self.conv_down = L.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + padding='same', + bias_attr=False) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=16, + name=name + '_fc') + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + residual = x + conv1 = self.conv1(x) + conv2 = self.conv2(conv1) + + if self.downsample: + residual = self.conv_down(x) + + if self.has_se: + conv2 = self.se(conv2) + + y = conv2 + residual + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels: int, num_filters: int, reduction_ratio: int, name: str = None): + super(SELayer, self).__init__() + + self.pool2d_gap = nn.AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = nn.Linear( + num_channels, + med_ch, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv))) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = nn.Linear( + med_ch, + num_filters, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv))) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + pool = self.pool2d_gap(x) + pool = paddle.reshape(pool, shape=[-1, self._num_channels]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.reshape( + excitation, shape=[-1, self._num_channels, 1, 1]) + out = x * excitation + return out + + +class Stage(nn.Layer): + def __init__(self, + num_channels: int, + num_modules: int, + num_blocks: int, + num_filters: int, + has_se: bool = False, + multi_scale_output: bool = True, + name: str = None, + align_corners: bool = False): + super(Stage, self).__init__() + + self._num_modules = num_modules + + self.stage_func_list = [] + for i in range(num_modules): + if i == num_modules - 1 and not multi_scale_output: + stage_func = self.add_sublayer( + "stage_{}_{}".format(name, i + 1), + HighResolutionModule( + num_channels=num_channels, + num_blocks=num_blocks, + num_filters=num_filters, + has_se=has_se, + multi_scale_output=False, + name=name + '_' + str(i + 1), + align_corners=align_corners)) + else: + 
stage_func = self.add_sublayer( + "stage_{}_{}".format(name, i + 1), + HighResolutionModule( + num_channels=num_channels, + num_blocks=num_blocks, + num_filters=num_filters, + has_se=has_se, + name=name + '_' + str(i + 1), + align_corners=align_corners)) + + self.stage_func_list.append(stage_func) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + out = x + for idx in range(self._num_modules): + out = self.stage_func_list[idx](out) + return out + + +class HighResolutionModule(nn.Layer): + def __init__(self, + num_channels: int, + num_blocks: int, + num_filters: int, + has_se: bool = False, + multi_scale_output: bool = True, + name: str = None, + align_corners: str = False): + super(HighResolutionModule, self).__init__() + + self.branches_func = Branches( + num_blocks=num_blocks, + in_channels=num_channels, + out_channels=num_filters, + has_se=has_se, + name=name) + + self.fuse_func = FuseLayers( + in_channels=num_filters, + out_channels=num_filters, + multi_scale_output=multi_scale_output, + name=name, + align_corners=align_corners) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + out = self.branches_func(x) + out = self.fuse_func(out) + return out + + +class FuseLayers(nn.Layer): + def __init__(self, + in_channels: int, + out_channels: int, + multi_scale_output: bool = True, + name: str = None, + align_corners: bool = False): + super(FuseLayers, self).__init__() + + self._actual_ch = len(in_channels) if multi_scale_output else 1 + self._in_channels = in_channels + self.align_corners = align_corners + + self.residual_func_list = [] + for i in range(self._actual_ch): + for j in range(len(in_channels)): + if j > i: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}".format(name, i + 1, j + 1), + L.ConvBN( + in_channels=in_channels[j], + out_channels=out_channels[i], + kernel_size=1, + padding='same', + bias_attr=False)) + self.residual_func_list.append(residual_func) + elif j < i: + pre_num_filters = in_channels[j] + for k in range(i - j): + if k == i - j - 1: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}_{}".format( + name, i + 1, j + 1, k + 1), + L.ConvBN( + in_channels=pre_num_filters, + out_channels=out_channels[i], + kernel_size=3, + stride=2, + padding='same', + bias_attr=False)) + pre_num_filters = out_channels[i] + else: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}_{}".format( + name, i + 1, j + 1, k + 1), + L.ConvBNReLU( + in_channels=pre_num_filters, + out_channels=out_channels[j], + kernel_size=3, + stride=2, + padding='same', + bias_attr=False)) + pre_num_filters = out_channels[j] + self.residual_func_list.append(residual_func) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + outs = [] + residual_func_idx = 0 + for i in range(self._actual_ch): + residual = x[i] + residual_shape = residual.shape[-2:] + for j in range(len(self._in_channels)): + if j > i: + y = self.residual_func_list[residual_func_idx](x[j]) + residual_func_idx += 1 + + y = F.interpolate( + y, + residual_shape, + mode='bilinear', + align_corners=self.align_corners) + residual = residual + y + elif j < i: + y = x[j] + for k in range(i - j): + y = self.residual_func_list[residual_func_idx](y) + residual_func_idx += 1 + + residual = residual + y + + residual = F.relu(residual) + outs.append(residual) + + return outs \ No newline at end of file diff --git a/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/layers.py b/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/layers.py new file mode 100644 index 00000000..d5554a1b --- 
/dev/null +++ b/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/layers.py @@ -0,0 +1,345 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn.layer import activation +from paddle.nn import Conv2D, AvgPool2D + + +def SyncBatchNorm(*args, **kwargs): + """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead""" + if paddle.get_device() == 'cpu': + return nn.BatchNorm2D(*args, **kwargs) + else: + return nn.SyncBatchNorm(*args, **kwargs) + + +class ConvBNLayer(nn.Layer): + """Basic conv bn relu layer.""" + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + dilation: int = 1, + groups: int = 1, + is_vd_mode: bool = False, + act: str = None, + name: str = None): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2 if dilation == 1 else 0, + dilation=dilation, + groups=groups, + bias_attr=False) + + self._batch_norm = SyncBatchNorm(out_channels) + self._act_op = Activation(act=act) + + def forward(self, inputs: paddle.Tensor) -> paddle.Tensor: + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + y = self._act_op(y) + + return y + + +class BottleneckBlock(nn.Layer): + """Residual bottleneck block""" + + def __init__(self, + in_channels: int, + out_channels: int, + stride: int, + shortcut: bool = True, + if_first: bool = False, + dilation: int = 1, + name: str = None): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu', + name=name + "_branch2a") + + self.dilation = dilation + + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + dilation=dilation, + name=name + "_branch2b") + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None, + name=name + "_branch2c") + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first or stride == 1 else True, + name=name + "_branch1") + + self.shortcut = shortcut + + def forward(self, inputs: paddle.Tensor) -> paddle.Tensor: + y = self.conv0(inputs) + if self.dilation > 1: + padding = self.dilation + y = F.pad(y, [padding, padding, padding, padding]) + + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class 
SeparableConvBNReLU(nn.Layer): + """Depthwise Separable Convolution.""" + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + padding: str = 'same', + **kwargs: dict): + super(SeparableConvBNReLU, self).__init__() + self.depthwise_conv = ConvBN( + in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + padding=padding, + groups=in_channels, + **kwargs) + self.piontwise_conv = ConvBNReLU( + in_channels, out_channels, kernel_size=1, groups=1) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self.depthwise_conv(x) + x = self.piontwise_conv(x) + return x + + +class ConvBN(nn.Layer): + """Basic conv bn layer""" + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + padding: str = 'same', + **kwargs: dict): + super(ConvBN, self).__init__() + self._conv = Conv2D( + in_channels, out_channels, kernel_size, padding=padding, **kwargs) + self._batch_norm = SyncBatchNorm(out_channels) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self._conv(x) + x = self._batch_norm(x) + return x + + +class ConvBNReLU(nn.Layer): + """Basic conv bn relu layer.""" + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + padding: str = 'same', + **kwargs: dict): + super(ConvBNReLU, self).__init__() + + self._conv = Conv2D( + in_channels, out_channels, kernel_size, padding=padding, **kwargs) + self._batch_norm = SyncBatchNorm(out_channels) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self._conv(x) + x = self._batch_norm(x) + x = F.relu(x) + return x + + +class Activation(nn.Layer): + """ + The wrapper of activations. + + Args: + act (str, optional): The activation name in lowercase. It must be one of ['elu', 'gelu', + 'hardshrink', 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', + 'softmax', 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', + 'hsigmoid']. Default: None, means identical transformation. + + Returns: + A callable object of Activation. + + Raises: + KeyError: When parameter `act` is not in the optional range. + + Examples: + + from paddleseg.models.common.activation import Activation + + relu = Activation("relu") + print(relu) + # + + sigmoid = Activation("sigmoid") + print(sigmoid) + # + + not_exit_one = Activation("not_exit_one") + # KeyError: "not_exit_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink', + # 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax', + # 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])" + """ + + def __init__(self, act: str = None): + super(Activation, self).__init__() + + self._act = act + upper_act_names = activation.__all__ + lower_act_names = [act.lower() for act in upper_act_names] + act_dict = dict(zip(lower_act_names, upper_act_names)) + + if act is not None: + if act in act_dict.keys(): + act_name = act_dict[act] + self.act_func = eval("activation.{}()".format(act_name)) + else: + raise KeyError("{} does not exist in the current {}".format( + act, act_dict.keys())) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + + if self._act is not None: + return self.act_func(x) + else: + return x + + +class ASPPModule(nn.Layer): + """ + Atrous Spatial Pyramid Pooling. + + Args: + aspp_ratios (tuple): The dilation rate using in ASSP module. + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. 
+ align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. + use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False. + image_pooling (bool, optional): If augmented with image-level features. Default: False + """ + + def __init__(self, + aspp_ratios, + in_channels, + out_channels, + align_corners, + use_sep_conv=False, + image_pooling=False): + super().__init__() + + self.align_corners = align_corners + self.aspp_blocks = nn.LayerList() + + for ratio in aspp_ratios: + if use_sep_conv and ratio > 1: + conv_func = SeparableConvBNReLU + else: + conv_func = ConvBNReLU + + block = conv_func( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1 if ratio == 1 else 3, + dilation=ratio, + padding=0 if ratio == 1 else ratio) + self.aspp_blocks.append(block) + + out_size = len(self.aspp_blocks) + + if image_pooling: + self.global_avg_pool = nn.Sequential( + nn.AdaptiveAvgPool2D(output_size=(1, 1)), + ConvBNReLU(in_channels, out_channels, kernel_size=1, bias_attr=False)) + out_size += 1 + self.image_pooling = image_pooling + + self.conv_bn_relu = ConvBNReLU( + in_channels=out_channels * out_size, + out_channels=out_channels, + kernel_size=1) + + self.dropout = nn.Dropout(p=0.1) # drop rate + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + outputs = [] + for block in self.aspp_blocks: + y = block(x) + y = F.interpolate( + y, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + outputs.append(y) + + if self.image_pooling: + img_avg = self.global_avg_pool(x) + img_avg = F.interpolate( + img_avg, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + outputs.append(img_avg) + + x = paddle.concat(outputs, axis=1) + x = self.conv_bn_relu(x) + x = self.dropout(x) + + return x \ No newline at end of file diff --git a/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/module.py b/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/module.py new file mode 100644 index 00000000..1660d1c6 --- /dev/null +++ b/modules/image/semantic_segmentation/ocrnet_hrnetw18_voc/module.py @@ -0,0 +1,243 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
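+#
+# A minimal end-to-end usage sketch (illustrative only; it assumes the module
+# has been installed through PaddleHub, e.g. `hub install ocrnet_hrnetw18_voc`,
+# and that the demo image from this PR is on disk):
+#
+#     import paddlehub as hub
+#
+#     model = hub.Module(name='ocrnet_hrnetw18_voc')
+#     masks = model.predict(images=['N0007.jpg'], visualization=True)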
+import os +from typing import List + +import paddle +import numpy as np +import paddle.nn as nn +import paddle.nn.functional as F +from paddlehub.module.module import moduleinfo +import paddlehub.vision.segmentation_transforms as T +from paddlehub.module.cv_module import ImageSegmentationModule + +import ocrnet_hrnetw18_voc.layers as L +from ocrnet_hrnetw18_voc.hrnet import HRNet_W18 + +@moduleinfo( + name="ocrnet_hrnetw18_voc", + type="CV/semantic_segmentation", + author="paddlepaddle", + author_email="", + summary="OCRNetHRNetW18 is a segmentation model pretrained by pascal voc.", + version="1.0.0", + meta=ImageSegmentationModule) +class OCRNetHRNetW18(nn.Layer): + """ + The OCRNet implementation based on PaddlePaddle. + The original article refers to + Yuan, Yuhui, et al. "Object-Contextual Representations for Semantic Segmentation" + (https://arxiv.org/pdf/1909.11065.pdf) + Args: + num_classes (int): The unique number of target classes. + backbone_indices (list): A list indicates the indices of output of backbone. + It can be either one or two values, if two values, the first index will be taken as + a deep-supervision feature in auxiliary layer; the second one will be taken as + input of pixel representation. If one value, it is taken by both above. + ocr_mid_channels (int, optional): The number of middle channels in OCRHead. Default: 512. + ocr_key_channels (int, optional): The number of key channels in ObjectAttentionBlock. Default: 256. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. + pretrained (str, optional): The path or url of pretrained model. Default: None. + """ + + def __init__(self, + num_classes: int = 21, + backbone_indices: List[int] = [0], + ocr_mid_channels: int = 512, + ocr_key_channels: int = 256, + align_corners: bool = False, + pretrained: str = None): + super(OCRNetHRNetW18, self).__init__() + self.backbone = HRNet_W18() + self.backbone_indices = backbone_indices + in_channels = [self.backbone.feat_channels[i] for i in backbone_indices] + self.head = OCRHead( + num_classes=num_classes, + in_channels=in_channels, + ocr_mid_channels=ocr_mid_channels, + ocr_key_channels=ocr_key_channels) + self.align_corners = align_corners + self.transforms = T.Compose([T.Padding(target_size=(512, 512)), T.Normalize()]) + + if pretrained is not None: + model_dict = paddle.load(pretrained) + self.set_dict(model_dict) + print("load custom parameters success") + + else: + checkpoint = os.path.join(self.directory, 'ocrnet_hrnetw18.pdparams') + model_dict = paddle.load(checkpoint) + self.set_dict(model_dict) + print("load pretrained parameters success") + + def transform(self, img: np.ndarray) -> np.ndarray: + return self.transforms(img) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + feats = self.backbone(x) + feats = [feats[i] for i in self.backbone_indices] + logit_list = self.head(feats) + logit_list = [ + F.interpolate( + logit, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) for logit in logit_list] + return logit_list + + +class OCRHead(nn.Layer): + """ + The Object contextual representation head. + Args: + num_classes(int): The unique number of target classes. + in_channels(tuple): The number of input channels. + ocr_mid_channels(int, optional): The number of middle channels in OCRHead. Default: 512. + ocr_key_channels(int, optional): The number of key channels in ObjectAttentionBlock. Default: 256. 
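+
+    Examples:
+        A shape sketch (illustrative only; 270 channels matches the fused
+        HRNet-W18 output):
+
+        .. code-block:: python
+
+            import paddle
+
+            head = OCRHead(num_classes=21, in_channels=[270])
+            logit, aux_logit = head([paddle.rand([1, 270, 128, 128])])
+            # Both outputs are class maps at feature resolution,
+            # [1, 21, 128, 128]; `aux_logit` supervises the soft regions.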
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + ocr_mid_channels: int = 512, + ocr_key_channels: int = 256): + super().__init__() + + self.num_classes = num_classes + self.spatial_gather = SpatialGatherBlock() + self.spatial_ocr = SpatialOCRModule(ocr_mid_channels, ocr_key_channels, + ocr_mid_channels) + + self.indices = [-2, -1] if len(in_channels) > 1 else [-1, -1] + + self.conv3x3_ocr = L.ConvBNReLU( + in_channels[self.indices[1]], ocr_mid_channels, 3, padding=1) + self.cls_head = nn.Conv2D(ocr_mid_channels, self.num_classes, 1) + self.aux_head = nn.Sequential( + L.ConvBNReLU(in_channels[self.indices[0]], + in_channels[self.indices[0]], 1), + nn.Conv2D(in_channels[self.indices[0]], self.num_classes, 1)) + + + def forward(self, feat_list: List[paddle.Tensor]) -> paddle.Tensor: + feat_shallow, feat_deep = feat_list[self.indices[0]], feat_list[ + self.indices[1]] + + soft_regions = self.aux_head(feat_shallow) + pixels = self.conv3x3_ocr(feat_deep) + + object_regions = self.spatial_gather(pixels, soft_regions) + ocr = self.spatial_ocr(pixels, object_regions) + + logit = self.cls_head(ocr) + return [logit, soft_regions] + + +class SpatialGatherBlock(nn.Layer): + """Aggregation layer to compute the pixel-region representation.""" + + def forward(self, pixels: paddle.Tensor, regions: paddle.Tensor) -> paddle.Tensor: + n, c, h, w = pixels.shape + _, k, _, _ = regions.shape + + # pixels: from (n, c, h, w) to (n, h*w, c) + pixels = paddle.reshape(pixels, (n, c, h * w)) + pixels = paddle.transpose(pixels, [0, 2, 1]) + + # regions: from (n, k, h, w) to (n, k, h*w) + regions = paddle.reshape(regions, (n, k, h * w)) + regions = F.softmax(regions, axis=2) + + # feats: from (n, k, c) to (n, c, k, 1) + feats = paddle.bmm(regions, pixels) + feats = paddle.transpose(feats, [0, 2, 1]) + feats = paddle.unsqueeze(feats, axis=-1) + + return feats + + +class SpatialOCRModule(nn.Layer): + """Aggregate the global object representation to update the representation for each pixel.""" + + def __init__(self, + in_channels: int, + key_channels: int, + out_channels: int, + dropout_rate: float = 0.1): + super().__init__() + + self.attention_block = ObjectAttentionBlock(in_channels, key_channels) + self.conv1x1 = nn.Sequential( + L.ConvBNReLU(2 * in_channels, out_channels, 1), + nn.Dropout2D(dropout_rate)) + + def forward(self, pixels: paddle.Tensor, regions: paddle.Tensor) -> paddle.Tensor: + context = self.attention_block(pixels, regions) + feats = paddle.concat([context, pixels], axis=1) + feats = self.conv1x1(feats) + + return feats + + +class ObjectAttentionBlock(nn.Layer): + """A self-attention module.""" + + def __init__(self, in_channels: int, key_channels: int): + super().__init__() + + self.in_channels = in_channels + self.key_channels = key_channels + + self.f_pixel = nn.Sequential( + L.ConvBNReLU(in_channels, key_channels, 1), + L.ConvBNReLU(key_channels, key_channels, 1)) + + self.f_object = nn.Sequential( + L.ConvBNReLU(in_channels, key_channels, 1), + L.ConvBNReLU(key_channels, key_channels, 1)) + + self.f_down = L.ConvBNReLU(in_channels, key_channels, 1) + + self.f_up = L.ConvBNReLU(key_channels, in_channels, 1) + + def forward(self, x: paddle.Tensor, proxy: paddle.Tensor) -> paddle.Tensor: + n, _, h, w = x.shape + + # query : from (n, c1, h1, w1) to (n, h1*w1, key_channels) + query = self.f_pixel(x) + query = paddle.reshape(query, (n, self.key_channels, -1)) + query = paddle.transpose(query, [0, 2, 1]) + + # key : from (n, c2, h2, w2) to (n, key_channels, h2*w2) + key = 
self.f_object(proxy) + key = paddle.reshape(key, (n, self.key_channels, -1)) + + # value : from (n, c2, h2, w2) to (n, h2*w2, key_channels) + value = self.f_down(proxy) + value = paddle.reshape(value, (n, self.key_channels, -1)) + value = paddle.transpose(value, [0, 2, 1]) + + # sim_map (n, h1*w1, h2*w2) + sim_map = paddle.bmm(query, key) + sim_map = (self.key_channels**-.5) * sim_map + sim_map = F.softmax(sim_map, axis=-1) + + # context from (n, h1*w1, key_channels) to (n , out_channels, h1, w1) + context = paddle.bmm(sim_map, value) + context = paddle.transpose(context, [0, 2, 1]) + context = paddle.reshape(context, (n, self.key_channels, h, w)) + context = self.f_up(context) + + return context \ No newline at end of file diff --git a/paddlehub/datasets/__init__.py b/paddlehub/datasets/__init__.py index 4f097c2f..26b8bfa6 100644 --- a/paddlehub/datasets/__init__.py +++ b/paddlehub/datasets/__init__.py @@ -18,3 +18,5 @@ from paddlehub.datasets.minicoco import MiniCOCO from paddlehub.datasets.chnsenticorp import ChnSentiCorp from paddlehub.datasets.msra_ner import MSRA_NER from paddlehub.datasets.lcqmc import LCQMC +from paddlehub.datasets.base_seg_dataset import SegDataset +from paddlehub.datasets.opticdiscseg import OpticDiscSeg diff --git a/paddlehub/datasets/base_seg_dataset.py b/paddlehub/datasets/base_seg_dataset.py new file mode 100644 index 00000000..1cea3e9e --- /dev/null +++ b/paddlehub/datasets/base_seg_dataset.py @@ -0,0 +1,141 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Tuple, Callable + +import paddle +import numpy as np +from PIL import Image + + +class SegDataset(paddle.io.Dataset): + """ + Pass in a custom dataset that conforms to the format. + + Args: + transforms (Callable): Transforms for image. + dataset_root (str): The dataset directory. + num_classes (int): Number of classes. + mode (str, optional): which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. + train_path (str, optional): The train dataset file. When mode is 'train', train_path is necessary. + The contents of train_path file are as follow: + image1.jpg ground_truth1.png + image2.jpg ground_truth2.png + val_path (str. optional): The evaluation dataset file. When mode is 'val', val_path is necessary. + The contents is the same as train_path + test_path (str, optional): The test dataset file. When mode is 'test', test_path is necessary. + The annotation file is not necessary in test_path file. + separator (str, optional): The separator of dataset list. Default: ' '. + edge (bool, optional): Whether to compute edge while training. 
Default: False + + """ + + def __init__(self, + transforms: Callable, + dataset_root: str, + num_classes: int, + mode: str = 'train', + train_path: str = None, + val_path: str = None, + test_path: str = None, + separator: str = ' ', + ignore_index: int = 255, + edge: bool = False): + self.dataset_root = dataset_root + self.transforms = transforms + self.file_list = list() + mode = mode.lower() + self.mode = mode + self.num_classes = num_classes + self.ignore_index = ignore_index + self.edge = edge + + if mode.lower() not in ['train', 'val', 'test']: + raise ValueError( + "mode should be 'train', 'val' or 'test', but got {}.".format( + mode)) + + if self.transforms is None: + raise ValueError("`transforms` is necessary, but it is None.") + + self.dataset_root = dataset_root + if not os.path.exists(self.dataset_root): + raise FileNotFoundError('there is not `dataset_root`: {}.'.format( + self.dataset_root)) + + if mode == 'train': + if train_path is None: + raise ValueError( + 'When `mode` is "train", `train_path` is necessary, but it is None.' + ) + elif not os.path.exists(train_path): + raise FileNotFoundError( + '`train_path` is not found: {}'.format(train_path)) + else: + file_path = train_path + elif mode == 'val': + if val_path is None: + raise ValueError( + 'When `mode` is "val", `val_path` is necessary, but it is None.' + ) + elif not os.path.exists(val_path): + raise FileNotFoundError( + '`val_path` is not found: {}'.format(val_path)) + else: + file_path = val_path + else: + if test_path is None: + raise ValueError( + 'When `mode` is "test", `test_path` is necessary, but it is None.' + ) + elif not os.path.exists(test_path): + raise FileNotFoundError( + '`test_path` is not found: {}'.format(test_path)) + else: + file_path = test_path + + with open(file_path, 'r') as f: + for line in f: + items = line.strip().split(separator) + if len(items) != 2: + if mode == 'train' or mode == 'val': + raise ValueError( + "File list format incorrect! In training or evaluation task it should be" + " image_name{}label_name\\n".format(separator)) + image_path = os.path.join(self.dataset_root, items[0]) + label_path = None + else: + image_path = os.path.join(self.dataset_root, items[0]) + label_path = os.path.join(self.dataset_root, items[1]) + self.file_list.append([image_path, label_path]) + + def __getitem__(self, idx: int) -> Tuple[np.ndarray]: + image_path, label_path = self.file_list[idx] + if self.mode == 'test': + im, _ = self.transforms(im=image_path) + im = im[np.newaxis, ...] + return im, image_path + elif self.mode == 'val': + im, _ = self.transforms(im=image_path) + label = np.asarray(Image.open(label_path)) + label = label[np.newaxis, :, :] + return im, label + else: + im, label = self.transforms(im=image_path, label=label_path) + return im, label + + def __len__(self) -> int: + return len(self.file_list) + diff --git a/paddlehub/datasets/opticdiscseg.py b/paddlehub/datasets/opticdiscseg.py new file mode 100644 index 00000000..2d100194 --- /dev/null +++ b/paddlehub/datasets/opticdiscseg.py @@ -0,0 +1,78 @@ +# coding:utf-8 +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Callable + +import paddle +import numpy as np +from PIL import Image + +import paddlehub.env as hubenv +from paddlehub.utils.download import download_data +from paddlehub.datasets.base_seg_dataset import SegDataset + +@download_data(url='https://paddleseg.bj.bcebos.com/dataset/optic_disc_seg.zip') +class OpticDiscSeg(SegDataset): + """ + OpticDiscSeg dataset is extraced from iChallenge-AMD + (https://ai.baidu.com/broad/subordinate?dataset=amd). + + Args: + transforms (Callable): Transforms for image. + mode (str, optional): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. + edge (bool, optional): Whether to compute edge while training. Default: False + """ + + def __init__(self, + transforms: Callable = None, + mode: str = 'train'): + self.transforms = transforms + mode = mode.lower() + self.mode = mode + self.file_list = list() + self.num_classes = 2 + self.ignore_index = 255 + + if mode not in ['train', 'val', 'test']: + raise ValueError( + "`mode` should be 'train', 'val' or 'test', but got {}.".format( + mode)) + + if self.transforms is None: + raise ValueError("`transforms` is necessary, but it is None.") + + + if mode == 'train': + file_path = os.path.join(hubenv.DATA_HOME, 'optic_disc_seg', 'train_list.txt') + elif mode == 'test': + file_path = os.path.join(hubenv.DATA_HOME, 'optic_disc_seg', 'test_list.txt') + else: + file_path = os.path.join(hubenv.DATA_HOME, 'optic_disc_seg', 'val_list.txt') + + with open(file_path, 'r') as f: + for line in f: + items = line.strip().split() + if len(items) != 2: + if mode == 'train' or mode == 'val': + raise Exception( + "File list format incorrect! It should be" + " image_name label_name\\n") + image_path = os.path.join(hubenv.DATA_HOME, 'optic_disc_seg', items[0]) + grt_path = None + else: + image_path = os.path.join(hubenv.DATA_HOME, 'optic_disc_seg', items[0]) + grt_path = os.path.join(hubenv.DATA_HOME, 'optic_disc_seg', items[1]) + self.file_list.append([image_path, grt_path]) \ No newline at end of file diff --git a/paddlehub/module/cv_module.py b/paddlehub/module/cv_module.py index e49f173c..4dceb295 100644 --- a/paddlehub/module/cv_module.py +++ b/paddlehub/module/cv_module.py @@ -17,7 +17,7 @@ import time import os import base64 import argparse -from typing import List, Union +from typing import List, Union, Tuple from collections import OrderedDict import cv2 @@ -629,4 +629,113 @@ class StyleTransferModule(RunModule, ImageServing): self.arg_input_group.add_argument( '--input_path', type=str, help="path to image.") self.arg_input_group.add_argument( - '--style_path', type=str, help="path to style image.") \ No newline at end of file + '--style_path', type=str, help="path to style image.") + + +class ImageSegmentationModule(ImageServing, RunModule): + def training_step(self, batch: List[paddle.Tensor], batch_idx: int) -> dict: + ''' + One step for training, which should be called as forward computation. + + Args: + batch(list[paddle.Tensor]): The one batch data, which contains images, ground truth boxes, labels and scores. + batch_idx(int): The index of batch. 
+
+        Returns:
+            results(dict): The model outputs, such as loss.
+        '''
+
+        return self.validation_step(batch, batch_idx)
+
+    def validation_step(self, batch: List[paddle.Tensor], batch_idx: int) -> dict:
+        """
+        One step for validation, which should be called as forward computation.
+
+        Args:
+            batch(list[paddle.Tensor]): The one batch data, which contains images and labels.
+            batch_idx(int): The index of batch.
+
+        Returns:
+            results(dict): The model outputs, such as metrics.
+        """
+
+        label = batch[1].astype('int64')
+        criterionCE = nn.loss.CrossEntropyLoss()
+        logits = self(batch[0])
+        loss = 0
+        for i in range(len(logits)):
+            logit = logits[i]
+            if logit.shape[-2:] != label.shape[-2:]:
+                logit = F.interpolate(logit, label.shape[-2:], mode='bilinear')
+            # CrossEntropyLoss expects the class dimension last, so move NCHW to NHWC.
+            logit = logit.transpose([0, 2, 3, 1])
+            loss_ce = criterionCE(logit, label)
+            loss += loss_ce / len(logits)
+        return {"loss": loss}
+
+    def predict(self, images: List[Union[str, np.ndarray]], batch_size: int = 1, visualization: bool = True, save_path: str = 'seg_result') -> List[np.ndarray]:
+        '''
+        Obtain segmentation results.
+
+        Args:
+            images(list[str|np.ndarray]): Image paths or BGR images.
+            batch_size(int): Batch size for prediction.
+            visualization(bool): Whether to save colorized images.
+            save_path(str): Path to save colorized images.
+
+        Returns:
+            output(list[np.ndarray]): The segmentation masks.
+        '''
+        self.eval()
+        result = []
+
+        total_num = len(images)
+        loop_num = int(np.ceil(total_num / batch_size))
+        for iter_id in range(loop_num):
+            batch_data = []
+            handle_id = iter_id * batch_size
+            for image_id in range(batch_size):
+                try:
+                    image, _ = self.transform(images[handle_id + image_id])
+                    batch_data.append(image)
+                except IndexError:
+                    # The last batch may contain fewer than batch_size images.
+                    break
+            batch_image = np.array(batch_data).astype('float32')
+            pred = self(paddle.to_tensor(batch_image))
+            pred = paddle.argmax(pred[0], axis=1, keepdim=True, dtype='int32')
+
+            for num in range(pred.shape[0]):
+                if isinstance(images[handle_id + num], str):
+                    image = cv2.imread(images[handle_id + num])
+                else:
+                    image = images[handle_id + num]
+                h, w, c = image.shape
+                pred_final = utils.reverse_transform(pred[num:num + 1], (h, w), self.transforms.transforms)
+                pred_final = paddle.squeeze(pred_final)
+                pred_final = pred_final.numpy().astype('uint8')
+
+                if visualization:
+                    added_image = utils.visualize(images[handle_id + num], pred_final, weight=0.6)
+                    pred_mask = utils.get_pseudo_color_map(pred_final)
+                    pred_image_path = os.path.join(save_path, 'image', str(time.time()) + ".png")
+                    pred_mask_path = os.path.join(save_path, 'mask', str(time.time()) + ".png")
+                    if not os.path.exists(os.path.dirname(pred_image_path)):
+                        os.makedirs(os.path.dirname(pred_image_path))
+                    if not os.path.exists(os.path.dirname(pred_mask_path)):
+                        os.makedirs(os.path.dirname(pred_mask_path))
+                    cv2.imwrite(pred_image_path, added_image)
+                    pred_mask.save(pred_mask_path)
+
+                result.append(pred_final)
+        return result
+
+    @serving
+    def serving_method(self, images: List[str], **kwargs):
+        """
+        Run as a service.
+        """
+        images_decode = [base64_to_cv2(image) for image in images]
+        visual = self.predict(images=images_decode, **kwargs)
+        final = []
+        for mask in visual:
+            final.append(cv2_to_base64(mask))
+        return final
\ No newline at end of file
diff --git a/paddlehub/vision/segmentation_transforms.py b/paddlehub/vision/segmentation_transforms.py
new file mode 100644
index 00000000..a1d4ce2a
--- /dev/null
+++ b/paddlehub/vision/segmentation_transforms.py
@@ -0,0 +1,307 @@
+# coding: utf8
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random +from typing import Callable, Union, List, Tuple + +import cv2 +import numpy as np +from PIL import Image +import paddlehub.vision.functional as F + + +class Compose: + """ + Do transformation on input data with corresponding pre-processing and augmentation operations. + The shape of input data to all operations is [height, width, channels]. + + Args: + transforms (list): A list contains data pre-processing or augmentation. + to_rgb (bool, optional): If converting image to RGB color space. Default: True. + + Raises: + TypeError: When 'transforms' is not a list. + ValueError: when the length of 'transforms' is less than 1. + """ + + def __init__(self, transforms: Callable, to_rgb: bool = True): + if not isinstance(transforms, list): + raise TypeError('The transforms must be a list!') + if len(transforms) < 1: + raise ValueError('The length of transforms ' + \ + 'must be equal or larger than 1!') + self.transforms = transforms + self.to_rgb = to_rgb + + def __call__(self, im: Union[np.ndarray, str], label: Union[np.ndarray, str] = None) -> Tuple: + """ + Args: + im (str|np.ndarray): It is either image path or image object. + label (str|np.ndarray): It is either label path or label ndarray. + + Returns: + (tuple). A tuple including image, image info, and label after transformation. + """ + if isinstance(im, str): + im = cv2.imread(im).astype('float32') + if isinstance(label, str): + label = np.asarray(Image.open(label)) + if im is None: + raise ValueError('Can\'t read The image file {}!'.format(im)) + if self.to_rgb: + im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) + + for op in self.transforms: + outputs = op(im, label) + im = outputs[0] + if len(outputs) == 2: + label = outputs[1] + im = np.transpose(im, (2, 0, 1)) + return (im, label) + + +class ColorMap: + "Calculate color map for mapping segmentation result." + + def __init__(self, num_classes: int = 256): + self.num_classes = num_classes + 1 + + def __call__(self) -> np.ndarray: + color_map = self.num_classes * [0, 0, 0] + for i in range(0, self.num_classes): + j = 0 + lab = i + while lab: + color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j)) + color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j)) + color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j)) + j += 1 + lab >>= 3 + color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)] + color_map = color_map[1:] + return color_map + + +class SegmentVisual: + """Visualization the segmentation result. + Args: + weight(float): weight of original image in combining image, default is 0.6. 
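+
+    Examples:
+        A minimal sketch (illustrative only; the file path is a placeholder
+        and the mask must match the image's height and width):
+
+        .. code-block:: python
+
+            import cv2
+            import numpy as np
+
+            vis = SegmentVisual(weight=0.6)
+            h, w = cv2.imread('N0007.jpg').shape[:2]
+            mask = np.zeros((h, w), dtype='uint8')  # per-pixel class ids
+            blended = vis('N0007.jpg', mask, save_dir='seg_result')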
+ """ + + def __init__(self, weight: float = 0.6): + self.weight = weight + self.get_color_map_list = ColorMap(256) + + def __call__(self, image: str, result: np.ndarray, save_dir: str) -> np.ndarray: + color_map = self.get_color_map_list() + color_map = np.array(color_map).astype("uint8") + # Use OpenCV LUT for color mapping + c1 = cv2.LUT(result, color_map[:, 0]) + c2 = cv2.LUT(result, color_map[:, 1]) + c3 = cv2.LUT(result, color_map[:, 2]) + pseudo_img = np.dstack((c1, c2, c3)) + im = cv2.imread(image) + vis_result = cv2.addWeighted(im, self.weight, pseudo_img, 1 - self.weight, 0) + + if save_dir is not None: + if not os.path.exists(save_dir): + os.makedirs(save_dir) + image_name = os.path.split(image)[-1] + out_path = os.path.join(save_dir, image_name) + cv2.imwrite(out_path, vis_result) + + return vis_result + + +class Padding: + """ + Add bottom-right padding to a raw image or annotation image. + Args: + target_size (list|tuple): The target size after padding. + im_padding_value (list, optional): The padding value of raw image. + Default: [127.5, 127.5, 127.5]. + label_padding_value (int, optional): The padding value of annotation image. Default: 255. + Raises: + TypeError: When target_size is neither list nor tuple. + ValueError: When the length of target_size is not 2. + """ + + def __init__(self, + target_size: Union[List[int], Tuple[int], int], + im_padding_value: Union[List[int], Tuple[int], int] = (128, 128, 128), + label_padding_value: int = 255): + if isinstance(target_size, list) or isinstance(target_size, tuple): + if len(target_size) != 2: + raise ValueError( + '`target_size` should include 2 elements, but it is {}'. + format(target_size)) + else: + raise TypeError( + "Type of target_size is invalid. It should be list or tuple, now is {}" + .format(type(target_size))) + self.target_size = target_size + self.im_padding_value = im_padding_value + self.label_padding_value = label_padding_value + + def __call__(self, im: np.ndarray , label: np.ndarray = None) -> Tuple: + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + im_height, im_width = im.shape[0], im.shape[1] + if isinstance(self.target_size, int): + target_height = self.target_size + target_width = self.target_size + else: + target_height = self.target_size[1] + target_width = self.target_size[0] + pad_height = target_height - im_height + pad_width = target_width - im_width + if pad_height < 0 or pad_width < 0: + raise ValueError( + 'The size of image should be less than `target_size`, but the size of image ({}, {}) is larger than `target_size` ({}, {})' + .format(im_width, im_height, target_width, target_height)) + else: + im = cv2.copyMakeBorder(im, 0, pad_height, 0, pad_width, cv2.BORDER_CONSTANT, + value=self.im_padding_value) + if label is not None: + label = cv2.copyMakeBorder(label, 0, pad_height, 0, pad_width, cv2.BORDER_CONSTANT, + value=self.label_padding_value) + if label is None: + return (im,) + else: + return (im, label) + + +class Normalize: + """ + Normalize an image. + Args: + mean (list|tuple): The mean value of a data set. Default: [0.5, 0.5, 0.5]. + std (list|tuple): The standard deviation of a data set. Default: [0.5, 0.5, 0.5]. + Raises: + ValueError: When mean/std is not list or any value in std is 0. 
+
+class Normalize:
+    """
+    Normalize an image.
+
+    Args:
+        mean (list|tuple): The mean value of the data set. Default: (0.5, 0.5, 0.5).
+        std (list|tuple): The standard deviation of the data set. Default: (0.5, 0.5, 0.5).
+
+    Raises:
+        ValueError: When mean/std is neither list nor tuple, or when any value in std is 0.
+    """
+
+    def __init__(self,
+                 mean: Union[List[float], Tuple[float]] = (0.5, 0.5, 0.5),
+                 std: Union[List[float], Tuple[float]] = (0.5, 0.5, 0.5)):
+        self.mean = mean
+        self.std = std
+        if not (isinstance(self.mean, (list, tuple)) and isinstance(self.std, (list, tuple))):
+            raise ValueError('{}: input type is invalid. It should be list or tuple'.format(self))
+        if reduce(lambda x, y: x * y, self.std) == 0:
+            raise ValueError('{}: std is invalid!'.format(self))
+
+    def __call__(self, im: np.ndarray, label: np.ndarray = None) -> Tuple:
+        """
+        Args:
+            im (np.ndarray): The image data.
+            label (np.ndarray, optional): The label data. Default: None.
+
+        Returns:
+            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
+        """
+
+        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
+        std = np.array(self.std)[np.newaxis, np.newaxis, :]
+        im = F.normalize(im, mean, std)
+
+        if label is None:
+            return (im,)
+        else:
+            return (im, label)
+
+
+class Resize:
+    """
+    Resize an image.
+
+    Args:
+        target_size (list|tuple, optional): The target size of the image. Default: (512, 512).
+        interp (str, optional): The interpolation mode, consistent with OpenCV:
+            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']. When it is
+            'RANDOM', an interpolation mode is chosen at random. Default: 'LINEAR'.
+
+    Raises:
+        TypeError: When 'target_size' is neither list nor tuple.
+        ValueError: When 'interp' is not one of the pre-defined methods ('NEAREST',
+            'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM').
+    """
+
+    # The supported interpolation modes.
+    interp_dict = {
+        'NEAREST': cv2.INTER_NEAREST,
+        'LINEAR': cv2.INTER_LINEAR,
+        'CUBIC': cv2.INTER_CUBIC,
+        'AREA': cv2.INTER_AREA,
+        'LANCZOS4': cv2.INTER_LANCZOS4
+    }
+
+    def __init__(self, target_size: Union[List[int], Tuple[int]] = (512, 512), interp: str = 'LINEAR'):
+        self.interp = interp
+        if not (interp == 'RANDOM' or interp in self.interp_dict):
+            raise ValueError('`interp` should be one of {}'.format(list(self.interp_dict.keys())))
+        if isinstance(target_size, (list, tuple)):
+            if len(target_size) != 2:
+                raise ValueError(
+                    '`target_size` should include 2 elements, but it is {}'.format(target_size))
+        else:
+            raise TypeError(
+                'Type of `target_size` is invalid. It should be list or tuple, but it is {}'.format(
+                    type(target_size)))
+
+        self.target_size = target_size
+
+    def __call__(self, im: np.ndarray, label: np.ndarray = None) -> Tuple:
+        """
+        Args:
+            im (np.ndarray): The image data.
+            label (np.ndarray, optional): The label data. Default: None.
+
+        Returns:
+            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
+
+        Raises:
+            TypeError: When 'im' is not a numpy array.
+            ValueError: When 'im' is not 3-dimensional.
+        """
+
+        if not isinstance(im, np.ndarray):
+            raise TypeError('Resize: image type is not numpy.')
+        if len(im.shape) != 3:
+            raise ValueError('Resize: image is not 3-dimensional.')
+        if self.interp == 'RANDOM':
+            interp = random.choice(list(self.interp_dict.keys()))
+        else:
+            interp = self.interp
+        im = F.resize(im, self.target_size, self.interp_dict[interp])
+        if label is not None:
+            # Nearest-neighbor interpolation keeps label values intact.
+            label = F.resize(label, self.target_size, cv2.INTER_NEAREST)
+
+        if label is None:
+            return (im,)
+        else:
+            return (im, label)
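+
+# A quick check of the normalization arithmetic (assuming, as is common, that
+# F.normalize computes (im - mean) / std on values already scaled to [0, 1]):
+# with the default mean = std = (0.5, 0.5, 0.5), a value v maps to
+# (v - 0.5) / 0.5 = 2v - 1, so [0, 1] is stretched to [-1, 1].
+#
+#     norm = Normalize()
+#     im, = norm(np.full((4, 4, 3), 0.75, dtype='float32'))  # values -> 0.5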
diff --git a/paddlehub/vision/utils.py b/paddlehub/vision/utils.py
index 2b3c1fa1..39a8b549 100644
--- a/paddlehub/vision/utils.py
+++ b/paddlehub/vision/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,11 +13,14 @@
 # limitations under the License.
 
 import os
+from typing import Callable, Union, List, Tuple
 
+import cv2
 import paddle
 import PIL
 import numpy as np
 import matplotlib as plt
+import paddle.nn.functional as F
 
 
 def is_image_file(filename: str) -> bool:
@@ -26,7 +29,7 @@ def is_image_file(filename: str) -> bool:
     return ext in ['.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff']
 
 
-def get_img_file(dir_name: str) -> list:
+def get_img_file(dir_name: str) -> List[str]:
     '''Get all image file paths in several directories which have the same parent directory.'''
     images = []
     for parent, _, filenames in os.walk(dir_name):
@@ -39,7 +42,7 @@ def get_img_file(dir_name: str) -> List[str]:
     return images
 
 
-def box_crop(boxes: np.ndarray, labels: np.ndarray, scores: np.ndarray, crop: list, img_shape: list):
+def box_crop(boxes: np.ndarray, labels: np.ndarray, scores: np.ndarray, crop: List[int], img_shape: List[int]) -> Tuple:
     """Crop the boxes ,labels, scores according to the given shape"""
 
     x, y, w, h = map(float, crop)
@@ -99,7 +102,7 @@ def draw_boxes_on_image(image_path: str,
                         boxes: np.ndarray,
                         scores: np.ndarray,
                         labels: np.ndarray,
-                        label_names: list,
+                        label_names: List[str],
                         score_thresh: float = 0.5,
                         save_path: str = 'result'):
     """Draw boxes on images."""
@@ -145,7 +148,7 @@ def draw_boxes_on_image(image_path: str,
     plt.close('all')
 
 
-def get_label_infos(file_list: str):
+def get_label_infos(file_list: str) -> str:
     """Get label names by corresponding category ids."""
     from pycocotools.coco import COCO
     map_label = COCO(file_list)
@@ -175,10 +178,115 @@ def gram_matrix(data: paddle.Tensor) -> paddle.Tensor:
     return gram
 
 
-def npmax(array: np.ndarray):
+def npmax(array: np.ndarray) -> Tuple[int, int]:
     """Get max value and index."""
     arrayindex = array.argmax(1)
     arrayvalue = array.max(1)
     i = arrayvalue.argmax()
     j = arrayindex[i]
     return i, j
+
+
+def visualize(image: Union[np.ndarray, str], result: np.ndarray, weight: float = 0.6) -> np.ndarray:
+    """
+    Convert the segmentation result to a color image and blend it with the input image.
+
+    Args:
+        image (str|np.ndarray): The path of the original image, or a BGR image array.
+        result (np.ndarray): The predicted label map of the image.
+        weight (float): The weight of the original image in the blend; the pseudo-color
+            map takes weight (1 - weight). Default: 0.6.
+
+    Returns:
+        vis_result (np.ndarray): The visualized result.
+    """
+
+    color_map = get_color_map_list(256)
+    color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
+    color_map = np.array(color_map).astype("uint8")
+    # Use OpenCV LUT for color mapping; `result` must be a uint8 label map.
+    c1 = cv2.LUT(result, color_map[:, 0])
+    c2 = cv2.LUT(result, color_map[:, 1])
+    c3 = cv2.LUT(result, color_map[:, 2])
+    pseudo_img = np.dstack((c1, c2, c3))
+    if isinstance(image, str):
+        im = cv2.imread(image)
+    else:
+        im = image
+    vis_result = cv2.addWeighted(im, weight, pseudo_img, 1 - weight, 0)
+
+    return vis_result
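+
+# A minimal usage sketch (paths are placeholders): `pred` is an H x W array of
+# class ids, e.g. the argmax over a segmentation model's class dimension.
+#
+#     mask = pred.astype('uint8')
+#     overlay = visualize('image.jpg', mask, weight=0.6)
+#     cv2.imwrite('overlay.jpg', overlay)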
+
+
+def get_pseudo_color_map(pred: np.ndarray) -> PIL.Image.Image:
+    """Visualize the segmentation mask as a pseudo-color 'P'-mode image."""
+    pred_mask = PIL.Image.fromarray(pred.astype(np.uint8), mode='P')
+    color_map = get_color_map_list(256)
+    pred_mask.putpalette(color_map)
+    return pred_mask
+
+
+def get_color_map_list(num_classes: int) -> List[int]:
+    """
+    Returns the color map for visualizing the segmentation mask, which supports
+    an arbitrary number of classes.
+
+    Args:
+        num_classes (int): Number of classes.
+
+    Returns:
+        (list). The color map as a flat [R, G, B, R, G, B, ...] list.
+    """
+
+    num_classes += 1
+    color_map = num_classes * [0, 0, 0]
+    for i in range(0, num_classes):
+        # Spread the bits of the class index across the R, G and B channels,
+        # starting from the most significant bit.
+        j = 0
+        lab = i
+        while lab:
+            color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
+            color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
+            color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
+            j += 1
+            lab >>= 3
+    # Drop the leading all-black entry; class 0 then maps to (128, 0, 0).
+    color_map = color_map[3:]
+    return color_map
+
+
+def get_reverse_list(ori_shape: List[int], transforms: List[Callable]) -> List[tuple]:
+    """
+    Get the reverse list of transforms.
+
+    Args:
+        ori_shape (list): The origin shape of the image.
+        transforms (list): The list of transforms.
+
+    Returns:
+        list: A list of tuples in one of two formats:
+            ('resize', (h, w)): the image shape before resizing;
+            ('padding', (h, w)): the image shape before padding.
+    """
+    reverse_list = []
+    h, w = ori_shape[0], ori_shape[1]
+    for op in transforms:
+        if op.__class__.__name__ in ['Resize', 'ResizeByLong']:
+            reverse_list.append(('resize', (h, w)))
+            # target_size is stored as (width, height), matching the Padding
+            # branch below.
+            w, h = op.target_size[0], op.target_size[1]
+        if op.__class__.__name__ in ['Padding']:
+            reverse_list.append(('padding', (h, w)))
+            w, h = op.target_size[0], op.target_size[1]
+    return reverse_list
+
+
+def reverse_transform(pred: paddle.Tensor, ori_shape: List[int], transforms: List[Callable]) -> paddle.Tensor:
+    """Recover the prediction to the origin shape by undoing resize and padding in reverse order."""
+    reverse_list = get_reverse_list(ori_shape, transforms)
+    for item in reverse_list[::-1]:
+        if item[0] == 'resize':
+            h, w = item[1][0], item[1][1]
+            pred = F.interpolate(pred, (h, w), mode='nearest')
+        elif item[0] == 'padding':
+            h, w = item[1][0], item[1][1]
+            pred = pred[:, :, 0:h, 0:w]
+        else:
+            raise ValueError("Unexpected info '{}' in im_info".format(item[0]))
+    return pred
-- 
GitLab