From 075fef0a16b82e734e138f1c163b556ae688bcbd Mon Sep 17 00:00:00 2001 From: SunAhong1993 Date: Mon, 2 Nov 2020 16:19:56 +0800 Subject: [PATCH] update caffe2paddle --- x2paddle/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 243 bytes .../__pycache__/caffe_convert.cpython-37.pyc | Bin 0 -> 549 bytes x2paddle/__pycache__/convert.cpython-37.pyc | Bin 0 -> 7976 bytes x2paddle/caffe_convert.py | 11 + x2paddle/convert.py | 127 +- .../core/__pycache__/__init__.cpython-37.pyc | Bin 0 -> 157 bytes .../__pycache__/fluid_code.cpython-37.pyc | Bin 0 -> 3102 bytes .../core/__pycache__/graph.cpython-37.pyc | Bin 0 -> 3052 bytes .../core/__pycache__/op_mapper.cpython-37.pyc | Bin 0 -> 6176 bytes .../core/__pycache__/program.cpython-37.pyc | Bin 0 -> 16508 bytes x2paddle/core/__pycache__/util.cpython-37.pyc | Bin 0 -> 324 bytes x2paddle/core/program.py | 225 ++- .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 166 bytes .../__pycache__/caffe_decoder.cpython-37.pyc | Bin 0 -> 8973 bytes .../__pycache__/caffe_pb2.cpython-37.pyc | Bin 0 -> 122151 bytes .../pytorch_decoder.cpython-37.pyc | Bin 0 -> 2268 bytes x2paddle/decoder/caffe_decoder.py | 14 +- x2paddle/decoder/pytorch_decoder.py | 46 +- .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 162 bytes x2paddle/op_mapper/dygraph/__init__.py | 0 .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 170 bytes .../dygraph/caffe2paddle/__init__.py | 0 .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 183 bytes .../caffe_op_mapper.cpython-37.pyc | Bin 0 -> 32347 bytes .../__pycache__/caffe_shape.cpython-37.pyc | Bin 0 -> 11845 bytes .../dygraph/caffe2paddle/caffe_op_mapper.py | 1313 +++++++++++++++++ .../dygraph/caffe2paddle/caffe_shape.py | 443 ++++++ x2paddle/op_mapper/dygraph/prim2code.py | 447 ++++++ x2paddle/op_mapper/static/__init__.py | 0 .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 175 bytes .../op_mapper/static/caffe2paddle/__init__.py | 0 .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 188 bytes 
.../caffe_op_mapper.cpython-37.pyc | Bin 0 -> 24228 bytes .../__pycache__/caffe_shape.cpython-37.pyc | Bin 0 -> 9346 bytes .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 3017 bytes .../__pycache__/axpy.cpython-37.pyc | Bin 0 -> 1117 bytes .../convolutiondepthwise.cpython-37.pyc | Bin 0 -> 2731 bytes .../detectionoutput.cpython-37.pyc | Bin 0 -> 1608 bytes .../__pycache__/normalize.cpython-37.pyc | Bin 0 -> 1268 bytes .../__pycache__/permute.cpython-37.pyc | Bin 0 -> 1000 bytes .../__pycache__/priorbox.cpython-37.pyc | Bin 0 -> 1487 bytes .../__pycache__/register.cpython-37.pyc | Bin 0 -> 1380 bytes .../__pycache__/relu6.cpython-37.pyc | Bin 0 -> 784 bytes .../__pycache__/roipooling.cpython-37.pyc | Bin 0 -> 1141 bytes .../__pycache__/select.cpython-37.pyc | Bin 0 -> 1265 bytes .../__pycache__/shufflechannel.cpython-37.pyc | Bin 0 -> 849 bytes .../__pycache__/upsample.cpython-37.pyc | Bin 0 -> 1394 bytes .../caffe2paddle/caffe_custom_layer/axpy.py | 4 +- .../convolutiondepthwise.py | 26 +- .../caffe_custom_layer/detectionoutput.py | 22 +- .../caffe_custom_layer/normalize.py | 10 +- .../caffe_custom_layer/permute.py | 2 +- .../caffe2paddle/caffe_custom_layer/relu6.py | 2 +- .../caffe_custom_layer/roipooling.py | 2 +- .../caffe2paddle/caffe_custom_layer/select.py | 2 +- .../caffe_custom_layer/upsample.py | 4 +- .../static/caffe2paddle/caffe_op_mapper.py | 628 ++++---- .../__pycache__/__init__.cpython-37.pyc | Bin 0 -> 168 bytes .../caffe_optimizer.cpython-37.pyc | Bin 0 -> 1933 bytes 59 files changed, 2842 insertions(+), 486 deletions(-) create mode 100644 x2paddle/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/__pycache__/caffe_convert.cpython-37.pyc create mode 100644 x2paddle/__pycache__/convert.cpython-37.pyc create mode 100644 x2paddle/caffe_convert.py create mode 100644 x2paddle/core/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/core/__pycache__/fluid_code.cpython-37.pyc create mode 100644 
x2paddle/core/__pycache__/graph.cpython-37.pyc create mode 100644 x2paddle/core/__pycache__/op_mapper.cpython-37.pyc create mode 100644 x2paddle/core/__pycache__/program.cpython-37.pyc create mode 100644 x2paddle/core/__pycache__/util.cpython-37.pyc create mode 100644 x2paddle/decoder/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/decoder/__pycache__/caffe_decoder.cpython-37.pyc create mode 100644 x2paddle/decoder/__pycache__/caffe_pb2.cpython-37.pyc create mode 100644 x2paddle/decoder/__pycache__/pytorch_decoder.cpython-37.pyc create mode 100644 x2paddle/op_mapper/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/op_mapper/dygraph/__init__.py create mode 100644 x2paddle/op_mapper/dygraph/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/op_mapper/dygraph/caffe2paddle/__init__.py create mode 100644 x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/caffe_op_mapper.cpython-37.pyc create mode 100644 x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/caffe_shape.cpython-37.pyc create mode 100644 x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py create mode 100644 x2paddle/op_mapper/dygraph/caffe2paddle/caffe_shape.py create mode 100644 x2paddle/op_mapper/dygraph/prim2code.py create mode 100644 x2paddle/op_mapper/static/__init__.py create mode 100644 x2paddle/op_mapper/static/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/__init__.py create mode 100644 x2paddle/op_mapper/static/caffe2paddle/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/__pycache__/caffe_op_mapper.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/__pycache__/caffe_shape.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/__init__.cpython-37.pyc create mode 100644 
x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/axpy.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/convolutiondepthwise.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/detectionoutput.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/normalize.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/permute.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/priorbox.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/register.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/relu6.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/roipooling.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/select.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/shufflechannel.cpython-37.pyc create mode 100644 x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/upsample.cpython-37.pyc create mode 100644 x2paddle/optimizer/__pycache__/__init__.cpython-37.pyc create mode 100644 x2paddle/optimizer/__pycache__/caffe_optimizer.cpython-37.pyc diff --git a/x2paddle/__pycache__/__init__.cpython-37.pyc b/x2paddle/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..451b052adca420e2b7bca89043c99353f61b79ba GIT binary patch literal 243 zcmZ?b<>g`kf}qA(@nJyvF^B^LOhASM5En}Ui4=w?h7`sWrX0pxrYI&xhE%3zhA8F~ z=3oX*mMT^QJqtbamyAGlnvA!&0}@kGa#G!k5(_f?G+AzO$H$kY78Pga=f%fI@g(OL zrRo(F<);@V<`#jhxWx|Ttz;--1@ghfFGu~{yb}H5(!9#VytKr;bOS^E2qTEW@yYoq 
wsR&+PYPo)eQ32Ra{rLFIyv&mLc)fzkTO2mI`6;D2sdgZD6|(>d7A77h03y>rC;$Ke literal 0 HcmV?d00001 diff --git a/x2paddle/__pycache__/caffe_convert.cpython-37.pyc b/x2paddle/__pycache__/caffe_convert.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dec68cfab01e41e3a452bc544bc4dba411e68915 GIT binary patch literal 549 zcmaKqy-ve05PbR1QXo7=N8W%}%F4vX z%*4fULI;E;Tlp?`U(Tmoy6yzQ`l{ZdEk@|0fo@swZ~?mr;b4g26bVc)MjR6h*h+2T z5C_;H1I>Fq4}tA;B*tV6@5f}qoF$qMKEZ_MxSn3o)ryBa&KXz0uI4*=L#5OK&jiXu zB(GfE&iBc|%R*AdQtq?1&W{^OWJopb)9u;@RPg)rpMGV3^1Pz_l{)E9N8=VQKg5kGOdTpCwWF!j)`t41&?$(vJ}h>M zfa$|hr^E`Z_<_m`j1Sa~!H%*DGtsKBW30+*Xql|e8o;Wu<7}QC0;I+k*kM5G zPJ=Bz)cCejf2@GJ{|uf zUZCu$Pn4+=DO(y-nHK0IB`bVbR3)s)N&;5S!3urusA0Xv(6S?pX;U`y;vIezEamj&L19NtBC3F9`| z^Etd1*k#tjeA_v^Wp)L4SJ|}`53Bp)T3BwiMjms9q1GCOE#~wGu44&^)gKHUYupVs zt-i;tz;S(#_lMq&g`VfSkD7X1pbd%3AnQ(sL0s5$hU3s|wXi>d7d#5UbsU~fdy=Eq zw|d=vKN%%7TCE*tu(3(FJK;&${!~ox^}E}S%?2E}et1U67I+pxt$=r3f7lHiCdCh{ ztyUj8;_P_*A@KX*YBIZ>%|UO|VvaK&I$iD#+zl&3yfmBUvXF9_X1YA?GB$LsiCKsB zR!h9H#k|0}7ry3=gMsIEhu5uO)3Ms)T{(lEHyUFVne|{dnY5t!)ag}Pt!;<9|m}Y>_Rz7dnQashJ9g&2}D*JsyM& zC|MWV>)RuTZ#ZB!9%TZ69kzj6;+5DDiA$=vHIQxu6k9=D@_T$R4#EqG=(Kylut=|I zp~t(}1!&7%k4=V7oAzGLnvBB-k3kGZxntQd-1Q;<&hgK`xAG!>-M|u#+9qf?_Rqnw#M$k zQ{;MN#kTA0ET`(YoX80Mk4=neOV&rg*Z&=EifX6@RZIT9a9hjK=G*@d&;LcMbCPmdYx_^2nH0MB5xzM&{HM?dq^Ps!gkc zwnns(5h;<*>K|&;T4Y9mP3u8@uknQV*Z1Zk^+_^rv$4i0R>ta!oyGN4`}KSG?7MHQ z*>Ape$A0tn_in!#7O%Chw6BE4tKxCz2exC6`8}r*HT97vp51;*9)wH3q2mfOJ24nzGE$o2jdi$i#khf z$^zC`zu}M;a&*k~)wjlX$@r+(nDy4a`OfQrR0$${p2Nc{YY1Fvj3Aev#5nGaa)VfK zFw*{Lv;}FEc|X^jk2TMa%Z|G};GP?svjvH(ww?GX+m4H44)f}qN=>89q}=VGZzoND z3>(oon_+lkdn9H_Ifn~d!J3W5lAj4$#l3~)}CxOK&!HbIzCW9f1D;th$ zi}h$WVpB9=xxr>^r13VkJ}aW|Q^c?agU@_2KTRMl=*Q(`UBryVgoLOP^YA4ljau|9Xf;**0aC+dsQd-s_sDSRSI1Sr zC5-rEG+<;JWwz52jHtkLWS>ib6#&zwWmR!&=oQg}ISbQMhJ{g)%ri2!N_%BidZJD% ztUNUXBdRd3z{34+jB1MD50_KB64uG`BhTIJW@o7-MFOz{<~|%a?F# z*2yH1mBuBRV)FBV#?@RV7guuunqMGF60BpS3)ay$#t&ix%1nVR=**?&?jsEQ{axN9 
z8XTo$D9vecO$?JL@-KkkTevAuS3_+iN?TAZsB-}d4CTdTs{96UdbC6ItK!Nc&>zv+ zy#(c?2&6~)mb#@*3s7P0q0TP^8q?F5l8ZDNE2L0h6h&KQB|6ScaV!G2vWPw~2DA+k zeW3OcR1#`vM8(L!vjW}%F# z45AnVg$mE6Do45D%8CHy<4U-aN1!*K6*Y2jZ>_Dpo9;M85$ex^ zPF=oE!^+}aIy|KbGR2j4yB*H&gF37oo|hR^5_Kp}QWUe`cpVPqN$9L+Vu4Sy8W%`p zehK?8b-k6@R2DMYgpw$mXy>BI3J5!AvbSkeB8n_7-6WiI8BUV@J3Qr;29mE6-ciio z2^<>va!p#>%j;AMfwLizxXPF-k;WFP#bG`DYoaKAQs6zQ(1WQ5?`MJ#N z9&1z6H4qHQ3Qb{ECS<5a)if-Cr?5C!ZEaW|HKsKR6_1r^J)-OncoZ;zB|{9+&68zD z`V$?tPBxk3g?t&)=2vlhN||rCl$u_05V{HTeEC2SxSQhKOAbq)!$QTWifo;jiO%+b zj=nJ0)-%jCJ&xs~iWb-37H7D`M&sH#@AjOT#U4tCW=0SfkqGvj@KcQ!Cf~S2hq77j z_+RGSJOmbWE)?;Izf5ap(TzeIIbSM4Vr~33;jR*H-pbPKFTqaJo;!p$cW-x{ z2234OT%5(XPWw|#H61duop#pZZ-8TLWPK4H8#BX?=Y(ta)(IXdCr-IhdRH!tirN$Oy$^D-*JW=-+FhJ=q|B)>;P zS8&V1Bvk=Pb;+{*Te!g}WR6Fwj$_(TLpF!%Wu!+6RV7Qr1)sTQ}S1PvjbGF?vZAua-8Md3Kaf&*I?M_Aya9Szlvi?LT+GBad zPF3VPki3%}tg(5ScFd|IDNXj0-v?D(PSYyN9Gd0X#&+sbiZmOiF$76&^QZ=pOI(ta zNL0T5hF89cn}W5czmm$oi(ZdF^rLsQ{9@^UsZkL&P`5Os#43E_^YA}t3qK^X6ZB{K z2yFlgvwKApZPiHKD)A0OVR=}dQt7s|XGGdo<%vr4cee%4hRB#Idvgqa@rO``X^}iSt6>y99R;~m9X|LkQ89Uz z=-y}+(J8@!N0-n_&#nLM`0flo+K<>HE@7&xq;wtLU4FDegwr&pd|Vx;F&fDM9!19Ni<(ofdS@<>=0ev5dfCXM|J~?VekE3iVXNg+A50QBNS%y(90H8lEJC(4Mlx?!mSNdU?&ey8yS1n z0|(IS&F~fTj(}dbHiKa7UteC9SZ$O^mZc|=_pHX&27)OHHsQ5Y%GOV&;xtaR$xY9_ zx$lo#o9!9|ICQKT2)F)3y{S1++vte;F{^-B2i58iq8{@NRB zciywU?@g>OR+@@J%-O~wjn}3O@-+Ur(5Y4-EKnk}o#RszmYxu)5wUiX&lo2dkHSED`0i^lQlHVk=lk zj{Fu0^~x6!ijrqZ+$kq9nKVWu>Yr=)geVb!LP2=z3(<=ml;mW0Bv5!Wud|*z+_j#m zsrgSc*v6gaxmX`MZmj#eeq2BedRt~@M<88nih49v!+nv+#71gVaY@>^EHG5E??L<( z(8=Se_xIeny%DIaBfGIV~KT2uj(hZ-t6p$L)eEB^|;ptC&xD%~hN%EaX^wbtlH zi`Oy5DNMxZxN`eZ&k-M$nkHYsTQM1*zem_VB5ZB@N~~>P#dVDz82>;JFO0ea_aC4U zUvm|;k(`bS8cs)aA(ESFL0eGqC$g{wR3eT8b~4MuYI;pw#7T<&#EB|-A`igX%1U|- z&IVVm5}0Pk*+ZOFqS-h?=5t13He+IXCA(;8T^7vKgESb;G7e RZCd7{dD=K<8pgK`^*<=0n;ZZD literal 0 HcmV?d00001 diff --git a/x2paddle/caffe_convert.py b/x2paddle/caffe_convert.py new file mode 100644 index 0000000..0751de6 --- /dev/null +++ b/x2paddle/caffe_convert.py 
@@ -0,0 +1,11 @@ +from x2paddle.decoder.caffe_decoder import CaffeDecoder +Decoder = CaffeDecoder + +from x2paddle.op_mapper.dygraph.caffe2paddle.caffe_op_mapper import CaffeOpMapper +DygraphOpMapper = CaffeOpMapper + +from x2paddle.op_mapper.static.caffe2paddle.caffe_op_mapper import CaffeOpMapper +StaticOpMapper = CaffeOpMapper + +from x2paddle.optimizer.caffe_optimizer import CaffeOptimizer +StaticOptimizer = CaffeOptimizer \ No newline at end of file diff --git a/x2paddle/convert.py b/x2paddle/convert.py index 0a2f5d1..4371fed 100644 --- a/x2paddle/convert.py +++ b/x2paddle/convert.py @@ -13,6 +13,7 @@ # limitations under the License. from six import text_type as _text_type +from x2paddle import program import argparse import sys @@ -66,8 +67,8 @@ def arg_parser(): parser.add_argument( "--without_data_format_optimization", "-wo", - type=_text_type, - default="True", + action="store_true", + default=False, help="tf model conversion without data format optimization") parser.add_argument( "--define_input_shape", @@ -88,11 +89,25 @@ def arg_parser(): default=False, help="define whether merge the params") parser.add_argument( - "--input_shapes", - "-is", + "--jit_type", + "-jt", + type=_text_type, + default="script", + help="define the jit type of pytorch Module.") + parser.add_argument( + "--input_files", + "-if", action='append', default=None, - help="define the inputs' shape") + help="define the inputs' file path") + parser.add_argument( + "--paddle_type", + "-pt", + type=_text_type, + default="dygraph", + help="define the paddle model type after converting(dygraph/static)" + ) + return parser @@ -117,30 +132,27 @@ def tf2paddle(model_path, "[ERROR] Tensorflow is not installed, use \"pip install tensorflow\"." 
) return - from x2paddle import program + from x2paddle.decoder.tf_decoder import TFDecoder from x2paddle.op_mapper.tf_op_mapper import TFOpMapper - from x2paddle.optimizer.tensorflow.bias import BiasOpt - from x2paddle.optimizer.tensorflow.transpose import TransposeOpt - from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt + from x2paddle.op_mapper.tf_op_mapper_nhwc import TFOpMapperNHWC + from x2paddle.optimizer.tf_optimizer import TFOptimizer print("Now translating model from tensorflow to paddle.") model = TFDecoder(model_path, define_input_shape=define_input_shape) - mapper = TFOpMapper(model) + + mapper = TFOpMapperNHWC(model) program.build() - bias_opt = BiasOpt() - transpose_opt = TransposeOpt() - batch_norm_opt = BatchNormOpt() - bias_opt.run(program) - batch_norm_opt.run(program) - transpose_opt.run(program) program.gen_model(save_dir) -def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False): - from x2paddle.decoder.caffe_decoder import CaffeDecoder - from x2paddle.op_mapper.caffe_op_mapper import CaffeOpMapper - from x2paddle.optimizer.caffe_optimizer import CaffeOptimizer +def caffe2paddle(proto, weight, save_dir, caffe_proto, + paddle_type, params_merge=False): + from x2paddle.caffe_convert import Decoder + if paddle_type == "dygraph": + from x2paddle.caffe_convert import DygraphOpMapper as OpMapper + else: + from x2paddle.caffe_convert import StaticOpMapper as OpMapper import google.protobuf as gpb ver_part = gpb.__version__.split('.') version_satisfy = False @@ -149,12 +161,10 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto, params_merge=False): version_satisfy = True assert version_satisfy, '[ERROR] google.protobuf >= 3.6.0 is required' print("Now translating model from caffe to paddle.") - model = CaffeDecoder(proto, weight, caffe_proto) - mapper = CaffeOpMapper(model) - optimizer = CaffeOptimizer(mapper) - optimizer.merge_bn_scale() - optimizer.merge_op_activation() - mapper.save_inference_model(save_dir, 
params_merge) + model = Decoder(proto, weight, caffe_proto) + mapper = OpMapper(model) + mapper.pd_graph.build() + mapper.pd_graph.gen_model(save_dir) def onnx2paddle(model_path, save_dir, params_merge=False): @@ -162,8 +172,8 @@ def onnx2paddle(model_path, save_dir, params_merge=False): try: import onnx version = onnx.version.version - if version < '1.6.0': - print("[ERROR] onnx>=1.6.0 is required") + if version != '1.6.0': + print("[ERROR] onnx==1.6.0 is required") return except: print("[ERROR] onnx is not installed, use \"pip install onnx==1.6.0\".") @@ -185,7 +195,7 @@ def onnx2paddle(model_path, save_dir, params_merge=False): print("Paddle model and code generated.") -def pytorch2paddle(model_path, save_dir, input_shapes): +def pytorch2paddle(model_path, save_dir, jit_type, input_files): # check pytorch installation and version try: import torch @@ -202,9 +212,12 @@ def pytorch2paddle(model_path, save_dir, input_shapes): return print("Now translating model from pytorch to paddle.") - from x2paddle.decoder.pytorch_decoder import PyTorchDecoder + from x2paddle.decoder.pytorch_decoder import ScriptDecoder, TraceDecoder from x2paddle.op_mapper.pytorch2paddle import pytorch_op_mapper - model = PyTorchDecoder(model_path) + if jit_type == "trace": + model = TraceDecoder(model_path, input_files) + else: + model = ScriptDecoder(model_path) mapper = pytorch_op_mapper.PyTorchOpMapper(model) mapper.graph.build() print("Model optimizing ...") @@ -212,34 +225,15 @@ def pytorch2paddle(model_path, save_dir, input_shapes): graph_opt = GraphOptimizer() graph_opt.optimize(mapper.graph) print("Model optimized.") - if input_shapes is not None: - real_input_shapes = list() - for shape in input_shapes: - sp = shape[1:-1].split(",") - for i, s in enumerate(sp): - sp[i] = int(s) - real_input_shapes.append(sp) - else: - real_input_shapes = None - mapper.graph.gen_model(save_dir, real_input_shapes) + mapper.graph.gen_model(save_dir, jit_type, input_files) def paddle2onnx(model_path, 
save_dir, opset_version=10): - import paddle.fluid as fluid - try: - import paddle2onnx - except: - print( - "[ERROR] paddle2onnx not installed, use \"pip install paddle2onnx\"") - - import paddle2onnx as p2o - model = p2o.PaddleDecoder(model_path, '__model__', '__params__') - mapper = p2o.PaddleOpMapper() - mapper.convert( - model.program, - save_dir, - scope=fluid.global_scope(), - opset_version=opset_version) + from x2paddle.decoder.paddle_decoder import PaddleDecoder + from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper + model = PaddleDecoder(model_path, '__model__', '__params__') + mapper = PaddleOpMapper() + mapper.convert(model.program, save_dir, opset_number=opset_version) def main(): @@ -260,6 +254,7 @@ def main(): assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)" assert args.save_dir is not None, "--save_dir is not defined" + assert args.paddle_type in ["dygraph", "static"], "--paddle_type must be 'dygraph' or 'static'" try: import paddle @@ -267,8 +262,8 @@ def main(): print("paddle.__version__ = {}".format(paddle.__version__)) if v0 == '0' and v1 == '0' and v2 == '0': print("[WARNING] You are use develop version of paddlepaddle") - elif int(v0) != 1 or int(v1) < 6: - print("[ERROR] paddlepaddle>=1.6.0 is required") + elif int(v0) != 2 or int(v1) < 0: + print("[ERROR] paddlepaddle>=2.0.0 is required") return except: print( @@ -277,12 +272,11 @@ def main(): if args.framework == "tensorflow": assert args.model is not None, "--model should be defined while translating tensorflow model" - assert args.without_data_format_optimization in [ - "True", "False" - ], "--the param without_data_format_optimization should be defined True or False" + without_data_format_optimization = False define_input_shape = False params_merge = False - without_data_format_optimization = True if args.without_data_format_optimization == "True" else False + if args.without_data_format_optimization: + 
without_data_format_optimization = True if args.define_input_shape: define_input_shape = True if args.params_merge: @@ -296,7 +290,7 @@ def main(): if args.params_merge: params_merge = True caffe2paddle(args.prototxt, args.weight, args.save_dir, - args.caffe_proto, params_merge) + args.caffe_proto, args.paddle_type, params_merge) elif args.framework == "onnx": assert args.model is not None, "--model should be defined while translating onnx model" params_merge = False @@ -304,10 +298,13 @@ def main(): if args.params_merge: params_merge = True onnx2paddle(args.model, args.save_dir, params_merge) - + elif args.framework == "pytorch": + assert args.model is not None, "--model should be defined while translating pytorch model" + pytorch2paddle(args.model, args.save_dir, args.jit_type, args.input_files) + elif args.framework == "paddle2onnx": assert args.model is not None, "--model should be defined while translating paddle model to onnx" - paddle2onnx(args.model, args.save_dir, opset_version=args.onnx_opset) + paddle2onnx(args.model, args.save_dir, args.onnx_opset) else: raise Exception( diff --git a/x2paddle/core/__pycache__/__init__.cpython-37.pyc b/x2paddle/core/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7b06785e8706535f67261a86f21141e15df2ba4 GIT binary patch literal 157 zcmZ?b<>g`kg2`30;z9Id5CH>>K!yVl7qb9~6oz01O-8?!3`HPe1o6vFKR2&LzqmB7 zGBGbLF)!V~P(Q*bATcE+CpA7fKP45x%S$cSuP`bAOX(-)7p3aQ$7kkcmc+;F6;$5h Su*uC&Da}c>1DXFBh#3HN8?5esw|BiONWmhcn!9jt1F1RJ(2vp{^O+IErw7Idpm)p^vZb52!#)&E;wt};B| zj=l?iTxaYTV&;#H&UJj*8z7QNp0F-&b54Dci1(Q)^6K3nY5*Ihe;=LCRW=8 z?@AB6XLwIq@3N-<8@#b5*Oi+p?B8znWCVsk(dv+V{s?rgKN)u26v*h+w`>P z^|dYIPOhu{P=(zf%yXsPIPDMeOuM~7j*ec5v!FA`@?JMcC?}(5 zzGrF~A)K~$x@o?X4btN<-3!zGr=QvR@aj9Elt~nHP=a$@xVIMtY4q97@M_-}>~wl6 z+A(g8?RNiI`#}(=aUKK@P?j|i#$E2?b@7hsrX<^QqeX3e*wJTd^DC+(`4|;AK#dM7 z>bczXb8%ozgt}tnVv~(|TBH7fEe(ghquwmY*4@MwFcz;6C+$O_+68>3Ti`vF7hF1< 
zsPs9@HOMgTyv)q&=aq@L%hhAj8;fBddjAHywKuk)wel(=`ZZrFEa{{7Qy~fq@l>WY zw96+Paa1?ii)<{?`b;Ew_qFXi5{g~f3U8C4e_}Cr}3wyeL!VqWju^2mNu_BM@JX-X>hCkFnE}M8S#7(iI zUKVDZY+Nb3%a7jpfTiEeqC#En3blc`5EIn~v;B@y;AZuqo~aP!gO!}=h=Uca znFSATVO6Te;-#5qQ!!|6lq3Gp-bWSdRK;0kHc%ONc)M_CmHF6I#*-`g{e?H-hc0~n zKYyU)rkip_uAYbqAA68E6E5#Nq>O%$Yo!IMw!X>6-nmM6vr34gPq1#hT(=9^{ETeY zO~f2$dZVS;$ZA|`oboHD{MyJhXK8!1T(;)yX4|LyBkjbgjD{ooqtC~;Mq;Zm+Il^S zx>1@pdV7uX^f$8Gy+I-y3-oIARO8)T#p(XYx`iRmwa>?BHs4PV)81!kBR}p(XT!HO zub%QPZ4;u5s_ziG{1;sW)@)u<6&y}ojk7q-@-Xd0+REagu4Tr5K&~=fH30`{*o_pS zw{nOq5uvlK?IeahL7?_-hgk_-dp*?+bG1Rz%Im{U)X(EysvYv&S1yT{I=v)`Iwa5Z z(%VW#Dv~z{1f4jKx|#CG%EAckmH}x`Mg1h~L``222DN9Q5u&xdI1MNgDk{*jjLkZX zj!Qqzr(rUP3_6yiPTu|fC^sPY2)OK15XNm@<#k>YHR1AA^aQv+Y+f?u{) z(XR_p+LAqA!$tyl*$X_nL zW$8%w4#(B!Ngr3BBP+6s)|E@LhPJ$7>$+^jevyww!y zVMw)G9)~73D|&ARl-~iqjJ6l!qqW6Ei_509+o6p@4{b+E$XqvXqC<02DsSo+IQsPeQAS)b^p^SQCeMImzf*VqDz+qi+96GoLdsvC`a z;6Ya}b)}r}zFd6YY}c|>4`P~fFHq6S^LLqg4}mnTvW5V_e*#ryvv2?a literal 0 HcmV?d00001 diff --git a/x2paddle/core/__pycache__/graph.cpython-37.pyc b/x2paddle/core/__pycache__/graph.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..676c341d5bc2039d17fee11f2fe863d646233cf1 GIT binary patch literal 3052 zcmcguTW=dh6rP#Ac7W%_~p+3wh!@v-T#9f_P(Bb31c8=bQ7LnY+u&C5G>}@$de{Wyb!d z#^SS4Sw~8~L?)T!J=W(<&Z#a?7foU6R?li$kXb#u={#W4md-OKozUI0ngz67SwOpB z+Mcw&W;O3QR%A7AGP{nwH>Q!zE3sTj)y&2jpDSr8CDx{6xLjA z_c}?c-A*(Zrim`Z!_-t9vx;8%G)Y25_TN`3R=RY!c+Jvw68754gQAL#>My3sC=Nca z_oK9)45O#5sNIUTuHUGCd-JQ7l)caoVDOPvtKAO$DEzTLx;Zcd>Ori+`W9hxZSYik zzTb&Dsqde|!in(*?%{Vug^$;A)2=Os1svn(qcVq*v`PVFmv_Y;!Ot6{{dOzaHri{O z(5i8@gn`C}EQ8lEQh^5g{_AKZv?e>Yb=Do2F7#5FZ+#vOY>cfZobnx^&X5O40X3|~ zHybsP0a(F6)JX&F#OZdZjw|B&ez@!VZ(>B^B4fP5Z9c9XMS%v8buOt*M)|%j`F=l^ zLxAt=vhVK>TfMADtzZ##in7y`5llxivR)C<&LWu|JznxW%g6%A@{jy>9VyvBmNS4$ z@r=R$!2pzCfpk$ivLHQ_t}MzD%7Q#4%W@e^@#KnJg`_Aez-CQ5#;}uNy@HKWHskw5 z50P4iCic8^=A~=sQZU2-CZ1jj;$AN_bWZfrLnT8M%6pw4)m{X@^!u%Wa!fVvTtu_+Ql_Zs%eh6z 
zO)!}-^#v-Ud*tGz;XU^F*7st{rwCOuA>ji10;NUel&1(?U3-s98#E|mjx)meTNP5U%L*6Q6w~<~n_S~VdN8Kd1mQc-)_fItF_L=h0B?_Q3xE~Jr91dAb zEgYyt2MVHzJ8XCD5gfIPn@F->#0AuSSMsSn+MIxz=yCV>4pV>7I@Frj7uW?hbw;-) zPPc$nuS_khWI2#Z@e!`WJB-?BQQX|7IWuo!8Hd@%UbYk;vZ=j$ckVn^!+`hPAXwD4 z1htL(WH=fKbDvSK!L$rf1Ey!ygX0i(}&pSgxtvQf6|Qtt|8X1s89gwvYmqZ0d(RLRFMW($XwN2 z$Z!_c)Sd`+Hx)C&nGt6DeCkA{MQiG$_JmH#@tui-`v;DF=AbprN8+YB z;I}i+bmxRj2E9(IMKI8I5aX)0@LB>*BN>ir8I~D-H(*C|$=o;t6}CGgbrq76{8f8I zKmzw6MkK_0#!H5wHoz>QZVOjr^+gO_Ovyo{=#+!Fjs`uC42b7bKsXZi|X5QbZ@r_zZouP&4T2}Nf z%I@*Es7B#URVgEXR21pd$CMpL22(TP;U#oXY)R;vLbxw@j_2ZEEuB`>hgT}z4((w& zRCqfm0g)nf6}h;gb5-<6)o#4mh3@8EU?!floA&NxuaHkxF~?kpW$*&qT9vhb0G`5! ASpWb4 literal 0 HcmV?d00001 diff --git a/x2paddle/core/__pycache__/op_mapper.cpython-37.pyc b/x2paddle/core/__pycache__/op_mapper.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4513b2a4fc29f7362501fd499e6dff65f3adb3b GIT binary patch literal 6176 zcmaJ_U6UKfb)BAX46s=2a!HC5B|$1%HeAfEB)ce;LMs&Y;YhJoj7gdF+R}IyJ=g^n zn89{8v{-oHRCyJNa!M{$<;~@CVP8}gKl%sgDdqfvy!2D5k{9O(8--0JynbR&J$1f!fk%6c zukyxc_UvNeonm*FrIkmPKG+ufFnjFOn)-#a81WKz_>xul@ukA=U7kH&xYTRm&#s_d zK)n~i!($fS>X`hn#CDFjyiW&)=uk~jVlPBKG((y2D ztCTloSB(x0oivlVcs-SEUEzsJVl#@%y925HbgZy?9o{@v>1`U)I}%-$>b2Xc z=q7!X+{te%(arX>*Y4*sZH3x1yP_kL#2Fk-JG?8+5k#6Nf8gQ73O)UK!4mcMoE*l`h>CxWGmmn-VVqgCow$JK#ud#LJGaItOHrNI8 zkC3ae5P51LzE3l?H@DhkM)a%W@$nF%$RrtbJc;k<6WU!Qt~TX?FWE z>{SZ)AQ0K%2k@<^2+6k&Ke%fhzFh?Kx?dwtb%CjS`!4)+%Es4KwE$|MpXV^Q$}~d% zk?H>n%qTK5LJR0sjoz2gvy002t-~)cH+xpYvqHW+trfLd9dE0pXMbXS!a6p$x${vt zT~T(g(OWBO_gT62Vuib(xpQy#wv8h$caZ#scJLhS;1!7bR?nI-BklVP*yxukH_if^G6*=tuSDZWiFE}34-*3O5C&}_G5i)X30Qraaf(%6`|PK9B&6C2$-c#{;2 zEcO_iToR)UMx{2d_w#nrm#=It+QG{!Ly_`sJ5gzCz3jTSuszbzUZ+3ma=pRRPBQ8% z*kLz|ha%sD)#=(^Ki^II@h&<(&`v&tul6x_z1q!&BNZoN&$y1%QCD^|sgkUniW`_0 zfWSbKuB342RA77^zA;YQBLxTuU%U0*t$xym&tdNR;$m=gAk=8M9N+Hdx&qkbF?L6` z>Vm?Z_RZi@2l+^07E-)TrInFP%XYw&p?1_@NYIN0+UX9J@Tidk6&*s5TcIKyr{Z-~ 
zm`$GVy&0t3@L;~PFQSRO24TUWBD)S}i&!1bw{S(9HQ<;wT+M|OQYkrI9c}8a&&1E6 z*(UYHkDTrX9{CDHYVE-Fb`ZR|4T9ishr38!1VNt%APOE2c?Bgtk9ZYnz-xR3X~^p^ ztcDKX9lm4WV`-Y?li1p1SNsh+-3ehq2I4f3xUF<>ly>*_aU#Tz#9atjpexgUr?g^%%eg7XaX0I# zIDQ{Rl7gItP4%(8^q;IPR@ie-WYI0bp1 zQ#d^rjKnJ}@D^J(3O4gg25{H_HUtF&n_*xx+uXiwsgOIfN?Ce}8fJ|*45ro4M@Y;E zh}F4karbt~0p_T1XY%}={ZypQ1P{gCp=@TkYRb`&&>Z3VI8|4hHSOlu&We7TxI--a%ccd;%z6Qgt zvB|mR34N_s!Xw!>{Rp1|Jm#Q2L`GQ!LvXRh^>N2+>9a&NU8_gOg~Qsg3TNhWcFHco zUN2fN!h~a+4k^24bUo;Th(4gf{sv%bUPZy=%3S|!|76r=AhC=2wwU-unb3|s za&z|6gv-A}t)#n5xj7MgXj@F9<)w!LEm;(8jr?b95^W1E543P1FK~8~;vzOh&{l5G zgR28oPwh`(TX+lZ8#-@Vg;VcycDxHhUpXbh<4=Raokbu#RS=%qG*l}9mI~-aXy`>$ zREnrq2jQt4_U>AsB&$WRq$Jg8O*M)dbZqf4bZSOtr3hw0u|hLE{5>d1P*g!lJ_hVi z-}Ujm!biJX(^crM8r}aM-zZj5c4gWqh>BD|m;Uom7j^W5chi#wVfs_CTGoEfM1(i0 zM~SKb6Gn1p>v+x?%ClCi;hN>n&MzoWhnW}?PSK`!p?9%ZyH6Ted$Gnz{$4B;`POZ*k|4C6BN z1fl*_D!2)O5LF!>n<3)|_?BS?e?SXKERV48GMj8I-BuQq*;Ev-z9pMSBG2}khFD&04*O{$)24|8 z&4pHTkYuBz-#^}LJ+9Zx!iUFdKhMflBZwW#1xYC3M*IMaKC}JAb;UMqgl9SxZ&JZu zlX!~+QMeL?e@MA^Nf>0lLAhHb{+7g#NZckt99X;up*J3N65@#JO;|iv!ggqy(H#$T zhHk_$cRNnOf0-J>!SFFnc;T@fEZ=xRxUY3rrWL`>p!5=3@@ zgG5mb=Mld~?(gx)eFy-a9U_oYcn!fCYfDzSG=2uo2;@DrfMXj#R-iEmm0KXjpEy9mH-S&qETqpUs!|}F0W)XgO=v}P zttPZQe8K@17e2=|3W$U}KB1ZXVPSfxa<6) z)}RaLv{F>Kbr6dGIQ$YimEKx!9R$z;0sODSFU^Q3Bjn`3JIt1@l4r)|!895_Y5Ly4 zIP2grQQ`2A_{uc;oE<)k_R;J-$5^~_5oOS*&NVK~HL6REDm3mvqk2Z8$|HPkqN}Wp z>niO*t(?o*c~D+GHoK%QV;tv_iEHZd`T5u!y7(YxPgjbS*%i$835pEJBvn3$4cU~2$eBPs*@$pCYo5I_e!KOuASnKd{d8hqZGOexpR9;fjRXPB zRXme$fg73~wVpbI0tCW393%(0l8W*IEIL%?;d9MEnvWEsK!M0e{DuVa2<-`cS4j=y z6+J3FAkn9)$gqy`7UGxF*Ok3AE7{l5O%Yq9AQq}?<+ahUYiSTd2e`Y^M>Sms(8_!y z+G)R=LE+~#%r{9qPohoYyAZgF?os|9NEi>lLAmciXcjLxyCH@KHU!m5s;!i;0fM;` z%_oAqu9iD!CN1bg&qJI^e2lVx!6OMNEx0^gLrHGnn@@<#Cw=W{03!<&L?F#0Q$RK&r;4! 
ze5SzH1VS*qzOc|u`k=NsQr*7zEvY?Af-4ZN?qM9p907rk&Y#eK(C~w{w0zjeEX_vTnPuUG$5RmTi}|%YHe{ zpYdmqpJ`Y8iacliS$WR+bMl<`=jA!$FMQroIhB9UQu*N6{fxhecTW}YUXb_WDs#=M z6@P+}S~WYV^mMZm)i-uK>ru1YL5ioEx0@m2Yj~O2(2ecjPFHW$dp9p3q4b7UfezHG z&Gl%VZH)dhh;=XW!|&>jB-`>J1so%cJO-RZlDuu zJ%>8{;}U1_`$hc1KSdB(aVE0wJK;j)DEq$sbsH&eltIc9NO735_mwy^boZ@StsB>W z$r0ERN@l>IhJxYEkE-?m~Ww(9vfgV2jzgoQYZuo&kMmSP)W86&dr#71HXvTOaH>ovHz zQJ|~mnw{Qm6rQVYbai!VX{zC^?rvLEZwA%qZZD{IH>wH?-@iOnwYwXA(z@+th`P5M z?cJc-?BG8u9??e_fhabs7z=B#SCi7e5VV8spo2=No_73G>(?qt5rotUqef>vNL-FQ zsk{`1fhMVaPV25tT=tM;n^CYGCYi9gmy}Ikbxt@;a$AA!1nnee2Aky5UXx#0raJO@}(b;HpHlKdx;yahjmekj~Dww==f;$)YF7f*WxMQDByQe{c5dole{qT!R!2>) zJ8TYreDd7Af?vq_#a7E2+OfSaya!}vuhkrV963p@(dz{rh1Vs%W@VjZ$|g9?)B+|` zmz)>y8cIKQ|M>JE#wEZWbRMN@Sx%PwP_HM&dVRaAcH4}X>-C-8M%(1*M^IE(8E~1W z0(Pv)0LVB(vQp13o{^NJ8O*;!kuigLN`bWqgUPC#FqpjZ@XV@$D&m<_B~`{VuVz#Q zPfyLNIXnw$UM=8RRI6%H9Y+f#wWLlUrK~QhWpxrMGipVhLP|wFtxl^)kTR>zs7H}9 zr>g2PJm=L}^*Ejj>IwBJJddeo)H!t?B^OmqJ&Ba#o7Sea;i#Wc7m&K7Y;4M<5&QeR zZuD-gB{`gssJXt*t>hn{Tzc8o*Aa;<;T(44$TO2Z-6$*Xx%8P&(>>ug1>s3WMV?cV zuq>PhBr&7OWRqN*>^IZ}z7ztr8H9Bluf#LaW=EHhGdVa(am;qYq>MN?L0#HNilF=6 zEjhDEX?-`0y4xaCl9D;MI1C%zT1IG3Kgm{qhQWjsgbGL_FXI>T)LOPT_0OD&(Rq-C zJNi73fMnrT5Q*&}q_J}L9XvC9Vlg0@unF$kNA^>RUH{0wSaXuR4tnhdxfIDYJtX=P znm9DZ#0!H+sC^BYqZ5)IQ!}hlfdj{Myq^?tvOiD`%mKA^4QZo389&@-g-uS@{!3Fm zub8tr>3LG@llOmNs(;20_dm=2r*`%F)BsK$9>7$&Pd%YDHzF&7meqnrc;6mC>uP0` zwN=oUls$w#*2=W9lAnPFXvewz%pe!#p`F>_l9>%xIkDTcQ(s4c7@9EoVd1}LJX{FwT=(_AfKj8B3XF+XsjlhyzX zv1JdP*xt1EodNXMR%ZO>Ms5rZSJ_*3csa@%Z97fNY&rUW?|mcAKrjC4AiK9pp4Q65 z*^|~*PJeY2A7X{~)BRvxoO{9AdGl=xYtIoNrR5C^`xf>rzxQmMM{5@b-Xt&fP{(N% zRR--k`g_wkXz6><-(QFJEwgz+_-ZSI{{IRonb=NKY@~dI6evb72-n?vQ^x%Z828Ij z2{TGZPTG+X6htDQn3x?m6 zw*NE@glCKuA%=^Bbr1!rDzvw%S*tc@&IdVFlGzL*!>+F2RZ|i|^Y5r&Z^FM4cPqFX zik_ZiHT3TQnn`DOJJ5|NpcO#tL6e@7oES1;t!&QMi){1f8HkckbvVgDozWzEzbHbX zE{!j<5Y+>JCKdZoPdJK`%s9j^3z(HTO(Al_FCegR;wpBzy zSFkbu+)UA4wioR)nMIUXK<)~jPohK_x#dHrcHGZc59Ac6VauugzMR^jwGVj$i`W`u 
z9@u;T20P^pGo+3O_Kn{MeYk@x(%m@Qa!vY;|B2Kbc^d5EEbL-hz*l2$(;j$xe=64T zp9~7hjte*?@5O~-Uh%|&5@YC&zv_TTl#uGhC8|$@0@6Vx;g4geQQ)sXgjJ0*ZqBfz zLlPkUM=0m;J;K=?qJ@32-c4=&UtuZZWDUw4SGj|7T*f@gTY3FA7#&X-QowuViPJO_ zyEJHMSGOu#dC>e)TCNhiQVvfkS7y1Lx4;#ih1I@{RhY$Ds|?TrdYqBd@Ep#8j2-R# zW-Oy+ESh8K`}S}?cJ^(_V}BSG;@=pL%k)<^{V5q&w1Aq&aQ3oWu0Fk2y}|4r`DD6(H+p9G(3(gwmD_UB^Ph3f!{gu);0ox0oQP@V zhtjgGC6(U-RUn^nZz^6$% zAdOEkmF>n>P`!D#8g+Z!c6YP6-e_0Du8w~4fAA+PMZbh2(JukF+For4n9wJPwY?hb ztp`0K3;h{+hkf4xj1j{2ySqvAq;^VQMqQ(zh!#aYV(c606NU@%wCQIU@T?1$(zJ$+ zwnZb=@Gx>2r~v`&rDhoJ-b73bUlVgXdvmwhR{CWYr{qsEy>3s8o@4=Ru$~SHjE5LZ za%?&+g*d=FMROVh7yd$;Q{T|AuR1^@LOo`CeqqewM$y|1ojA8c;cI?b2ix7-fnjxi z?lujI$cKGqxEuA4oM~SZv+&E!4j60`sss?4LtIh6ff7H!FH{Jqw&lR-mTVV%FPHK^ zAR92-|0I9H|gN z5XR0GYwv8FX*oD7%L5lpx-E!th;J&FSz$Woaj@seT8vSbHB=@9CaNImEy?Zn6y#Vf zCsNU$*=%=jHb&xQvXy27jtLM`KTrnR^q0|)*$%Op`TCdmg%t!AR(}EOMX9u4_n(?p zrAc82+FHE=sI1WrUtCQ`Wy`3h74}hg>?xUlsD-~kvQgZfve$p)V|yD*HEs|^zwFiu zq*|0}p!;^X6j?(oxNv)`W9i1N^6Jrf7qY_*|Z zMa5xBI9CxIb7tRy*3TI9sX=}m^CUJ4tHA#5AEOev1s!<*Eqxk(;#CS2yvZ3KY|R2^#~&{*h~B7o~)I7kV7FW3v# zcO#(v*+>Iq!`$Y+(1b>>*Kbk4U)Mn(QoVLwzk^BXzt4b1ed19cKsTYm3*VKr&7dPx z0O<=;ff6~XjDUEvo=L7Ju)IGLbQ(8-MjM3H5L5OVJqLdNszK}hlJO*L?l!dYp*jeL zUpKGial0J|`s_w~x2b&ZbLo71@0F`xzACt3M(=h6b`jge_o(=A_DMzUZuchO9h?+h zztHYBBscUI!-mM@1Jvw4HtKe@3yo&G*$?z8oe%Z#s}8tEqQrb2F@Ij#3A?)*M`EMd zhU;WWdLAhnV_Yzq8V_=>8QB zwR2ae%Z*Tpd}pXiutY1giI6rcG72)bHLHk6d7UTs&hQNqWD(Y;r{*Aw zVLkpV@*o*J$R%$$ujaw6J(PnyR*+fH5`GC~kD)Bn7S-autsr%V$M-F@bOMrx^*fO2 zqcSE1C<|;QGp`#jVphdLDLN6CqWpbo9zR6+6KJ!j<{{lnkP*(fon@vYO;RwjJ=jy{ z(b{s_+KOonEgUZS9}mpIQzzLbcZXK_Xw7$qD8+qP8O}-X&?qn`jsrFrM=~i3|F_s@ zA~R2~i7*`NTU2xT6PfWUBCz$HMc@Z}&&4)Yc@;X8OI$%Y*sCY004vd_b}EcxRPG5l zPq>EARw}7vcGvoD6@OyzC3X=aF-ym7FUj7~@aDoRXm2D1VGrWs1$3j}FXD;i6JEwf zXKP%bW6tQ51T9K4!sj2OfTGGsLxV@H?DWqc7F6tMB1R{b`|~4Snu=ODXYD)vr)WY| zjg(jww!TV#Z4(v`m~FMW-Gk9teWfej>*%QBM(oq`JSu&a{`{Aa&_A8lpKia}2&>Xj zf961Lzm&cLI=*E*s%;Rev6$ z{bsWRzhXBv@cOyx3oymce`H_Qw0QiSG5q?4dxIC=y*Idlnt4Ro#7B0mKd*ymS9hu= 
ziy=5DcEYNy9Fo#{1F4A%q1w@u(E0)caY={?EmBG>q4UgmnSr=2t}rHOofz1Ad=+E4 zC{iP&z6Y}EwVRN=?e3jGPZ{gf>FCYqhQSB|4B5c1Fz7QFiwR?}3kHe%2B0-Mu%?8~ zLC8;UBDYGKyM+kAK9AVa|vk>7}6%8MEE@p_z*`b&brGX;Exi*!%w8dKfvHp*?^OpT200Va$C5a zqU=p4Vhq=B(wL_*X{?~!G*(pEG*(i%GzK3r)=d zZVcHpA7_yYga};HX)UPImJ|MVbWD|bCk7G-=JBX*>;eh=Psp1YE{@Z{!B{dnE;Cu8 zp$%zPfi%lSCx|@>f1CTjfwct73|7!6wV;klg0b=GPMYmT1|Dy_491P+wxJ zTHXh|9X<0LQSRt7m*nx>vwnU9`X7?h$Y(C4k>{RG+?(BQyO!~@yQuNJpW~zcy#6kB zUVn!H51={gk9@fk?Dc|mBJN=`+LfbTg(Adj9*^}O{c>lk)4kKFD!DlX3=Y?LglC19 z!3I_n=gv+20a}ALo|Q7Svqzn4O$zk$Z#DFFD13hQi)+_0ay~F_J}@EowW~-&h$Mte zdHIbuUdPd;UZ(#p$NXz-Cf5i#8o|kML5HlCeBIz{_ZbrnKMTMgY&zS}x^dUfMO`Yi ze(`$H0TlRZ5U%6;Mk8v7qK7MGO&S4R*BW_2HFbr`*=d-O76ygZkJxS6$a(<8lsd^Y@T1bf7Rp(j9R_%?JK@Bma6 z-yu8(j%a$%OLOohPEvM?;12Mwf=}E;EK3g+VYOk7=y`Y(HS#0WCW6nPRTNFp)y2Ih z_=etAL_-lAM@mEEYzgW@oolCz48+pU8{&T#w_lEuMU*?5 zbJg!L5YY7uVvcfcYM?2%ADi3TP`-ahji%`BW}Q>N|*e3U1_KK|x4_A1XRSS^5W1 zooRghVCNli9Od9%dIvq`hDGD%*m(!UQ%;G;ONj?y4)3L|b~;IMbSJjaewd~-H+)P| z{l&DJQ6y9IxmGe%aul__#w`=g=MdeINLvW6M69s;UyULOl z_=q901$ZSF0XtFIAj$O~{b{?LdKP{{#B6Lj5K_+LC*0*jgz9l)ntls>=9@OIweeQ3 zH^`)K2EugHH{Lq-2H8#PeY*~z3Ly*+Q5fC=9neYm7M$VWJP$H^3-8+_N7#eR4F@|9 zm!&E}P(yXlUsq*`ap#euW}s?(s9;nw7Y?ecqg4H4#IkA@^pI0?pobsBV1nxtGy>yt z-|ke@!t^`LE*gX^0cJ!qZ12--A$6TDV;lf|QDzZk;1D&&-p=2M0asAR#R&D(k{EF5 z$Y3^JKRAj9c6c50vxh~3x|>{u6CVI@ggK=;OUSWCIWXPRoQQMxzzUbeK>h~bQUm$v zaUR`sHYlBW%VIn)s*^Cm<=r@MWrUNF{WbEY)(5r!8nauOUK25J*N}SZaH_1$%tLE) z8s&Jf~`?BS0X8&-J160m*=P(-yoyFE9+4S1-U<+ucc9H$xX zQHcXkpTRX9hpWfd)nkOhJ9AJn&&s&5pK>`{jaQ2+>F7U^)iF>*d9+&TJ|3im^C&r* zEsgmjC;_h0|0QPsgl2wP$;{5#zs<_f_!swUM!ygKg}FfegbRevPI)xz4dxAXwBf&i5%J4`1BSo8gZ3eB z#Bxhn*F7YO{i+9u!9s&!9_p_$^`NSh2nZ+He?-=099XocjE%o!_b(n~R|jFsiA)?> zOW`_tkw3I2CX*;HU&qV)at{=DK*6z8ZYVUfZv!R+WcC55sP3r3FoSgk-3*wU9p+Gn zMo5Vzcv{D)<#Bx**NP{(d8tzw0(3*$Q#Dg(#!)cB|3}o}5U%jCAcS$$pIL9ig-%r9 zfaF?zofm^^myNlfq7gt9`lpCUjIN=d=LP!-g%X_aBon%j9YSo0nlxcxVM6aGEjo3HNchuI8z>r`jwQ^`jQ0u1>PW{ zz+w}m_ji8>?Htm?5|4DkrvEL;ji?0fU@`mJd@G~m`cTF8t-sGkB 
ztf5Vzu*w>-AG5V{TxU2X>cow>!Us0^IbZ8PX|&`E)jH53LlokyNOhX? z=k5(I3T}R`)?bi_L6uCkT7OB>R(VsF-+P=NrI$^5fGY*)WftlN>PZ?0*Dt1E6Vio_ z;30n(>t)n%5=P>2E@Ki%-CH_jp?`t^Oz5aB7hgxZ3W6p6XKuYwlAwiS^NZN@9C*$M z(Sa`h+$ZNc(@l@Joy<3fpKSYoL42Hq9NZy9FYutJhN8FDpBqDyg1z?c>aSt0A`<$AbU*J6gd%zJ zVTHteRP=ooqszrO`edVre5J{*_Zd)@7=|E9@(=h*%S-<*12N&VX7l?@pmg!)g-ZzX zYy5+7Lz-d8&L2%70vG-ziQ+8Upyo`I%oU@I8Z&M61=!}sP^;Mer#~5SOaN%Z_9ljf z{yzG7*rGVvhB#zrI9Sg-`6@0WkMP_vXe%pCS(mP~9Hfuz3;iOF+Ne@#5c7Uu5j(8NAPccDr1mN($sZ#{QCC zT;WI5Y7P+BX;$bV_(RgwHxL1cT&$d^%vX-_JC->+=kQ)Aqz&#pRY6px9=&#{`CX20ttV_!H1{5xxUrr z=Yhcdj0atgclxamHwjL8dRFA)B*^(gYNYO>$V(2!N}kITj8l4cdZtJF!N<@#X>bg! z{}o<%b&Ox|4q#F6lPb9;&a3gb3=hW0n_;w0>2%EjRJL&iH9O2kxfbj$lLpHqBYExz z#+7ykI0I~6TuH)ZBV30iYld@^my)QNa}6cGf-6QjUgbv5bHjO*U4X5OOGW!OdMypg z(J?q3t=8gw2Nsb{D+pJR;F!c7%nX;LUamzbdnSgS`+Hv>oqCr z0S&680;qzxW8J(Rh<6A_pU9`kS;20Ttn6vc9y!)SV9)*IhwkAR-JrJOFZT?Pa!{|t zy#f6%*sS1IuQ5gi&#?OzW1m9c7rsb+?ixDKxA2Z@++i=tH*SW!k7IGPmyfyRyHuB&iJ!p${Q{6fbMwJ>dF7!tYxsp`wTQtk z1J1)Qgh6lJ+>oSFVU~+yHlzI4EQ4>$@I9Okgum*4$s~N^$}j$mNAfT|@1VqMq~|9P zA%e_Xk!752ULc~HQbFi}OAxn{yO9Tx;)9nr9CL*VY?w$+%~`{DoA@YnO~g{|gnkUE z`YeOT89c(^GYt5plKwjko@4MlgP&t?nZain`~rhF7`(;cHiJzD1i|%}7`(}Vyiks* z=G>;{OV%kDGBoG<5`N*62r$Vqd~BnCp63>e-o@gJUb*DG*5_7$N}FEhpQ7R`d2JUy7L#Ni>~5&3UgZ5@2~z5E6E;<4l~2oKV(s-)58_on=O=lm0wnbn=tcb!h}gKrqewjyl&pAFAlJc<{#wYoahfp5RkP{{!Lc0(esic$K(ad z1lbJ~={F@|QL{J*aVEq|qK`$$-r)&8VwNv9>Fn`&R 0: for block in layer.blocks: block.get_dygraph_inputs() @@ -376,11 +418,15 @@ class PaddleGraph(object): layer_id, 0) == 0: continue if self.edges_out.get(layer_id, 0) == 0: - for output_name in layer.outputs: - if not output_name.startswith("x"): - continue - self.outputs.append(output_name) - self.outputs = list(set(self.outputs)) + + for i, output_name in enumerate(layer.outputs): + if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \ + (layer.kernel == "paddle.to_tensor" and layer.attrs["data"].startswith("params["))or \ + "paddle.fluid.dygraph" in layer.kernel: + if 
i == 0: + continue + if output_name not in self.outputs: + self.outputs.append(output_name) def gen_dygraph_code(self, code_dir=None, indent=2): def gen_codes(code_list, indent=0): @@ -415,6 +461,23 @@ class PaddleGraph(object): gen_codes( ["def forward(self, {}):".format(input_data_name)], indent=1)) + + def gen_run_net_code(code_dir): + input_data_name = ', '.join(self.inputs) + self.run_func = gen_codes( + [ + "", + "def run_net({}):".format(input_data_name), + ], + indent=0) + self.run_func.extend( + gen_codes(["paddle.disable_static()", + "params, _ = fluid.load_dygraph('{}/model')".format(code_dir), + "model = {}(params)".format(self.name), + "model.set_dict(params)", + "model.eval()", + "out = model({})".format(input_data_name), + "return out"], indent=1)) def write_code(code_dir): f = open(os.path.join(code_dir, 'x2paddle_code.py'), 'w') @@ -431,6 +494,8 @@ class PaddleGraph(object): self.forward_func.extend(gen_codes([return_code], indent=2)) for code_line in self.forward_func: f.write(code_line) + for code_line in self.run_func: + f.write(code_line) f.close() self.init_func = [] @@ -440,12 +505,12 @@ class PaddleGraph(object): for layer_id, layer in self.layers.items(): if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel - ) or layer.kernel == "fluid.dygraph.base.to_variable" or \ + ) or layer.kernel == "paddle.to_tensor" or \ "paddle.fluid.dygraph" in layer.kernel: line = "{}".format( layer.outputs[0] - ) if layer.kernel == "fluid.dygraph.base.to_variable" and not layer.attrs[ - "value"].startswith("params[") else "self.{}".format( + ) if layer.kernel == "paddle.to_tensor" and not layer.attrs[ + "data"].startswith("params[") else "self.{}".format( layer.outputs[0]) line += " = {}(".format(layer.kernel) for k, v in layer.attrs.items(): @@ -453,8 +518,8 @@ class PaddleGraph(object): line = line.strip(", ") line += ")" - if layer.kernel == "fluid.dygraph.base.to_variable" and not layer.attrs[ - "value"].startswith("params["): + if 
layer.kernel == "paddle.to_tensor" and not layer.attrs[ + "data"].startswith("params["): self.forward_func.extend(gen_codes([line], indent=indent)) continue else: @@ -466,8 +531,8 @@ class PaddleGraph(object): line = layer.outputs[1] else: line = ','.join(layer.outputs[1:]) - if layer.kernel == "fluid.dygraph.base.to_variable" and layer.attrs[ - "value"].startswith("params["): + if layer.kernel == "paddle.to_tensor" and layer.attrs[ + "data"].startswith("params["): line += " = self.{}".format(layer.outputs[0]) else: line += " = self.{}(".format(layer.outputs[0]) @@ -478,7 +543,7 @@ class PaddleGraph(object): self.forward_func.extend(gen_codes([line], indent=indent)) elif "prim" in layer.kernel: func_name = layer.kernel.replace(".", "_") - from x2paddle.op_mapper.pytorch2paddle import prim2code + from x2paddle.op_mapper.dygraph import prim2code if hasattr(prim2code, func_name): func = getattr(prim2code, func_name) func( @@ -504,6 +569,7 @@ class PaddleGraph(object): line += ")" self.forward_func.extend(gen_codes([line], indent=indent)) if indent == 2: + gen_run_net_code(code_dir) write_code(code_dir) else: return self.init_func, self.forward_func @@ -513,23 +579,22 @@ class PaddleGraph(object): pickle.dump(self.parameters, params_output) params_output.close() - def dygraph2static(self, save_dir, input_shapes=[]): + def dygraph2static(self, save_dir, input_shapes=[], input_types=[]): from paddle.fluid.dygraph.jit import declarative sepc_list = list() for i, name in enumerate(self.inputs): + input_shapes[i][0] = -1 sepc_list.append( paddle.static.InputSpec( - shape=input_shapes[i], name=name)) + shape=input_shapes[i], name=name, dtype=input_types[i])) import sys path = osp.abspath(save_dir) sys.path.insert(0, save_dir) import x2paddle_code - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): - restore, _ = fluid.load_dygraph(osp.join(save_dir, "model")) - model = getattr(x2paddle_code, self.name)(restore) - model.set_dict(restore) - model.eval() - 
model.forward = declarative(model.forward, sepc_list) - fluid.dygraph.jit.save( - layer=model, model_path=osp.join(save_dir, "inference")) + paddle.disable_static() + restore, _ = fluid.load_dygraph(osp.join(save_dir, "model")) + model = getattr(x2paddle_code, self.name)(restore) + model.set_dict(restore) + model.eval() + static_model = paddle.jit.to_static(model, input_spec=sepc_list) + paddle.jit.save(static_model, osp.join(save_dir, "inference_model/model")) \ No newline at end of file diff --git a/x2paddle/decoder/__pycache__/__init__.cpython-37.pyc b/x2paddle/decoder/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f88d0b35a59757578ba240c339233eb4f0afb0 GIT binary patch literal 166 zcmZ?b<>g`kg1J?*;z9Id5CH>>K!yVl7qb9~6oz01O-8?!3`HPe1o6vXKR2&LzqmB7 zGBGbLF)!V~P(Q*bATcE+CpA7fKP453mzb888lRV1u3uqP09K%%k_u8%q#qxjnU`4- ZAFo$Xd5gm)H$SB`C)EyQ(PtoL00259DrEow literal 0 HcmV?d00001 diff --git a/x2paddle/decoder/__pycache__/caffe_decoder.cpython-37.pyc b/x2paddle/decoder/__pycache__/caffe_decoder.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c200af75fff2352a8c6688244988ac9d121c2fca GIT binary patch literal 8973 zcmb7JO^_SMb)KF-3$tA^ulJ9#1 zV6mc9c7U3m?w;<~J^jA-JC_$0d;`C~9DdmN$uoxWZ){9|HYzXRitnQcj6ijbdVW`R zg|^u>dsf|2d7oW(&}VhsUZGy-d38_v?QXH>*L`Jt-3Xk(y>A3=SlYMhWwZ-{hqf0M z&|W~h82D)Wp@((_?NU%iyDZ}u1NFL5Tlfe&F*a%{D<$Dx(%9&i z55mlmDuxuVb$8prOXzEH*7Rqh@&c~-izq^aZ&Eh{6`1#px)oS~jk}%i6>n6)s$RD6 zQgdr7ydK8=?%SbmkpzFd^Lhzayo@4MsnIY}W2_DgzCI?a4Q#K};{3)y_mh5BZubU# zoiwBm5A8VYZs`K1|Ba!^jfXeR_oC!{yc^wZMqABj``PEtzq$TeGYGn2qlNdGYYE*b zeCPb$`alMp$LnHOv%0ziaEurP9 z;p$Pw9-Ta6FHd!bVIl+QH4acVfaQ+rn5dakBwLz`W(7Ik!Dd$82$R>E8ZVH9x~B4% zC4>)mtvxiqjBA28Uk%TE_Qp19E^*3(Xf$||y?(F@)Hkwnqp`Ew?B-8&nQL$_LVoU` zWO1IBc2S(f6|>?g&-NCj?}#!GDd3yK`V1n97KrI5P4x z$E9S8n-uoRC@)}H6eigl57qfhH6E&GYCaIVI|#Kthn>igWwt=9pu<78*@CX@_TLG0 z=69QSL)}R3VlYL$?r%|bimu7*pqVslw(u4cI5P7%v=Jw>sU%C;Y&2TkW*j#fa{wT( 
zp2HO{p)i0-gSZJ%^=})YEIErf9)t|>$U1jkF76LlHNwzR~&sNy&PT!SVg9x1GNCDtE z)Sks9!1q+8Bz!mmKjMVHcoW4GegZz)Bkxgrv`t{rZmPN~b=pi-FXXne?#bMRpn|!@ z;COH>SjJOdpj!fTS2F+bm5-p7I}y~rf{LIE*afOo0>lPt-_+~GZD8Ip^_K!Zv3H(l z4{H(hJnxBd=j1SbU)sX}Fb^;scw6?Pf6>6r=ff~7i zIfC6wosPY)^cy%=)B@+U(ZMx&;^Ik?nt^-x?3BT$X#uBgjl9&0KbE?wKTh4CaK}WS z(i_jl{ksI>Qd;-{rfai8s{V0+7>1&mq%g`ct? zHn&KQNxhtwQ=i&s=chM~oqxJ%q}~TAemb?s3w$e#aypgNx>G>!*<|saO0Dq{`}k~U zH8~b=f0dKQzO*sl2KI`>XRfuouoG3-!K&zyxVqVlVX*sAHGw5>?P?uHNp%1ygln0T z=w>_0>?Dlg;)(hHh}J4|+Rl|0v zh8j53b(sa@n-$totGi3bQP#~0;a=XhspK*%=?^k@v!5jWURDgF-Cn4hNtn4h?DgNq zy&c7&PBaa14)n^4F?(RZuWH@bnFYTL#C1DLYL$ta%3r@;lqDOz=AgckZ#xVo8>B{! z>x)NwWBoLjfun9{LR()(QLhAe{pox42lZof?R?{PKcc%VtJWVotT*zNg5>_B}D61wBxL^LvTD3$WW z=$Oz}=4qf(M(~M)k-WQ~IifHb;pO+#nzW0&Cu(eqoYUiV#_oXu54JD{`cs$kle(Sa z;O|lTz`wttzlwbiy`bH@PW^<95hCbutC&%HZ3=U0an0uQa13G;LQ&<6x3 z4~G@Oes0S+(~EC62O(?LXf&#oGB=NjfO_cCuVF#`8z?e2j3g|es?=v$nCy054fS@Y zsq_U}6NeQ>zuk)|XhFCK>(cItvA0mkiun#`{H+L*B?@<@b+eW2FcJwtuz~I8V0j1h zW3FllI!>TG10?Xu|3pYx4L?7JgHNL~i@XSjSy1w?Xe4lf=&j}fL!hAk{$PR%0}p7j@(p@3sBAKG{PRJ!0R`S=_}1gWt*Ur7=&MS=lk6p;7Hhu#O-z~t!Gv|>qLb?;Ms}+PZCy0qT2XReSn#@0<8fIANg6iIusD5%TqgWsgI#S`3%_?)Qbnk9al%F`GFS{!3rmWx556vhyU`u zpu{<^%lfb9LQ~$D#kW**+~{#`bQQ%mG{voz5%C~XK*CPdpPD;xQsfMgl5v6SO$1J~ zHfWk_&t?_`4?dvDUFNPCozL%9WqzwUXeZDM!w`N_fYF)yWC*0U(EYchmTysPQD*C~ zyPH`k>Q?y5@4_L{QQdmw`UVV%-;UwhB+aN5$~j~f92jx6>z`}9@!TsfzIFBbYcF1Z z@ztw0u3m3Q^Tu1RUCmq^)NUAyFQl0i%j{0S9qAgX`b#JfSV>ZZM_Dh?qiW<+G8=cv zzrddoABP7$J#>kwE}`SEaK&d(7{IKrmVw}kn3Xc_Wz=kQKC)Xo%7EDsGvCA9gdq4o z;8+5+K=7aM3#c4-qH^9& z3V97#E{`?%&qb;hMEyl16>#6Tl(F|LJaem45|eWdp1A{K@yJMyNS%wu&b^z{8RcI% z1Nk$|_%VF=%QuZSe3QK&%P#+JvP%cD`ae5A$NMZWYYY5_cT;&XNhMKEho!CVSF6pq=)A81GTe?9Y2qrGN`N} zN-2U0A_x6Fj@nkBQ@m(EqH#ZMj8|kXP8l;dvGbo}HrDGrhS|^Kbo@~%^|?RU$xmdb zKFl#*IwR^`tb9&bBoNRSX_k$#)#+{$v-l@ z5%YJU`gmOXT(0b2;ku$&=u_(^i%k|Z4_N{DWni2Y5XVS_Od-sG95}d_# z&SX%LhZoBt^`}e|lAhNqlbk1JVqC=>F3qW5c^lEN*IG80!H;6kI@NnTL_u4?(14-` zNU1w+d@eBq1qFbBmq2DOy*D`cHXM4VgRouvh}#hJNgHNM;YpE{C{FY+P8tyJ!akLR 
z6M;JYMv5{Ng8e=d+s?-DG^5scM@UphPbSr76ycyRcTlT))P%wbFJ~@a9cn$;(nQNM`EI+EV6A(13^%A3c0Ys!3gP<(a1^-@Nje@eOP~v-|kWLD!)fND!&N$=-!- zzZo=k;WKPE2Ag@ljwwF6KDq?~E25n>b^pT@G!q<8CHJ>)y zALV7XKe}h)vM%S2wzePO1ow})*$Sqn+uw{QK5L$TfT9~T+Y^q9*wS&u_v5MKI$ZnZ zb~kmdp-O%L3WDEkQPLgPbiem{LOZShfR6-z@3AJj!8;MyJVVUt(G2$r#&CRNzEUtQ|SwS%gKpM5t(uC6J;Qikvfom5u@Q zJp}rYdF>#pg$(jP=QZdG!7YNC?=l-7nA-{l3Ry9xF1A&`IcS|SRt)54&2PgXjoeXT zCR__=Ng&)%i7| zQ=qc|6KHiR2&k>jB8()wz>x>akE{Y86M3|>ka`F9(G`(VCf*Ahne*Cp#4TbbKeBLG zS#e{Ee9d4ShSLd{POB~J_tBPM8=u8y>5WNf1Mf}u`22e$Vnzde<&B$KH}B>SOkoR5 zO1`q5CrK=(J-pWKCOj(yEbi^Fy?r~$oOjv*lqkNg34P>W1P{bhy)=ub!=P>x_Y%3+ ztyZJYqMfPRnR+J&*UWz9>h6aiP$rh1LWLPLhBb9@f(7rKP3cGuHnPu3Ak+ literal 0 HcmV?d00001 diff --git a/x2paddle/decoder/__pycache__/caffe_pb2.cpython-37.pyc b/x2paddle/decoder/__pycache__/caffe_pb2.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08ae698ad13978e9697af56042605f71d33ee287 GIT binary patch literal 122151 zcmcG%34EPJ^*`Js_ue*7nlw$JqzlldEwq$|Hc*zxV%nr7kR~BX3tWVd_IZ+CnvE=7 zBKyAYf*>l0iVC8lxFL#&vdAWgz*Q6xkt-r9E{Oj0{hl*to@but-sj#Y@_*^)hM9TJ zIWu$S%*>f{W?55SZcV{|AD_6T`|m%BPx%c+^nWKK@Im}WcYbTi6nlzY)IVjQC|OiQ z;o|<{fs$m5!zBpElL-#T5T2Tx#^E@^rAe!3ieueYcKej%bSHLNxt*}5+SBY(+p^29 zj3+CQHp8B7m)oTdg)8j}P@jVDrS^;~ROlSISRnE-Y zQ7WhCwpkQP?q*L(?rzUa&Su)X+p}HTdt}kB&Ov*QJ(p?k!L;{a+EmJU_I#Jpo+hQf zA}-Tj_TG$F&3M%=-U54}i&tafaqSkFCE%3B_7eQl^)2DhKK8y$dk)i{+#Q&fe+Y+!;F4250(R{Z=4fYzhL~FB3)M%DS zxYT5?V@i86r9E9r>+PdmO3k3;$|aZk7{uL{$Z9jLl8 zl(uA1y2Yf#lJc<}l5(s4ai+bTX)kwaf5QHxOM6Ea?c1_w-=2f^r|dhJ_Wn$Jf0uTz z{b`r>`)%)F0Hb+Ytt-fO4%efuExgFr<)@y&&&F%eiX(iKMY0&=K|1;XFnD#1z z_I>})XxB0AI)nDt|DVx5kZB+2(*B11O_%n!vLyc7ro_vLcE9}{Zn=Y)(m^hzPuLH* zl-_UOS`l@4B?#uT&h&v=4S^f6sokzij`~m+~w7736PX{n~yN`I{-fv0p>}X3B5v*X`f= zQr@sLD3J;Cd;3k4$V~Zz{T51Oru@-<8znMR{$&3dB{Ea~V!vbm)tB-&)aq4V%HQpO z*zfvM{%OC5Qk!)Cg%bb6m-27WechMxe`w>3FXcb>e?iHhb4?L?(9iH!t%3BK?JA@c zqsD&75|rw#Kw1p4_I4mGPV|_9A3p)<`EvkvDq8TJK-x6O%YSuR-%<*J8vQKp$A@r# 
zIK=G_4fs;#_K3-?5DsNphq|;r0b0{tTJLvMIE<+uCe-UeyBxm?qK{wfv=1OQ1HVe1 zrRr%OLTqNAT0v8d(Qom$%BjHLSyxu}74=P_f1Engp2p#~@ohK!cE7SRc{r#NjoJ9^ z;mfBQDRYp|6OioZ?3swEF+p*c!#Wo!*<8TP1Jscy*JFiBCOGqfvnRtx$Jq-wdtb@j0#=gK< z3Le)-##jc7{ZMyWjTIuy)$e^@^tPOP6)3mpxN zW^SQL46yU~YU~TbnBMO!cEQe)E*DDHEFR=Q2+=zm;6<8_m`;*D* zG+=FW`#+B|`MA-S$jSMBP#M6FINv0kA4Gh}Cx^pGIh{FgjvR~gj%Xvm8D+jl)d9wU zG0r@Xim@FSJDA^T{AC`Xl=)`aoxs?|yw(_tkus4Rr!St9XCDT}8Tb*;)(OwfMEoot z&(225In1+{N!viIkOpurV9tv!seQ#%I%f~^DF9P6G=$E#X$GRwt(VeWfcS;J(p`j< ziznTeE&&!<$jIWV#-k|meHpMWpLAck0$5j0x-Ze}an+>e(W`-R4fi80+e(h|Nzt{y zxsGSj$?)NNVBNs8ZzQZf*0}xpHz<4rzZ>zR_B~qK_a?+Y>TBPdk#Y;Suhu6DN#DnS zaci{xsw6L~WFH3wTGLR;nx$l)MEo{i$!|U;m)`@7G`)VW&>}!C$&)3UI>nJqOejQlf@b&U2 zSlR(=GKkD`_9p`kyM zYsKyLDyh|Dz<3-j{8ZsponZVJ7_>fB>&?8tRKZC!2_F?}e681a5`>Rb7{5MEr!L`3|o82^OTBZ7TN z@^}vz|AI7UCQ+_`1Lyx>{idGf)g}LG+&S}N=lxvD%t$r1fp~wFG z^Lw#f42%-{HRM+8;w;WPeihp>;KU)ZPq-L`a!CMVs!cLj{Wg2iwVP}8Jl^^0l zlrf^%wt!p4avMo=<+d%hrvs}Tz3@5o_Nc3cV!HxZGa%*9PYSCNSTiyHy*MeXDqzjR z$oE&5CySA?TZBGUZ0`=p+2|i>w*?Bd+8)5D#)$DQV-)UB#r7Ov%(Z`qzw;vF%m?(I zsL?|%C!pn^r60^|Cn>h~0^Z)xFJ5-*6RNXF`z`>+Lhe;s$BZsfHNahj9{j4W59gcb zi*0HfviKH53-R=-{5bmnXJ2sqwa6Gtfw2tSqZy=tja>M!A2614Uwo5iHsS(}?D=)X zyMVR7{W$)v@JU~7Bb#rCQKty~AZ1L5=fNdWJl0(h%|cQBXrmjTQ} z@?sX*KLOUE;Kp0%)8=@czugW47WI%n!}n-np}BK{EjlTu3|tG+YPLJYe^|I0;~ZB?6=_Ha9x#>Mv0z-VT8Jz~??QEVTRw~dQoF)y}T&|kcHGIZ?7X@@Mf zH-G|J)rsS6!tpkwwfi`J98x-%<5By6CosC0slCLg>q~}ARak8&2g)mM5MmN_**S-)2)&5kLnS!+!e)RxjGs{dt zjGXq=VtWfH*ut;lgkKKQdJTRR*(Cpc%&*D!ovpx4G3T`06yn2az}Uu|GkQV+&r7l0 z53B*^UL-8#4RQ7ito@6zZ*-mTqeJ*H1j@r%{5bu8;zyB9bI-{CI1Y^hcPv`}5D6Iv z#`a0c6!r5Rkg4a~S-Vhs?F7ayj6k9L2Z(pzsrI)Q+Y`X}Fh;D$B95m;HqE(bKucG< z8lvcLX9D9a%wNy*92N!RY+#&&*&(WprPw|f80Udg&qS<8v3));F0jcqp)47^8p^xo zFSaiP&PA|96vJx}&B{`2Ujpb$?KJ*g78&PqKwn|MjK5b##<>d6SKF`P?=_Ket_AdU z_N(}NePo;)0R0iH?TYOiBjelz=#Sd3~|?YHpvQ;~7*0Q9HrxAFHgk#Rl?=sWE{iHvhEpucLrhreHojB_8Lzs@VUZ$!rVCUCxG{}q3~9U13-K!3;n zA^tuP8Rxrz{+|6`{CzMo&i4WR5O(tw+dqhm^Dv-)h*5`ZBtMFb^$4IJwco{GHBv_z 
zt!P#CWBVcejk*J^*nSewWSuL*|DTRrx@Q3W6JEtV8yV+0;QZA7CH_7i8HYIi0hfnQYXZn6CfK$A_F#)V%-&g)lzeg#<6%YPjii^kvI;5QGy*CJ#67SOLF zufO5{zl)6Z2B2w7{XKqfM#lOBpotTI#826iqsZT%08O0u3x4lJ&Wn0F)#>l}MYTj1 zbI;ve1@mq@DvdDSu2WV>H z{qb878LJl12jI67zg3a3>HvKpeh1;VIx^P5fIbAjL-9K-GFCmHKY-ui_#F`$>qtN! zh2ID9Ylw`s2GDEqYs9Z9GS)gkugC9b{F)16^{vaX7plZQ2H3}*xb_>$X z{gNfH?3BoU$w=#BUN@H5?Z7&YSE;52FrVG9#O?r2C$CQ3{asiihwtSnvAck=5v$YZ zVaZo}$nsl=O6*O*I3BCh7b9bw0E`o{Hl;n}#*W|!mfjLO3EY#|vPQfsyoM~XKLm`E zq4_-Kvz+GV+bO{5X87p5J{34U?pig<7_k``Tims36bu^}4!2Fz*`^m5eQ2AP3(K^~ z*;Zhr(3Z~?#^|fa*$Yksg>BLcx}+EMBW=L2*p%3VNTGAUWdBwEu)_OKO6*}^oz6Jg zc9q}LSHz?6$Rzqek-ZFgjIxC$Y9AN_#yI5psmOg`J1};j4?GhYV<#|nvE)aqc?oPf zC9vs`xsx z_Aj)r>TAF{pK%J-Dd+CD68i#BxKK*BNlJGS(k}Luj->OFNtUj}z7$xOap|Iu8Ty|;tLr$oAs7wNtOX`l8<_h*pu*-1n>p3%@RHttSvo>$zXj0Sl?txpA6Qwfc0&b_DEQLWjW>Weo&zurlbj_>qC zeFm}{b)J447(Yf2p}nMP6f<-|Jw|JPjq5l#r^^ca`eKSiG8k0IGPN$T=5q`ly)%Zo^P ziR(hTa9&+Sq%94MpL1O%zwY=2a9`$H=~fN(!G+Z7m!LpENYMaKVbZa>lGE_zt_aL zRn_hrgLB!tq;u9K2AgRNo)0BIr*d6lu#Cp+m{FG)Y@{*RNP}yH{PRQ1o(imKT%QPi zD5n&m6rf>A`Ff;$Wk};23Q$IxzhbykAjbFZdD0p>DhS35U{q$6E#E$_7*5N@>?)QY z>feP&x)|)RF?%=Go}yyx4vg8*oF3zT7bTB9fKd&t>GjAMbAT}yIYeFa#$e-(;qJgE zFur>uTR!_$%-$0idtuh|>{o?aWN%>M%Ne?aqDRE*`o*dP1GUomu>eC<&JA6*QV z>{szG+Sn4aYk_e9=CjvaZ79zS*jGi`r&ANqhGN(aN@sa#cG@hxcNOT?8Ka9T+kt_R zQr*O(c0a;Ss$5 zfzU0q7@WyzmG84yz87W5_c1n|C@1;0MT(9E)fS%=Z9qyZOOei}(5TY}jCNPPd1VFs zoF$VUaU3wzeVRtQBCRt_w-LN9z>uGTY^I*&CjUIR2{`0wAgk$Q{RXE3_XJ=@*fo)J zPj1XU5fqZbb4Pf764E~8^l8lBrRqS0e6@sEn2TDu}L;hM;k_%xpUq_6T?^#vqwcX`=k}ekT&j< zO&Sl>t&Gu17PEH%V<)#=^j=D1$S!U(Ey?-kh8X-5G59HHEQqRyoB@n8xqYHyoCS=t zxmCOwD`Z_MBh)#-IG5)Fv+ntE&I8W*Xh-kt8tE<%<{d#V0M3OxgXn%-_!fzneG#xO z<~byK=`I1zrI>XlbCxV-Uk0qpfkU@9MILqgrsuE<#_TIV>q?Q+tss;kTeD*Jb)a*-DFL!VH3Uz9vZ|GyP|LTN~OF+%3VkAs4~4?g~n0xF#DSn|Ff>-F`tDT)*UX57NHm%Zuy_U*^1`jo3x@kAd+O=3F$4 z7(w8E#*d_ILsGU!koKriHnm=TjLSBO zIYL^D)~kDj#P_)+qLpeYWH?BKeRmj2nE7e8|E;cVzr88a=hx z%fR$3425h{WMQC_ve7LJ)W*N!G07a2LfTj)=@sDo8l{SEUwjo9zrp-xuEg`NcV7d> 
zZ+Yf7%M~)uaJkgjLiZU=-u6MKegBuW57|E603Rl2`^bO~ld*mL9=XuoaZfWOZMVQ3 z3L%=|A3*CZk=jv_+CL%OaWT>cNzaLcsIWzwkQ;4zIx1$FS8tr>?uDen4{wsSsl_>NoS}4u zwSzePP=sa9^M$p8IPN70T4*Mx9mKKgKh9n?;@}RcnT0gIcN<(Jy(f+x{&DQ^r948XukVz%AJB8jl%;*)|7&I?5$*Pg!-tkVLmn3<;W<~RariIecBj{){i(&{`@a%r*ng&=p-$xar->veZKUybEIAuAnih5U%Ln? z-tF&s`!s0KYU>jGE_Le^1>-Vcc(=Ys!MFk#>c;n|V{Y8O3OHA1>9P6dtGIm)aIR%N zmd5EQCA$t7*RviQtuMsw8-Vc<)?lMz+z5=D(09DCDSz*Y+aCqS&8)xb`KGWRHE!Pm ztdDV@n+(>i!1}mPhb^2}3@5GQI1@Ap9af$3acTn=-Z*Zxxl5$_T#@SAK>K!|RDTL7 zcW`f=tWCZMcjLyz|2A3NxcwR6Do>oojbIg&esw2sKgT+QDf1!n8?t)+JZOADxO1Lx z=Psn(ZE%Nn?#6LDj=6R(v`c~3qW5s0FQ{Q~ZD@aG9R57gnxbLQ?T~T!`+~coLh3I( z`wDRGjoRmP`qtw1S3!gBxhI|EeBs%BNaMF8!8hewyB#Ut2 z;D@gYV)h-t_*T}qA8K6^PwoL(J>=W=y;7eGq(1k9@^=gwh}#b!<-1%T-S_i)?}Q69 zOMj2+RB)D7J~d}$R_PuD4Z0Pd_hDMUx-J?;lQr-zBik#_b<5 zzugu>xgN~jMjTj$?OST+UL_j2=a6_ysWN1eIx_ z4r$F0Ir}9j=o3_zN?l$-nt6gsaqZP;Tmua{g-D~+Z=%VW^ys=QKEDPUzZE`TCS`ja zX?We#<+Cc=8_~+9R*%ksSvh4u_4ks~<&x8zNc)44ld98OoKv*k7RQ;XIP5oOZwuA9 za_$v~+kXOuKTFB3kdplcX?SJS;%qF*fqn-N+c+5!mnQV^aS4qHc zW^yG|5}A***FS+1Uu3`6>x)o63Pw9HN_o{2LDOWM1kQsc;B}j<{XT(pa{|6OvfmpL zU(j})z}^1|-2G2$AQLZPSB6>8q?|K>SH&x#XxvEPyjTL~#mJ7YTWc{=c8_o)PXh1V zCh*?vWNtc$Vf;$q>=>R}JI$28`EIj`f+vC6>@0;q8g)!)k=>%R2Sb~0~TPfme1pIbnE7Tm%->P(Lal&4X7E#!g@ygceb+VMM{W1Q@H) zQ{Urur1lq`EyS-=3>#De{=GE6SrX;7>tg#RU>pKIsXLpa62jFXqek=SRK_5&<#&{SQA4IE1y=ko2egqf|sK;L;=kYi&)?nT7N@R>Dfw30ceKj)1 zGr(wsyhp!*^(A05LDPRdavr|`#yZqH`W>pT0%JYyCV4k<9)AMH(b#8|iHz}AU^HXI zjw&T3tPvf18M4T92+e%>f0ljOwJR%Pg*wdK6Zi0Ux zpXp}X7VISn>?%lLR{`BDL_2-mv*i&mKMc$>a4!^Y0rz1RwppAB%(Gw{#GT%eYe;L% zbMS@&ZuEBLFuL7>)|=2GO`%{;GL9xe z;~H?=O-Y0Xa9*Mi1%Qnu@*Y}X_0230n7zPS(b!j#oc#01`>Pq^>V-ym4D zzBS*YSEKIDTq92&!b-=UoveIl1x#<$6X!l6`P_=Mj~kpz;B0OJdj(9ZSN@SRVc!Of z+nIBEbS{MPDPY_IPQRafC=&LkLFF@2pBtq&~P;H=?TE5{n0bsQVvSd+xYIJSqgp1+-q*e%X_XOHfW zIY&Fqoc1w>Xm*a_*c}`@$~l%}pXXS$#O`A3V&9P32J;$+YCH6eWHaM+}_p&ec8(-|V9BUWI-}z!0j?Lw7#r9&vZo!L@ zM6=l5hhy_4_5qHyI&)FlR%agnosYjqIBg8w$dqO}?HoIfp=UbB8L>T`4vuZ*ZynB_ 
zz@F)J8W0^4n_yZ;a11j!$BvV4)G9YSI(9C{y5!sW97{^U{eoi?68ja$J}j|UId+D`l-!;vu{ZeJ zSrYpL$Ih16pE!1o#Qw^$>m~Ls$Ig}5zd3fE#Qw{%^Erl>TsU@t#HMoWLWx-%yGUXc z9J^RzRUErSVzW7RnZ)LD>>7#f#j#5zR>QH&B}TpNX6H(Y?Z>ezBv#9@t0Y#(v8yF^ zFvo6?SUtx+BC#VmcB91BaO@_Dt>f56rwjcLch+$1c!_Q1*a;G|Id-DNdO5aUV$_#! z#;x%inaVI?x8MeO72Czog9+KUWPEisa#Tbvf>RQ{$~vc=hq7*^{HsbkAHRwLh*Gu~#W29y^$Hp73A(`;uE zm&##^iX$7HJ|ouaY-M;gKJ`hAYId_zE3vbfR*Q2QL*B^W4(FJjmliwQ_}lFaxy@OO zTw9z0PSYhNmt*j4acrlv z4`}Ul_Ql_&&IH3P;xEf2wuEE*NlanYatvp2_}g;%M$&bQ(<|Q;hbEk*pgiF$0HQZswNjYE0K1%a}m-szgnD&5rdY>Wm7Ti5#rb?=Mn?^5@!{# z>zvDsw9A}2q#fv7ZlqoA9Eh}ooU4!)x8LJ>UF95vwAIcvoTln}FvoOVuW?ob;u_~* z{5{UO7I5&waVd06&3%XPH`vga?hz7GbLOEOga3-Zog`G$On9=y)ZBN9#Ps~8^1Fre z+brMI{ANo`&1fl!=~+wR=~+v~)T}iqkZNWcmYAN46r`SuR7}rBDyHWm71MK(imAD1 zr_j{%jrvC8#Ldo!<=d+qJ6mFU-cXQg-nfvz!4}Stmq<*H>I4Z3Hh;TRzSVH-a*642 zTtTXF{A&58#_BGP;iXc>OG->>E7x%hwp;#ot#b%k|61oz{B3crL*AaGtC*6@!}uF4 zZ@{_*8X0}ldei9~!wQcnUgsRfw65oRC>cN8sW|Ocz?UCB1-L3a8{s1*yaM5)Bzz#k z4H901@EQrXAlxY74uqQ|d!0O4i{--Pfn68b_ONV+f}td=K_aHnd;cp;3B;f}U9+vPU2%j$Drx6~J@bd_dN;r-1n1ug`@VJEEL3q1_ z|AX)j4&yEWgm+5VLU@;i=O8>G;e`l)Si;93e1?R(5I$4Fry_iogo!6-OL!Z?=SY}j z^;`*)tez*~+Ymlq!gnEjfrRfv_(BQakMKnjehA@m@W<`x2u;836HdkLMMYDbX}5inMiM7++HJi>4(5ALfB z_f-R~P2k86;mP0E818EZ+=&8roDc3khP%&za|Dj|IePTI&TwBh;D!XQ(+BqrhWmyK zhZ#%Y$W!al`zFJE(}3$2xQ#xzZ!z4r47jrej=a1cy>Bzzw+*<91nzhr-2Du9zX5lL zz|k&JkKT6}?mGtD0|IxV5AFemd%%FxazJ)rkKT6~?z;xulR}R?z8>87818!p+|LB= zLq50%8SX&??iGPM*$4N1hWow&_lCfo;)8pL;T|&J-WIrSAKVWZ?gs|kdjd!PUauYx zGu*>29JB+0>+!+;kl}u4z?BKyW*^*-816>~+$w?F;)8pH;T|#I4iz}t2lpt$J!-%m zD{zhv?lFdY%z!&d;Cg*(P6b;hr_%9u&ACAKY^c_nZOuh`^EO)}!}RhWn`j_q4#B?t^=t;hs0(o)@?g zAKcFv?q>#ETHt7Rte5W#4EKTo_eX&n^TEBya4#Bg?+6_2o%QIw#BeVeaQ_jw?LN3P z!=+s~XrTf}zFCjn&l&FL2Am~uv|rPM`vt@O!hoA2aOAP|;9h39mkqdu0yp7<`z6Eu z(ttZg;K+~b(fbv{{mOvr61X#baIY}jD+b)D0!Q9mkKV5t?$-vK?w4oz;9h08R}Hvr zLXSMZ9=+c%+;0pxt>>KMgL{qPUNhjdoo8jIz z;NBFtD|~Q&Vz@sUaDNdv@+o`u_%p-(*?@aj;Aro%2lp3-`-=;QRg%D6?Sp%V;odRe 
z<_X+2KDfU!++Pj2MFK~@W-s5rG2Gt_xJ?3goe%Es4EJ{f?n;3pKeb2i9}M>o1MYf( zyTJ$dF2lWRz-j%SyxJbUe=^)Z4Y*r{-ivH-oF^`Ujzql@8N8V zngQ$6zY$aZS+bvbvH89j`AEEP3skIg%F-_;rcB99!GFt&(ledG@qzBKUBgcIj**_> zVP^!1mhFsg8A%O~4N+|BfHOMU)928K5vRA`*)o6C_ zY)emXuTwibGBh@nj&mfP;8Np4#6M-s9N%i(t zC)|`htTKCOd~?6UC9j@_4@C@Wd?1Wc_d|tM0yd#kMN8xLMJuajS<|*`?HL?&`bVpa zb>d7bvDHcSZ5^vF(Mgq7d`HS2+gcryl$ttg1{IcTvk9CW8lgIEM$#x(+FvIIt2wvG ziZ}L*jSrwMUcOwIstPw#E=N_l&XLPdHC@<2`A?^h{XLtVesBjBsj9N9-u|JUF~tVv zPt|N|sx!D{2zv;b#nbBJy*>S-PECU~y@gOW^b9wTIRiv^uxG$wmXvygRUvk>rrW8} z;r^ao-3&x!DXm#yP17W*7Fw|s$VioENi*utltaysHMO&6V7T8IsoKpd9Y_s!k8bJd zS2-5f7p>ydo}H3P8FT79tCC=QMu)-O?vWnIDz}IG5h%Wo#2Wfo^I(xxsd8L9G&njo z(vun-tD0xcB66qoY}wK?Vs{U1cSibqhPm3s{1N1WAGs_VbCm1Qy@fE*iBwrMX7D4I zMPm+aK8>p2Vp5~Ic?Lfw=wr>jMb`8+J!4z8YR)aNs>Vm1?xB%XUuv+YzdJQRWgyX+ zRv#N18FwIkQ&lNOSdz=?Dx@(qi8^bhTTIOsu0>hh9zIZmYmlq~Y8tLP3x9dl9x82Y zBn5$D2`sNKS+#Oy&1FSaMbkiP%)md)Ds_;;;}Vy)qHZtA%MV#g(JqW$^@+7jtzAtW zkc#Pys#}U1qh?=gnubK#1h#SthMTet&8^*So0>XW8rm`3T#UlB4vh@-^rt2qd(E1m zoh0fzAp_Fpi0z`7YGnS_vr}4?zagfYG0@v)TBS4&rM8Usk6|!E3sAQi-7++yMzdnd zXcW0(K%%y6wycK1!J#pd;6e1uvJ|-AGq}a+PT5=*9*GuNvCW%@c1qjrE==;*pk{+L z!y~nRWN3UCBUo7<1-jLMpazpAmW4r0vINNz46!qXq(`9UOW=`29a{}d^DJCDh7pPj zR#rD%kkrRID@GFxm$9O5v7ew&sP*Vmb5^aiD!rm~b5X%$D$r~{KA6Kbs+sE+3Q0_3 zniX5KrtO5vDK(cBTQk! 
zR$`1Tp%$l6%#G_-rh#aN6&rAR23e*x!04FGjZ7m82HM2vaL>r7YUp@<@rqSdYpn9^ zJ?IP6po0)NZuN?~MN+#!X=^tLEVXQXVqHUPcXMmaLDqCLog`a*fn=9iQz_iGu?uq` zA`KAnhV@OA#WhzIThm*eF^?0FJO5ao8XO*{S*&DfY1I-EJVd+Iyi1d%e~wb~Hl$mD z(d-=qP`qkStD<*k1l8Nzvt`>3jGoLOJ)>0Z1>uKqJItoCx*1Y&l&q7ItL9ixLpG21 zb*Bb|)!NreFceMERA@a~ZCd3Atoc-#HKTK=9|N?9 zT!tCn8LO@);i%fjDn+ClP1rL~JqJl;b$ZJ3F%nGRX``D^S*~ct9&<*=IE9K)TD8A5 z!vzwOyN8opZdGV}0iMm3$(jFRt4xtrjGfD5vlzR`vY4a-n#bg_fEHo|MmJC+@G{~; zeZ`jkp3%`1Mldxb&Btgl+zqacqQh11$1u~H2gjVgk)D3E(e9&Lhjw&BqwHqH?h$8{ zhQ9p)=n4UGjMxKZV1xxOUR7Zs|`AW3=tDQz*@7^&-lahbw5j zqk}!eXwtFj3ZhH{BPi*_?qMtqQah_>P+6+xTjlNtR=z8d>^Azb1{y|)fu6DICHP)Z zw?xK(KxeG&l$_TwaDCaD=GKOe6Wcr5y4tEfV3mz>-|ZfN)>=K22`vmG#E=^RSYNWf zeIvD>?QBn}IkgI?QmwL)oHQ1YY#i@FtDt2WMEWUbszo`qc4HsZV!BM`Bh`>=5n*A zm*kCmdG&k-Qey3~fv4KN@TsCMKO1O{u@qMHI&T}x5!6tm4ZcS@#>V$^Ywn-+W#)kDB4ULt>G&^-{=tOh2 zSC$Z5Lt{f@Q%hGvWsJ&!*oMk@&1Z_NGL^5I9x!QfM^u^%)R}&#XJpW^Wuzr3Txd<* zni|CtYnP}F=5kT3C(+lpjE{^!*t@Blcw@~xYdYrq)BqO8Jj}52P0hRfz(cF%S~IxY zV>}$|hEsh|DRd+}HLa+pp28NQn{UNn!*fA}s};3VT79ew>>zEU9~AmoIb!J}rEpBG z6fj-XjPhj7b?zANcd$4Jr%AmXrS3K;auNnDm82GhZVoh0ml6bYKGdHwrFC;@%^g^D z^IX`8RTSx^hJLWhvY-l#II6#q8eCDgr}Q@$r?xRQuy(7nWg81gl{FOu;sA~QqC~B# z+MT9%3~egeU4-ptP4}TfrRnVIXkOa|5vs7P_NI;v&7Gaio0=eLpP6G-_^0wfQBiR2 z4OGZdS}JcI8XFrL5X}Zt{MZmRIWfUyD~-s3rXppTgrHju$;_i-TDB{NfELc4O60P@ zTh~wZ0l3S5XrL9tBSXVIeHe_p?V%lm82n4HELdz!O$~18AIET})GcGO_XGh@&Q1kI zh6}$c!Qb1lYlVa^*Ewil6sh&}Rzf!^L>aW2U z4;010+5tL|0B`jb2a8&K@}Q5hW&;}I_vlz^yHa5UFU5F!OPPSDf&AP)*qR9f+lTtc zXqx$B-A81wD;{yY`OyL_~6fF~t^x0ooWP+J~M+(ZLD&3$jlP2}e%Vb}u(EZ~A zdxffr?#WFp8$H0~R+%%f8C^kIGzkLmIEnUDYhi_QwZA=^X*KNuLrVIG`h@yLeko}g>hl26 z@ckpYF}HIubi=oFw0dx8lmTY|N{`8@9SpA@O-(e@fay4H{6!n%Pf z2tL-F1GGS7zaAdKVr@{j!Y;0X3$MO;@cUV1ym%Ax6AZ0;YdbCL zJXnYV7DnzUaK0*v3*6zD;{bdE!y~Dok#1@ORrVh%1l0RD*j<+lAc8T)kcHC zG(cV*sq}@4xNd+B_0b~@rm`zQz^_u^nyH=SCGddJx}&MSfuWQhUeEBgZm0U@hac)4 z^VEwo{qX9W2aoCYY#bb+xH++V{yQD3Iub>np5iw6qzX?x0chRng_ zX@3Tr=(~R~fsz8#0#sTP?OcVX2chE`z>kN5_3RwhYUH#2f;HfaSFlx9#rUu|CR9D1 
zQ{cLAqaR2NahV@1YL$<22Iz6~G(I+xjdI+`F4E7g6sTQ9f8|Az*k>3c~xO~KW z)z*w)o!|g~may4t!y~LRVF??h)FQb?^o;;^t%~zl1?1v^bc6mhI?$aY$tO@MV;2GgC%Mgu=;6h+gdxj8d|%^NI=L+ zT$HQ$5K?nsjT%-YX_Z@17gkBK3kzh;daKN+Gue2k*02gySrzM=ySh6!G__)nyO%2L2#%sZ0;Xd_HXwSwn>~e?DUD#iab;> zVh#_%Dh6Y-a$LEvFnT&rdX=ZrO+~(=V8bx8feqd6C(I(=C+V4FXz}`TE(RhScn-EY zJhr*C;8vmqurmVp@KE>G(d|6Wg?xs0vzi7C{ki6WVry2eaRVm3&2W-$9UH`E7t;CN z-nt1C3IaIM6UNd;A3`q|nqRxbaiagrM4iy(TCnyup) zN~r`i0<%LAgFhvztf%T!lz9|RSKi|Q?VB)Y5MT_H+4!ciXWs+m;s2-#z@7;DI3Kg5 zvVISZ*lPW=S$QS!wPuY~m17VM$m}Z0fix%?iq$R*VcDvhhl;Fn{{jN;j#M9(23QjE z{uG*5<5i7TIV~4FKMt|BqHZ4jEbQECdV}mU2BilqY;2w+gh72ezaIKvkhAqrD0O+-p2hMgD(4qdv>PW zT|%^L#a~sbQ@rLQMb_*PbJu=WxiVlWWoHDmO>3WTy9X?a%)ssec;_?;Hpi)SxAhorRhgBN^9J_b#$#tNK&6Tz{Bgx&Eq}hgmZ)mHG@~5FI~* zU>HWXA$ywI2&M?toMu(!u+vprm9)R55BsO!tb_?0PAd|gY4F+ZF)*~97IoU0=MKh7 z5aY@rQ)QLFVJ3EIcHgY7IoT@r+xT=LSuUwr(MxIHT+dF&Xxe?O1bB{3229OUfdamK zh)y-QklVb6seb_0O}*WO^1^`E@AQ#%Rr^}00-EPF|1Pn11G#LAALcYFqcYAYi6r+E z+chz&56Eny{6{c`V}57Lcsbd-w>fa8fTL;(DOp-h=5y#k(DgAaQ5?QTI|U;WO>b3_ z0*koay_Ls)RyiOKl(K_WDoL3pq(x;VI+7w9E2XHMK#B%$&P!5uu=kNt;%ZX}X)-Kn zE39&HnMxzjI2qaqRaitaCj{89v<6xXPR4{Yh(>YPzv*c?x%gh&#E|t<8=KZOY;5VG z?IA4TCwA9o<9j0n_Te>Z}+)5}Kl&P!NNIy@+tlvLsCV(!1`Gnk?Q*p2EMf;qCRnA0V(iwE z(QqZc&y;;>-R`PghMtm@wX3TZS-U%u6GlJTEY{sOCR&{8su<+S9mjoB4f4bw+F0UW zhY^oF`48y$H78knwGIu!RYE?_PUrw5P<(tVqq&%V;i~ouN_8x*M?nuHHP##G;}EeD z2R?9WJ6glmyjVmk@pdO@`NeRph9lGtb|`ku64g?u`Bn0T>n6Jih;gN8_Hy}#W~SW& zN=C9LJJPR8$*q=?Ka`FZQyWBhA0%<8Jd1DsaU;mLM@p<}S1f|{!@!v>6!WbLFx;Q`xxdl3_vu>x?OU%Wo>hu zVQBHBF%j3*&bay3ZcqSvG1JiAI8XdoeWg{&_SEP&TZ*|SyY#SC*ZpFksSPz4W0)Bj z?j2q*l-AT*m06A<$h>wWDW%nw`PDVI671n?sNNLTQ~?D+b(|33nnqN}^uA#=O}osbKD3(7p7tnZYpU1X6R{G8gvu4Q>OyK?KM&nCr&v`v z{BK&PmYa&LsvN$(VCArG2r4b4 zY~|Klj9UA~BoG;Nc63W0qY;PI91x*o)zxUF%&D`jX{k|BHfWU5f|lLX?^naw9rnZ_ z?9u6kl^@HC>By|KehT{A?zx?p0p8U7xQHNg`6_dDs?bG3Ru7`?<*69K!7k8)@^?2a z=hV+wOU6KtCDPz1Y*1>Cupa1ZVEPX@QQcuxjlSkSFW#O^{nO`6{onfMH?ect=WB(y z(4;QwBotf`;VSBu zQJF3fYt6knQPxIYoh-Iyld(nlsG*9(1_V+tv4~!mSD|&s 
zQYaBb#kahnAywB1+P*!6U7UU0v;h;ud1xg&E2UvHkV9QG2!d+|;!7EQ(FcCWV!*4E z)m1)KmOMe8<`Y4cU24ry8Z7Kwa!|w5WPKZ&msqpO;3j9XV3qEHF%uiSw)ocdq#KKs z6Kh*C!qY0T2OBl`1P^UL)^GFRI09b84fwcurN{a85Ekv_X3MlUfA4Gff{W5~ZOSQS1xdw+6 zj2NZjHFq_s zamz2{fUmbKk0Yd}vPE|a#vLDbpn)Le=!TBQ?xqtOHng|U*%=!!=8hJeucI-XS0k>N zu+F2!8OEL-)!eQsPveH$(%JUM!)mU>N`pq^z;+mn1VUlPhoC;5vJ|VBG~x%|fd(YsU?i z73^C860KT3RRWlx(|t93sz?+4QM(m`V6~L6!NCR?yPnn-WQia6ML;uA!0!+H&&Kib<51hpC^>h`6>(8an;9OVTyVs}6A|X&TzRIX|m% zfr*+d!D8tq*3j5UgP;~AbOg&|hhA*)@(YK*AkJ`Rbau620_ToXJh=&comCOo+eEBo zw-!yRJbz^?zLer$eQW&}5L zAxCU4z@Y%{BytLyj>VA#m!bf0+2h0-+#<>&fh7Zt&&n7DZqrw|E7S=q&8He1gRaW4 z35-@o0dE5JudF3O{$VGuB*?#h@SgyrF0Bhm2aMob#LA$1Ys!Y9$k)_0U*sxeZAm*K z+Yq9syACG^jcpl{q>UkZg+evtlNu0iWgXE99L7lOY;e&$!v`jr1~P#ZljR> z1&JG)8d@u3HCMtR>faLw-!XI=4DztwjrPNQ!%d7%wzBFVcl4lwRMhER-#!9CC&c9X zdeW+d*^_qUMIY#0H;2m}7%E6&*hfYjV(T86s449O0$k`|+-^>O@=j*~R4( znt<@-x9IG&Dzo+mdIPswN4Oim0-FfmANO0pApdqi%^<}^%rcm z=<}OiO1ByWH!X%%Bmfw!NU)JtO-`;xtaAVEMNgI$L$xgXpn`>%T%Oon>fh0*ndwR! z27THMDURHl{j91STPDF=&k{|j)E8^+$3T-~`{PlTr8Z6SqD)myAMDNsujM3G&lATt zty$Z!fs9t(xkR@^Va8Bo$pYs(LCVM@*A3@3u4!@go|+n~)Fn$ZjUo(wH#W6F z5&k~*!sOb8Ni&)04$Pj`bJ(-|D|yzxCg;VQV@>bvGwk-p!V{l1o1aLHk|o|8>{;xi z+N*t5>?5d$eWh+S{6J;!BaAsaY4@P5y?%6RRrwZqYaY$rrS&l!PanpnmFZi0wrs@( z3*;Rd^(j|avinSMG^djZ?oHgd;oIg5FVpZYGzRt?i_L=Czr1Rd=(B3jc1I3lhj5lI z8)nQg^ULZS|Gz4MugtP4gU9Dd5t4Itq^Ff)smAB(NIfehor%v5l0cN2C)*a_bD4RJ z-aE5qoNQYd4t0PW?xSJcTh|9mSIY+ksdIVf@v?m{?R>dV1+5k6h<}FmGL}{yi9KQ1 zSVA`+=;MyRSGyeb=9#SAd}kM-aT5M0oi!)CVsr@qv2HrvN1CGVc;O;W%sy2&4;3w| z)7zUHvQE`u0q8pyN2_jc0oV}O#sg^d4%%j;>P$VygT~t1w@WAd*xFLY(>qClmwR7J z&SPt&NSC<^3~;kIvG~uTQEx4)(>v9Ddkm_w&VbR{$h-9+%hi|b44AqxLGKpM;q1#g z14b7_c$ozKX>@Z0^i*N-GKSTM1AP9X9EY!Hgz=g?+qskT@Ri!`tgXJe95}fSU(p2u z-X`g69}UdP*@v&_q`!xh!$A{r_=+rNWp(;MQEvZ5$l-vZ;B4jA;a~ z%6UwR>{y<6JeR@RchsrIN8V=`$~sg;Mm$eYf=hkc7H=LZQpPyFEk3aD_8+aGqyAo> z@T~%R_P8XIum0$mqU z-q-{b_v6022Mt#<&_zNCG9fi?+_2%qZiZh#exas~Yq8B13t2^eDk0$ZhU1$%n=0qf z61eGv_R4)IbzMtCS65SOC0%?~N@J6vQc4BlDDr4WHWpR%$2WCU&Lz%w 
zU^7Ey6+v`1uiwzt+*mma&XlOGA%R~k!jWuwlr7Uf-o2lr@_ z(bZE?g5GAa{IVgB>?9BxWv*otqO!bVC8KtYiZ_b(9%>UVDI;vcT`iirGR|ttbC4%l z+IrU3A<{o&mE!jk)6DMo6VtTK7F6vgrpdLK)-muCvxk>vCWi19XEc$PLRw@yB~6YD zLAF$Er{qNGa7J!GCpJxcRy9hj#zp6zb&PdQ_ zsJiY#uwG|851vN)20o}GOdr4(y%@xuCA8&(He(%*WH$KW$Q7lzj+`U(2QrR&+eVJm z$-ZqPN9j~p@%3>+S!jL`pImN(7D}u4zc=6$OC=QO?6dY*sF*Wc4t;OZPJ!cL=l3fS z-nMo%gN5vYBJV`Nr9=aSu2;VX{ zeS0ovl2%zPi;tAzp5AmEXJwt8=>(*4vokU>oznPfn>6R2DMCW(0bH<~qFZ*Cl`5># z-tqo^-l>(IMxy~O#M3bfr4xJzZ8SZDItFe>fG>&t$%=HPQ9f+=8yQPi7%A9&mUM5^ zoY+&{!0SIH>0Zm3l&dzA<~5tCi5Nrqy}UEcb&-ASEy<_Y(&_|T((+EPrFox2>XOCC;&J>*${`b7{#@o4 zu`Z_dDU@WKPoUs`8*%z1weK>}>cF2Q&QVOvR^{_qWpMH%sZUoV`D8`vn*D%u7ycx@ z&BD4P1nno@e&jT7CWbI?+>j8Qc(;3~Yu=-Lx81pTM7qInqeRjV!{%q`B>7VqXQ9dyAH_KPKtz zib=)mk;^jtN&5E2Of1bu3(e|oKuhX9G!Qvi=A@Pr(JmtD?V{15mb-NBW}Bq9)g~F( zOP1P1WG^7H#%7y|ee#vm+mw^^ZNy3XHsPc$BFYakWpB5G7UG~?#n94|CD;Xl(bwGp zk?KAIl&-`d-TqnFY6C{v_1SAV&Dy}=9)!Sz$8P(i>CZ>R^W~?Ck3zQf_(Q{>>nzq& zi<{)SUQJIl24?htr1Bvr*?*k+^#?(3b_1$0SFzWpzKQhdDh5qH!OsB;kNzx_eRdix z*Pf$E&%Nw<@{a?qGb$;rsHAd3rM5LuBjAr)!rU$q-LrByKa!r^L^n{DVcxZ@B*mJN zSu_PL;F?pA%%+sgUX;wX6lzMcqa?FzC>dxSQ!+~k5-sZ*OES&4%rpx6Z7CU2 zz@NrGJDPV}#mzgqnARnlwFa3vMVP zlTBK^|B#-Eg$!;J=P7Dn?L%pLTBtau2Gm5)xuQ0jG32C*5abkCmznosndtlQ1 zqR7NK4I!%_%2tN6efr2w7g|}#?q=S!`WMQbcP14G6lvO45wU(_) zsS*%OttP7Kl@q-X#sq@83fnnStLrgQrD~{BRoN$InrU3%iApnx+Up(EV9Llw{PBbH zQQfrh9E#?U%E5akC9R)F(T$)0ec!pTN#7GI6MN^NBiEX<1t4`lmF(YCvT2&wM1>zI&2O^kifO1?Ty;E< z`VXNm$54}=+NoY;nJ8D-5>0wfwCGC7ms;N0Nw=~AhPHH4FbhV1NM=3XqStOFIddZ@ zFo0~PWMKrpr&gdMsavTwSueimhF7^kd&cfbdI=_(>m8Vh{Y)Nsz+C0PvoIilVgwSy z31q{n5{&xalL;q4+iCr2VDc>sWv%VgGbLwcwP0Qb11RId`H9%Z%#%*~o@!DNxKKyodsx3pyB zK?1K3rQ>X%*NRgRorlS${3UHZZRB=r<7p)HAi9iPEd#up26TP1Dbk%)$-uKmx_kJb)rB?sWh4w5^yX2{ zlJb|N@fMPv2YmFrr;=!mheIGaY4MkIg8w(F8`eiheM((S9*~bfBGbW_k+ft7Tp(DO zIAfY(;9D&|CA}Avw62;1O6~SZdao#j!Lm+9Pkkia=aqE(1sD-6;j>n)f`-32w zn^;FLs)SX>1xWhehSI}Gu-pnB1*He1lG(3DY4dwdJo9;}k*v^~$Rk>5-!AJgE}0K(C38Kj z71qu<-r!;*TuWWjeSRyfh;(F7F_1!Pl9#o6kTEVmGWcFs*vJ)l)+@7|dRO30udw0E 
z|MFMbeeEl>BJw6zGWY~oSmE3!tkm6J$#OS=O=x07&0>ON!}kRkd8wPg3g)H2lI9a( zVT19g_lj8BJAD%--R|+6wE0$87;U;aPo5Lg`c|-_qylD^(jbc!+!Xliq0~BfSS+kd z@W;OTD^|XG)vl%^VObH`uL~=&3(&Xx;@K}f^^doYN_tO`g(10bmG#}T{?`+`-ShTQ zVN->tfFyIiR_3nqgY$xJVB=*ox4nodVLX-gB9PQaNmDVO2h-L)#pO;?uGqRY;+l83 z82o_@Tq8;I{V@L^tbmN?#nSp&F~7c};N-=zwC`OpeA|wBkDQt$MYb;+lT%Z#QPy>EOtDhW#iKXP45i*n6C1Y9o8TmW?};-*4|fsS z^u}dLv$atbRu(8eYBcYuuetK#5<_d%FQetm%l(=<2(?PxNmcV6PSbt{cZ-bXnVZ1N zl%(;VnjRfI2=DMwQa_@m7d#%2ysk#81pTC1HYM*{>I+oRz}soMa4o)9RJDlvQ_DFBd_ow- zJm1(ukqkW3rnSyMhQruL>b>Qb)^E6J!3jX8)C+I$C#Xlo z36;b(Lv)=v*U)tln6Kp4dH@**6HhwumRm?&dwdVRWqL``*iB3$zBk>p4izY8F3`-Q zl)vu`n;y@C*v9j3nLfhGdgD!N1_8{?d{@#SHVi4IWG<(Y8*jboi7<%gJ_eV$l1m9g zjeB-3q?9H|W-H|r^7NZtG6nJkKAJQ5PFyBM`DecYr}x%m6VZq2q{gQazVQ}ZNR4?{ zGz(7V8Y;W-BwPpv`PU?mZG;i}G~C1rwSEc|6??IBz9g4d82%Pe0*hJsB4wW5Ji@G6Ph8|J!kTO%liz3ZM7BTxO7R_1=)v zYC#r`yfK%#g5b@kUlovMi8d;E;aiU>9OBW&=%HXMb%hgnN?{&Hm*>GR4%Uncxz%U>$ z!G*!=@0slcpZ!=}NV{e|YM0qT<;(SIT?k(WvfsDMJjQi&U#|-(v->@>lgezoViz(N z8Z*pOltb_xyAa7|zSHxoGVZJZZsuCb(*LGiNSDLb8}Hq_Of4Zlu8`GrQHp99vI&zp zfRJ|)vUx5wL}-kYtC^LAIH3@|2jIdGJ&4RILOh=kgRkI)G0Xo}UgpQZP5NHL3z?YB z2lFxqQnu$2>Wsk4cp=#apfU#$f$Q)m4Fa;AQy49$1p-|>0F}9fa4nA$AtL1gWDce* zKTK%;m-9ke7B{<1Q*?dJWq=9&^h6v-Kgm@MHz;t82s27r}4<>UMA)i6H z`cD{#A)BX=GuZ4qC37a_?7g)Y(o@~H_%a_LqzfoV^Ney>J-wItGKUl5S(Kac2wzB} zz`ZK3^kuM*cuMAMLJOW=4iTil8RyKAMBp4EV7$&3mM;$RNV8xg@s!MEgqHPEU&ti2 z9hXX)5Bg<3NF<&n64{5SLq_AQ!`7JwB62SNz_H-I;TKYT4g>&7x3wTtMuNAsl5xmopGymGB^w<{O84bsC1I zytMqBNYZ%!Q0o`@(debbr0+=%Ek|K^7^vY}QT;k;K9g9eoW_faSl*aVChGN3Se8Cu zZ1FMPPApVf6E+!qL{YEt!t&*9MZDXAch{xfMK?)3gw6Nq;OmOo;T$+0mgXlHwMi%N zu@<(N%}MhOMy(120fpss7nHWZ1VLnbscg#)e&vm(N#9e9dQJ<1Dne@iW72%1F|2S* zTj1Td8}-B&q)ykIQZ>@J^-&&b)T3`MQ0_Lgfe7P_I{qA13IindLu#R1uN`VTVUBnF zaQIZN=hBSH0aWY4Xm42di;A^e=F|%dAS#s$QIHD%(}~)G8Auf~r9V|kh<;d6H+=v` zo?RqU9>22~0w!PC`bsYkp$}`$2MEFhD*#E$dx$Xv*aJxN(~O$M0h;t`BidZQ)TqTH z0Ofv9tEU@v#|eP(vyL!GyYD!Ll+6Q3X1(mFwT=L34@Cwz^|)h5LHOZE_%3}fJ?e=t zK-mxEL&EO8!bkv0&4N6o&aENoZp?mGRn97>s?7b<`zI^ 
z=U>wQ@T8tMbD?TeB0mzz3>qWLBJQ%sYvp=2%Ztd61|~lm83~gilkTIIdJM>wn|}B* z>AiNTTQ3Y;aU%E*X1+Esp2bvcU?eDh$foXGSq5QA`rQ{e6;GmJE3Ra}w5jLoT$zg( z*?X2VbpZr6_5DSV+8nQPYS#)b8Vfvntkur!FKSrKdb?BeGY0~B(L@vACx%i#Kygy> z#VC{ikx%zr0X!0(hUGD6WjYNPp4_CatDPI# zNll|dXTR#H-ECPn?xocmXqs%+_ga40D$TF|>8T?dh#vCMN^aif zI#qRV-x=F@fy7Zw-#VvGojO%@>V3|CDc0tsnM7vIk2WWnUUY+~C62KdZvH;rS!)g^ z+k-cr*>@qxBi>NC!>hLhi)i&l%?Dmmxkrb%>N$k#P&FxYqu9mTPxHyV}V1@3&}=x zZk)Gr-3#)K^UASn_9^0NAKvihr7m1~0)|>tgDgyV3x$J)`Z76qAftJy@oUYu}vMz|?Yn0r2vgPSU6q()K(scru9df#Bl4`J|Ix zR$+5EWwzaoRhWp>D$ID2u$|RS*3iyq3n_fUydi#)Vra^;*Q}n}%&miA9%m$nbnTVg4`(k@9aXp%=AJaq_aR!e z@8LQh&Rs9FSKq5g;c|4PpN*s9Jm#SI>bH+25piprhrW!jJ~oz6;Qx3PTjlug@1IKI zzK^ed$5@*5)`0{hoQfqg(hKm_qhslK3CuN>3c-$?4cqKBJdwyg{B5Y*F`kuqWUThF zl}qo-EZXWs4dJnQ^>OvzUp~e9YD{9iULjY`tenRr0A}`4lsNk*Dt64}Gw+;4yg?^{ zFKA{nf=p9)dWvQJMkf(}(cBd((yJ=w8|a3nz9h~ay&|zeG-nMlM7i)`DaJ5mTSH?d zD;Me=3^^aDOeryqO{G{KqIrIW>gg>!xXDZXS6eO5ny0ITUW`z@w^0HYipKL=$n#d) zn>^ZNYXesH;TBOX+qt=yqQ?|c;VGNtSDM`V)TUS5o|9}|*OVp>Qs6iZJMa2EmT4WO zi7jSQ;VJv-2RvovOUpi#S!zbJP9MkU86j3Th3DVO#M^AbAHU=`Zv$DJ7BI|7t~OWg0tIjZ7O>h0hw)Z zy`J-#mKSRzFJql`IBRkbLEr8pQ&Og8Jh)w!^Deyy7t1m$e!;L_+MDf?_foO`nu;ad zqgxp{F4JM4fS#NfdpOu=wxbSqA^|XOGz36je9#^S;|Sz?Fw2RH_uzh69=KQMR(4!p zXtjCr;>3?PJ4)<;omY+l8Y5&6BHQdVSZ{;Fwy# zW}CGj&ccjf7f3kRKmu*&%G_Zu!B;rB^;OTBJ%Y{d?70C(Q1lwSJi}rkUXD|&#>*?f zEXiG`TRiMVxEihFo!0sF3+G^L`~5IFmo4I6XAYnD=hs*9nqO?da^H>vnd?|e(<-ta zh-cd-hlmrJB8M2zOxW9WpSw4L1JAQtR`0kf+}Qh#;}x3PJVuYBNri2U&c;!ly))Zd zqb$;nSWYGyE^U$rPTm=gQ?+Q>RT5eYw`KF0Hd)oMW#%Eyi#3NPGFH!AvJ)VaJx{cz zJL{_jC1Vy&#Ic1n2TzPBF0#H#$BIZo)KGGjGX9*-*fH_T1Db0q+yiA8#Anv__oCDAIJj*f{D9^I0 z7qNPeH2VUT{NG^#FrSV1MdFk(khujN0r43lNyzlO7?z!H8oL-V95dlG`Xq*-XPjR} z39=d;b9`T(u zFBy6FJD$$LtF$jodktnku$mFMCeLAv3>%ob?3JdR9Xzv%7H2rv3{38l-s2S^yvX5& zV4O@z8nfXSPZQZcNYp8*wd9qqZt;dvoCIa-Vq-zL4ht}=DLvK(&y~$Xpkl8N$)HVP zz7#GZs#6M7Yy~5wkt45L;_SMJW^9Qg7CRL8>fHtF**>+q3|HT$HwNMqN|PmbU4%^; zW7KbJJzZYCgo|t0+3Na@r%ArST#mqW0zLA~`sGXKmPc1ovS`Me#&Pz_(S0J!OV9OGiZ<)J}$27dO- z550YRy?EUJMhp+v 
zeyWB7nki%m#FYbfU*eH%=%f8A=r+$)0=mxa173BP;jE2A*ycH7KtUh8m!{`q{Ba27 z8R!{bK4UHcZDQR}DOw4gjz(h2c$US~JlMP_!vyx?G~HYK_6g%}Bz4NrO%J$JsJt zyO|D$v|TK?X}XYY*hWe(9gnR)Uxrt@?II&Vk zWoWDvci{qz@tCQBPdh-&p1^a#>=Ov`BmEU)f~Y8;S%*d-W+NJC8~Nso~)mr73aD3jbcb z;LKh|kRQT>A$l=fNW@IMf_T&7{djZ0R6Lmy)YK+AH}fDVEl%k^X7(xKX9Dj-^O*rX zgDki)J^{Q~b7e@z>b%UAArHhk@g%|no3l2faXetL|Hj5T1&iVoWFN|qy+Q>!$c;7WooFf1w99|UCoz<&e8ELeph*Yvo_H7}xHMA=c6e!!GoIXK09SOZ!^He>A zVE=V(gieYbZ|?UYQp4h8pQLR5RAm!7XX0!^NHV40imTl&qmM(ZhJ$1jWCY1tOk|>T zEW{@r5qwRQkYEn**=!@l3qd%g;X{B(3I{9u3o7eu#N{{dRK`H2D@Q^7VG#9SPY#XPMF+{E7$;2-D=Vd8r>P9xCYyJJrrWmT_@L!R~fk#*Ag2~ zivn_3R2a_78i{AitW*|()w0qe_y)>K5tVLp%W7_K2BnDD9OGl52yS3otW-LMB`|X* zGN5*G<>ESa?49a0<5`IL99tDDt1+`pW$J_HA%5p_OjWU}4e}~1E!oM2PevFwD#y~( z)G;iX*)?gPQkDukLp)XmT-(W>$*xT1+wdh0Q~Zw1%i69wyQe$PhGJRoRJus_&NA(* zHaW7lF#cp8|2{PcJtDQ99fbUN9!b1-ZGpB7s73;2H%N&E=DNDr3~h~fXfS|rkyoRE zdq#`E&CaJLcQ&}$`B?70hFN$7Sz+fn!QI`_rIib6W3h~X2ZhD$XQy9e@%ZgS%?5Ir z@Teo%CheycmsxFd^|5de%{L#)SX9>=>`?9#P`4h;x+)OVv%JL;K+L12vOG6wiVlYe zFdL*6A7(ZmkYNOz2GW7IyUiQmT8w0pX5|XEjIKo}s~J6Ix3iQ$DwvsUmN!*`x)TdO zxtLh6qV;s;L}4Z%iGNVXxoPe7QYO^6ejy5RDJgGCFuK^p4f8aT`DiuXe1eH zo?i3J)C0-`((KoeS@u=R@jw$s=z27$k|j0X0uqtg*9d$16tE93ZRi8^B86FHg#goe z1Ax9fmd$`(_Baup?RVS$X&JB4A|N~@}YCN08LPCgE6YdmLGTy$!328`vu* zrL49o%y~g9i9pQWN2U1~mFDh&zE3=-z04MZGV&+`nOW&vPPSL?k_NTp*%|R=_ ztB;F&G!gL<9i;CeJX}0ITHb(l*U=1k8Xcry7C~}P+FEE2;evcH>^IwsL-jd4-D@T9 zNt=s{-C=XsnukB~r3L-Rbdmr_<{9n*HWNYuM_ir<(nCb8)V<*zGSQ52S-` z=adT`EH-HJ$8$f%_c?}8^LKNU#Lmx~#-NB$BO66;I+DUT=S(uC+C+-DeYw<*Tv6UQ7 z$)S4@J%0@4YjuWCw+9ZZCZgLzot$hho@j-c0zwxnt-y(qe(MCdNbZmjlk+#F3rn40 z`&joGNvzYIqX;FiH=*O2TPDpk#i>%nQtt^XfL8L3g!N^*X?%jRg{1t(0ERE z=TWu*YD#fm+UvKweUzd|IEPlEh(3T?onM-xswki~3evn22%V(@W{BRC8mI3uNcZ?~ zq3Psi(%hL2n~Nud1n*8ywdY%1S;;C&afqC-on|Y|De@f>qI1dbOR+L*o_4#A`c%Z; z69T<1fbm%-E;58Rv^pj~c$^%k7ZJ%+daTnuHaOWt=hwe`{qC@v98&S>_o>z#6e*OB z(g^u}vPO4qB{`6Gn#Wq5g=VikY%L_W+0T5R@_U_3ri11}56S|4LH{Z=4pJYZR>T+w zJ&lwcOu56g=8vHfB)2=N2;3Ppg$8h<-(BhfUn(x3c$x}XF1CiYN`T<$s$|;43`qZ> 
zKtR_&RZ8`7w9w3sDs+ilo{<;dTMUpvvZ&j?`v# zy;0>96lc<7?Pj4XlY1!{#4s4PPf3l2jtS7IZf8jrW+t8McZ>d#%t-AC)rp>uc8J2Z zPBlAAsGuVI2hdg)!FPLMX<@L`>p{#?DkSGqt?tsWhyS@xf=f%&2SIs*9g4Nsv+4ZO z!oq3M-_fvrjgD64Djrm3YvEXHzMup6GfqJdr^h?Z;jmTcG1QrQbLEa6Xaeemlvje; z$e4k4(OzhtXh}xzO52OayC*Q%i!mXg_(e3newWl~!R0~33t^$RcMBCxo!=Rgz}`Y@ zq1*SQb60$HKw`Ry=}6XgbvM%fwm%?lbd-f9x9$<;4KEORuluyyp^k(@30ijVG)5r7 zM!$uw(r+y~a+r+_G-#Y+Z~DP7kJoxn{?SfC_A%Sm?H$mH?bidrsnbV-T_u zgAaYTU9z+3@FZp4?Kp&IBSRCa=;`+2eD`VDE)Iyn2qp1HA&TYEh!gVAGFqo#lY%V@o}++VOpM6q!{~=7c!7c+qu?hfc#(n+QSdVq`~n59 zQ1BWBzeT~PDR`ZNZ^f!$)S%!|3LdB6{S|T1@jb~px}Eb=unVQ z@BlAEI@Pia=QSclE+Z0@--~|ePn1Uan;72L=NeW)1;6oIAn1YW` z@Y58$M8VHd@beV>A_c!h!7o$rD-`@H1+P-@8U?>W!EaLV+Z6l`1)rwibqao$g5RUy zGZef*!J8C(j)LE(;0qMIMZq6X@P`z9k%B*>;Exet`L@1+_Y#*jfBjvfw<(r_|L7gP zcxmhC=H*LQu+O)AY4wpuk3REQZ~5FgtQ~2_<9scjKaZ!WE89n(e+=8NKsc%=MI-%o zgqEQ=ea$DI&`ZxJX_dJ7L3l!#-edmVGP!T>o(c1B@)rAhV(*^)h~2+O|DBxBIFl1o z6H|!WTMN^t=2G!toD=`8gL}PjIM1@b*E^p&^q7~O;Bp%NyPLE-NmB>64$e;atwXO4 z&dtd4dh@lDGTdm~8%_Ua%I;?7&2s(!^modPJd-7Vr%U1qU)R;%Js77jhD~6s!pK!I znw8aUxXR+hD{L}kB;$%5(Dhe`Q5 zQmIkHlAoA-nI8#@<;j>XOqXImbgt05HML?DcU3USYu&G@JuXfE%Pp1u?U#znC^6-i ztW}q@xTG{qALjBo3E2`I&b|JZq~h`-%jf4lE%KA;B+a7i({{XOK4rS%i!h6HQz)0D zDRVErD4qN&s7RUxFS^&#oMOB#EI6e3gXBrV$1>RsmW&KF6B+V$6qBDDRncq&@!)*JO?Eoj=iE1i^Y{VzFw)=wwp zFh1SxN+soUbH1*H^SWuJH076;t)f(8#UazizIMk`tf$wdBwNSz<4IH69T&3Q>#w?$ zVkMBeqb?=cqMe!+J8o5bTAc5A88ha1?!O)}9og)%7u2njSWB-X5m}d2RAgPrFQ-lK zvYgDTY~eDO7?g@~dv?WS@|=d@cMslBv;}We<`i-wma@rPn#tt-mTR zV?4_miGPjnNp;fgtk3NXd$%QvrQ1cl)M!07stwEDAajwWlb9N&Ta~|qSboMAuVb7)A zsfy2?(bbgdalY%xOh6 zW-M~VtNL;ze~p?+%0SBJ`sJdfqFah=C*#k%HHWdZu{I{lBfqX+d17?A1vvcHuAA#o zy4~a(;n{1akf!(s9~N4QyE}@=Nw6M^;{WW9?C_+5pBWUineC6qiQ+cTy z!ms+6@;s>e1dKVW-}Yqtl3#9XllK`opRin#+iUrfy;}03TLb4VF(pivs0t=?kYBpI zIzQPGck<=(Ipy5*ptD~Ib90w>N6Pb)Ekm9s(^pZ3GECiSTV z*Qy=&vGm0KS?0x+8{a9~8Ko%pEi-$Hf4Hq{|Iu&vyMWLy%?-zu<1xnn^xuL%$y>Zs zjKhDqgf7>bVu4ZQpylL3iWnVBxGt@0`7LP?UHe|?f6*`W+KCz5>zcXcoxktM;r~QC Iu$uJ00hf}D00000 literal 0 HcmV?d00001 diff 
--git a/x2paddle/decoder/__pycache__/pytorch_decoder.cpython-37.pyc b/x2paddle/decoder/__pycache__/pytorch_decoder.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cc1cf724c38450f826f83bb0b3a6a1dbd1f413a GIT binary patch literal 2268 zcmbuA-)kI29KdJx*Ih2TB-S>CJcJ_%_MjxOPf|sqDf;A5Y(QC%VcnS|dt7$6vm5D! z3rG{u2m6ak5!*^46rx!AA{uSe{vY$&T#`QZ-6y}_*}L2(q%S(le0RRH-|zhTe!jCG zl*=Uove-)!=0{d^~P9{rX(+lUkaDM8o$UF&6}GOlUL5g`r{fNx8J`p$75R2k8ScFNLo@)e;9tEKT$2`lYJ29I$ z;#Y%s78wh@MZuUVF}4`-C0G|z1B_i>@*3F}*#{%*-Q+|O`!M#WpeEQTQmr2%3c;90 zgHaGYqJSowo7`HXuEh-qo1@B?d>O=PXQfNyl#oh$`*xyxA%N+^i~B#-qENhi^4;Fu z-*>M+Xha^bWL_WZb-z=pbP71aRz%~7pZ2)eoJBe^m-fx;JKN9pzTeE|*}d~u`^k;{ z?ca8<{k*gFcYg5aUtK*I{-E)`oKMalXgGOpy_GXbDy!!YEpg}B{r1Bj_Wyj;e!7+) z5nX#L?s^_t>T#~M>?~L&c$X16NS8;l^E`mDV}vs!HI4b)vC^s^pLNE zt2~Y3mHC1H@F}Rh{J@@SpE9CEnMpIrQs35SNW7nw6Maz^eyXo=olO#5avRWjh=Enb ZcVWx$uXqp7H^G!G!badRhlhp-{smlcc^&`& literal 0 HcmV?d00001 diff --git a/x2paddle/decoder/caffe_decoder.py b/x2paddle/decoder/caffe_decoder.py index 4174cc2..ccb09b0 100644 --- a/x2paddle/decoder/caffe_decoder.py +++ b/x2paddle/decoder/caffe_decoder.py @@ -18,7 +18,6 @@ from google.protobuf import text_format import numpy as np from x2paddle.core.graph import GraphNode, Graph from x2paddle.core.fluid_code import FluidCode -from x2paddle.op_mapper import caffe_shape class CaffeResolver(object): @@ -50,10 +49,10 @@ class CaffeGraphNode(GraphNode): def __init__(self, layer, type_str, layer_name=None): if layer_name is None: super(CaffeGraphNode, self).__init__( - layer, layer.name.replace('/', '_').replace('-', '_')) + layer, layer.name.replace('/', '_').replace('-', '_').lower()) else: super(CaffeGraphNode, self).__init__( - layer, layer_name.replace('/', '_').replace('-', '_')) + layer, layer_name.replace('/', '_').replace('-', '_').lower()) self.layer_type = type_str self.fluid_code = FluidCode() self.data = None @@ -66,6 +65,13 @@ class CaffeGraph(Graph): def __init__(self, model, params, caffe_pb): 
self.params = params self.caffe_pb = caffe_pb + if hasattr(model, "name"): + if model.name == "": + self.graph_name = "CaffeModel" + else: + self.graph_name = model.name + else: + self.graph_name = "CaffeModel" super(CaffeGraph, self).__init__(model) def filter_layers(self, layers): @@ -242,7 +248,7 @@ class CaffeDecoder(object): with open(proto_path, 'rb') as proto_file: proto_str = proto_file.read() text_format.Merge(proto_str, self.net) - + self.load_using_pb() self.caffe_graph = CaffeGraph(self.net, self.params, diff --git a/x2paddle/decoder/pytorch_decoder.py b/x2paddle/decoder/pytorch_decoder.py index c1a626d..e0f8517 100644 --- a/x2paddle/decoder/pytorch_decoder.py +++ b/x2paddle/decoder/pytorch_decoder.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +import sys import torch +import numpy as np -class PyTorchDecoder(object): - def __init__(self, script_path): - self.script = torch.jit.load(script_path) - self.graph = self._optimize_graph(self.script.inlined_graph) - - def _optimize_graph(self, graph): +class Decoder(object): + def _optimize_graph(self, graph): torch._C._jit_pass_constant_propagation(graph) torch._C._jit_pass_dce(graph) torch._C._jit_pass_lint(graph) @@ -31,4 +30,37 @@ class PyTorchDecoder(object): torch._C._jit_pass_canonicalize(graph) torch._C._jit_pass_lint(graph) torch._C._jit_pass_constant_propagation(graph) - return graph + return graph + + +class ScriptDecoder(Decoder): + """ 当script_path非None,直接load ScriptModule; + 当model_path非None,load PyTorchModule后使用script方式转换为ScriptModule。 + + Args: + script_path (str): ScriptModule保存路径。 + model_path (str): PyTorchModule保存路径。 + """ + def __init__(self, script_path=None): + self.script = torch.jit.load(script_path) + self.graph = self._optimize_graph(self.script.inlined_graph) + +class TraceDecoder(Decoder): + """ PyTorchModule后使用trace方式转换为ScriptModule。 + + Args: + model_path (str): PyTorchModule保存路径。 + 
input_files (list): 输入网络的numpy,每个numpy保存成.npy文件, + 文件路径存储在input_files中。 + """ + def __init__(self, model_path, input_files=list()): + # TODO(syf): 传入pytorch的Module(即import),否则出错 + model = torch.load(model_path) + model.eval() + input_list = list() + for npy_file in input_files: + input_list.append(torch.tensor(np.load(npy_file))) + self.script = torch.jit.trace(model, input_list, strict=False) + self.graph = self._optimize_graph(self.script.inlined_graph) +# print(self.graph) +# print(getattr(getattr(self.script.decoder.block, "5").layer, "2")) diff --git a/x2paddle/op_mapper/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b719c00fe2accc7d97b2436189ab556539bb33 GIT binary patch literal 162 zcmZ?b<>g`k0=wc_@gVv!h=2h`Aj1KOi&=m~3PUi1CZpdg`kf=P|D;z9Id5CH>>K!yVl7qb9~6oz01O-8?!3`HPe1o10aKR2&LzqmB7 zGBGbLF)!V~P(Q*bATcE+CpA7fKP45x%S$cSuP`bAOX=qq#OEd!6r>jEr&OjFB^G4p f$H!;pWtPOp>lIYq;;_lhPbtkwwF6o98HgDG_O2`( literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/dygraph/caffe2paddle/__init__.py b/x2paddle/op_mapper/dygraph/caffe2paddle/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1fe6c643c68fea1dc45687e4f918aafa906f948 GIT binary patch literal 183 zcmZ?b<>g`k0;|ed@gVv!h=2h`Aj1KOi&=m~3PUi1CZpd**zc7ga literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/caffe_op_mapper.cpython-37.pyc b/x2paddle/op_mapper/dygraph/caffe2paddle/__pycache__/caffe_op_mapper.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed5d71b5993a072d0323ffebac4ae0bafdee0595 GIT binary patch literal 32347 zcmeHw3y>VgdEUNv_PzH44o@B_io}8B0t6{hk|7c#L5P$nkRc#aT9I5WuJ`VCZ*O-o 
zGY1@&dkQ7clHyn@MvfI(VUmMVr6?)Kab!7VC$3l}IgaAQPD;*0nUpJ4lC26Wu0(d6 zlna$q@_m2L%+Ad1fsZg!B^98qr)Q?Sr+51A|Ns6+_dHZ8_Br2xj-B?{86eUpN_U{1R))&G z6su=cUKMc6s-i04XsEKP;FwcYHHBkdO{*Ck3u;!);aF6A)LtA*YF_Qbv8?v112|UH zL3IensyeK0!f{G1sGD(|R=23P;W(piRd2^}R^6s<$8k;_QHwb4QAgDsIPO(V&!n#{=qK^$r{ls&}gUa6F{mrQVI>Vf7yM6FA~{tS*s)l=#mj(2=ImWtKy#BiN=N)K~Xys;YY9muHEdc9h=d8K96&b3$8`)xHW zk8hlB_Er}YPS(9uPvOF&O9^+_GB~F3$6>VgV+ie-9UsN6CbkpTV)580-bs#Pjg+0f z8pAPTXF*>W+|MR(W{hH;R40ejXyg^k71}ISl(b}*RcgCpSEba{)%dme)p$I%`5AqF zI`-UX3$1ig+tZ`OwfI(Yl1VgAY}wmmmWUPBx9 z9(`>qJxY&~Z1rMiltNp3*D~gJ$K{Zl*EKG7?0qV`T^Xg=i%-TccAh~`JI3~Y>CFMQ zD(450X8f~5I4kl@#W-8eI$3pPg;M}8v^Z#*t)AVG7w_cda>KrYDTKEe8&=Nu?PhnO z*H`U6y3X!jP@(METn zZJ{xH!P*!M`lj7d3s+ipZSk;^#+8<7av&X}(P;PDcBA1d2CAWN7W*6aV8d=mhn=iGvz$V^$E)q0YHhkF7S5LYPQKS`^qT9f6K&%vC`5FJ9&9q@|v9NYSU`kw&|2D_qOIXP8P-GMpd=V z){@=5(pYZ}8vTLg6oXfs+#{Pyc&U7khO~k1cq75C>6Yd9w(OQ^ITeNYK_(fFIO>+8qXd-gGFqj#m* zTW+-S96M9}A>ks&oFmA_Of;uMb*1N5FuV1J(xMG$t zT!-*y-HRj^FUJl1FUO1VjQrmt`Ml)w@$vs+A`!3Re>pzf6XG2;k5raOuKY)W;NQde zvp$8yj$Msy#W(NAB#3tsn6M{g!p66g{)A0of~NIkw5p?&omtD8)lqyq%ZY|*S$!sE z8zW3P701MWS|!fK?3_w&=k-;WCCrZEXuXKGa+vC@*LwzH?3|u<>9aDU&JXX#tT0;( zn0A~PAcvh3;?9L`+p-s$*20BWw}1J}V!=sb%*_gZj(LE|K_+~W=3ylD($a=y_t*6- zwwxsRskxtb=b7w7vY0lHuw-SU=g&RTqIr}h?qG5!lVg5z962YC(OX(=Ev>c8_u$-O zqcIGR9<+N`S9S3EBl<97{MQ&7cpqy&9SY-N^C2+c@{i7ag{Iep=T3Y zDe%QuCxKx~gRf>#F4akoQu|_CSvxbz;4FJJzHN-MxND4(+c{jzkBogW8M0Y$#^fm8 zD5yAU7Pm{+VtZp-IWMmg7|1W#WxF!UT}`lUmPVV^&eT=b%lE$LmrbJV?@TB=-I;^nxRnow)5vqx&pb@sZoHvL+V8>n?+Laq7EKDS@*_e*D_^y-At z`#T5Z8T&g2-Dk{(OYt{D+EE^4&pH{wM~!+GQ+a*EZq*HN!oGQ-zgf?l{q~^W@3wm@ zjP9yid#cHf!8uWGnI%PY#$mTFvvG|{pD z1O$bNs;B4{u<;7P4mhezr6ZVtNhPpm^HZZFrtrtNQsAU1PUy9?dFoIUCb=dp!thTodw1nV&*F142rfPczpucu!GAK)83Za%aF?59l1N zdUX@?F_cO57I0PSTVL|ae}&g{{jHSWGmxb8%y4`59h5Tsp5boqlC*B_wh;P^!$l^v>|>>)vI0ehV+ht&@Q|kFq^TN7TebNvV{)yX=-BJuK)R zqU}p4QSkeS`@KzhM#1mnZ=pod?a3FJmtGd6msy;b+zFT>f*ec+!h|y24WnNC0CUP0c&7)*_t0?VPuEw`YqY|e-$~=TS 
z73ot>atVxGzIPn=A;oe3S?|7nMsE-9Q2r(?(}tyBRB{=M_oPpve08e;iH|q8RR$Bu z;8+=DM+T17QErsSacWc?l}D9Pbu=|9EN4V>T1+{GfU4tUpL9iI=`@hHuMe38P`dC3 z^BLyIg1{+0#{2~)ybm_wUl`uy@ge9OEB2}|qWetopgjkmR=6R*?Am*uXtEciBk-=GW6m za8(YMp)PH#H?&se7+&wFne{gYi_^lcoYWe)lR3o>Cf6FPOfJ`x78*f<+<>_Oav3&& z6MejFu1S@pMju8L-l6`L(S0^oaL-(2(qR&TsdH4#++R0VlztW^Ef$Y~krlzN$g*a^ zz-B2taU^c#)oOfyd>-k%d#x(h4hF6nV0|f3NQ5;C+RewS5WfcUWQqF!@NHp+II&;8 zjHSS!7!sy<8#1=AMKHdDkgQ;f+k~c&s(S&{c*WHvl4s5LqVzEJ(HFL!TTF=#;3=il z7zOc55fB++zlI`_msV!yv|`Lkzm0MhS#%7)XgU#v3E{&2PauEK;!l18-!(8h6fG5p z^#tGm+=mDr_K|Dg;-7_u5A!?T$#k+E;f0+Xcu`u#ev)%Kp)xDr{(CWJS7=iKxA5vh zxZ_QAV1m0ZmVhP4wPXydS`)3={f6D@K}{bfH(%JoToc$ei~g7toB<*IKk?7WE_eG) z`<@ev1(Slw$w6V#%9ClbI5j47j)}xl`AS5M*(0zpdb1G0kpLu!JM1x`wW!9iu|Bq2sbv4VmX+oqD09c8X1o(1CC zGRUyE3Sib4pf$sEproa33>#D}w8OI3Ac$ddN<$MaY!|^m;}981_GVvfWH5&m`4tTR zc3DA@qjEMbzk>7e^r_V|i@o?|_SA@|F)K>X9^B_M^nJs&$lMAZC)Z)P6Pz!4m8xm_8Z)s4yz3;w#y$vilrq#k5svR90n@ zUuREeFKoFm1i~Cwx)yK8fk>xbNsP*WDe*b{Dj~_4vrdYPS~Gw0{o(s*1@oEG0=PQV z{v$N8(HI6|^=)FXge{!wFI;Xd0Fy7+!D<$b0msb^HkhqlZnaldZILa*bnQaBX$_C@ zDNFs0p3Nr&WR%$MZnq#oK!M()?yj8&XAtm+?hdXjW~p>R({BPMcVVCci}%k=g{KMw zcCwsqPz8kvI~ig7))^YUqq9O`^>#;`J+r^ z+R0SqWOJqh(oREflMq!SY2T<}3Z{OR71G0&*|)T!m4Sjq3uc2gD2KEyM4nH%DH^I% zAxG1ishA6e6PTBH-$Jrj8P93hA^BUBwx|_68zXIVC2b0H4UL-Fi0ES}?-iN@wi*#( zly>GPkT?b12H&3*;T-12P~i9QC!P~>=2^h`NfXGG@pmiE_Dc!MH7bMC zQ2EGf^nXJn7P${iZo_?Hz7rsu|IdAV?(%;V_c5=4R(|BnhbGN2^9e!W*I+;95dB@u znlG^1BPKqxF}HXsXz%05oiYC`((C(Nf$#H|^JcSW4f@ceIhP`A=Fjue?rg^VB<|ms zv6xp`&xrcb5LLV2*1BDpqCW?9B8U$JiJz)ptsRl5QTV61ae_b%o zR0Q+DSm9!xnE>YL`k06JHRhoKde+B0FWGZG<^fs>V4gC6Z3O95Y~8v>I^O`&sklf7 z#%34k>>rf_NT(7)I{(k}njKhX5{vn1(Es9rU6D+b#k4t#Pa%Qbw`Y(2Gh-;DInJA8 zHq|j`d6Ueho*x52`Mnbv4-FXaCgULsn+!`11CSg9%*X?l+z6g{(1gbbJh>&zfCBKu zcQX5Yh+NkCu#$5>19>XLm0vn#>$D z89GA7V4`j;wc6bV{P^dc@=CXVq1p9(C})O;2>}8Z7uI1khx>BjOtWjXZg!IJ#K13k zX=Qjv2rRe;v|V6=tE6a@kcwafC+GF2b$wCw$URu!=p`ZK=s0oL^$iq~0a#KU8m-up^3c1?J0`wTMS$!%al^S{Jem2fd!a)GM@hk%#NR%Ma(mQ<*OsFVCO7oVot& 
zeHkaTgmqkxyS2xLgTS2=a16+tz)ZxPpcNu5=K3Ve{h2*LADBZA*$K>_WXFIf9@nqN z55U;Z9RBhMu}O9cDvLQp2Q%iFDky~iY{L{U^CsS@z-*Pr+(`%IUGc2UgyHQW@_qcN zv%8S4n4CNT-9Zt#tI0RqL9)9kdH#{L6SP zi`4)@`MRv8Dj~}pDr=Ig=evElE5#se{v7H$X|Z~lUqs&g5|c1e6&-vr6ObT6Awqhr zKZ#bHB;LT!l2#6)L=4&}wN6-6K^LTqJe)?PhJ1WD6{Y~x^GBaiA-)mu6s)3eF@|J- z7wlZ%q?&4I=Yk0myjxqg{xz&x8Q-dv*H$eeKUk^?Hi8FGzU*PjQ5N(8O4+U|gdXVB zwQA|IyidDU2p%ac0OVu!6ruR@rd*x2@9CKg>Oc8hy=9(TuWrBtAd zNUwbeY5;?2HO;a73Qfl02RM)48nawY>~fG*^ayKHvyI$jL*4NZ{|LIJcVQ$5(xYu| zxz)5c%$94L^UltIfzbTtMM|Q8_?F|FAshHd(4C-Z?s5HUIKLwr^IzwsxG9IjQNbtr z>R5~k!9@q(*lIYD3?8SvR?Odwy)I-`xy&S;d%71 z*R|0R@h0mLKDJmIv#@$WQ-Kbxh-ltEd#hLc=QVUBN&Ir%SYC1?`6{l#&ohrsVua?0 z(5JsaM%YCrEHpF16zNCTM!HmdK?LaGL>_EXd(_Kdk%suxwb>3a8!+7E#awsT6{9rv zk)6_UoO; zD;w_((e{HEtf$@B(Ww1-y>GT);BNL7?yl_=l*7D>=9(9*oq{m8y9Ash&w{QQ8St

J=KzHI2(Swy14&hsR;Qm_B%Lvt3;PA@o276@VpJx!v#PPT&_}( z0RW==27xee-yr%;%pC<0B#2WOK_!6*$wJ+TncGnRb9-aG#0X*!gPCo|A#}p<2kseR zxP(6W5cEMvN`Kh~gdelG2#!Ce@9)CHLabK;WUfz*5xZM==TOJZzlApnOMJvn5Mc)q zj#P5{>hdBo3(>fvY@!fg6Gg9%xKGD?!enmenypH;8#B)c`Oj5kZlb z;Hqaw8KEdp65x-qxqp;JDAzQo$k+xn8KvFm$}YmI2w*aR1RfRT71iD_h|WbQso*k- z#So)7#{jxrC}$^kfIO^76oT2X5YdO|2+O>TS8^Sp>|ESYvj*L^J&Blp0i|9GK$|B~ zsP4t|F~(d*-pT1GKYDy0KnZQGnZ$1bR3&|=XA$qnPzLRZ5^DTAY=OTQL^X`EpbELd*ets3X+!ZLcf_qMt@t!$;Q2o1`Z`VmG!+mDb`s&! ziOrLC%DXQSx<^4zA{s`3as<}kMOeF_L#6>e(}>JP;KIq+#XCVOFv5a60Zw-ch*$#E zf%`z9i1G!YBJdFgO+rEn757e26}&s(OMsZX4~ohLcvESj=z*nZscBCnsi5cyv87>F z9F;F@$$$@Yh$UUG$*81daDqVTdda0V_yzGG^UHXBKuy2ILnSJoXd!pf05mIZY^-<* z=ws9nNnlh_w-n@AW9l;5t}$j;ZiKAW;8FJw2!{^>@1Dx&G%r=suSr;X%S5i9Ez>|0dP?h1T7+gEUGR0^aAMTW48Z%qnjeszer7phv{i#;>LlcQ_>~|N7 zfK%?ZRuJHI2`kjPeR!;Y4lgihl6t(=T)XmA>+uimU}rw!ha-#yKz+Z8%3yDANLJ?g zJ2cTb#-mN^OuN-p=C7b7^A#pz8Wf*+ARyJrN!4BDrL-}L3{&jxy(B^|IG8LUXMu0W z($T}Z!|5;-V?#b6hJe_AL&HBlLJR{j*oQxlkVgi9N-PY7DHbGboDy&m!$8nuQFH5vBakhc7h{>DPDg~eTVe__LdntjDyMG)hlxwO_?!IH3LH@+IT zy+{|=g1LZb?2T4!SQ(>3>8_^#Kv);14k8hK{oPuedt-!Okay%#e%aORsPCCAr6s&5 zyU%hGJjX=z(0$B_N*kFN0jXcl>!x>`8TA}j!XfemwsnCL|B#K!(;})#T7LQyoGQmr zX7W^FJR-!uOqY4+E#&94xFvQ(l}T&5$Q!=#`B?w1o|pRR93c=O|(400NHOutc6E1SS+PGpNdx01q+<2R4va z2tWB8>*rwy29(7-$RQ3aZT=e0#qWaFv2X|WBtnU41QBLc-k%WwozLL+4etzr&Igy4 zHq7SIm5CGKL``^m7#}?Jq~J@Pf`WUf>aS#!a4oBCZ|HbkSvg?C!o*1YanzNd+2IG? 
zOhbD4Bo-Skv{!mmpSw*g4yz4If>WCAuL)XxoD`7f+TzS4JYv2JZ$xWFW9^A7-icok zG-C=XDSPd@6D3V7yQUFwQTGF3ljui4PzYEFu7LSvK1x8!N0=KUm2dG>0P$}y7omQ` z8=*B$0If|C_Y$sf^-C4j652}i%0aGiA>mVbkFF8)8uDpBBr9HlvNoCaK}+y9z{ill znvx_GA}-GL)Nh~8-75^lr$>TultTFgxX8iU$>nJ>PrlIR5;Q2{Dhoc3xI<;&7$Z0a z?NTl`WToBd+kV(KR$oxa-D?;jF7=WjP=R6Z7jI9*4YR{HbiGW&5 z7*%#Ht6mvH0ChoC+%j>@WGN#(bZfi)5D;)g{H3tYhNazeG`c5Wh zJUF|g6@C5gvB63yKM!lkVmUhZWLA}0y6^6!$`Sbq+t5qr#_GXX$Z%3gn78?x$RTW* z^Fc>-Q!qIxvIlW9x-0UIyT`j-UE#PJyC^!&-P#-~6O-*lr(V{5xu4|BTSft@3ff-5 z)o0TXIkaHVb~#oU^6>U^F6z}n+6_qIR75#XEG4jtC|E0e7IhN3z3`_k-9)~~`T|Dy z#|81=u&47MYy7ESU^!X}4jnrDiwu;uz9P1}BBp2#TF9T;&^7?|G?1%k=aOBeD@Z#& zBtV)DqZw5ov$(-MgbrZk%VE9M1fO>r;Y(P_$>5~ovq>GY%_$GZOWZu705A{0=7(AZ zu$#iWVbj3`;}IeijAs^X1ydVLi2EP#tSV=LoTd#?vS z)f}`Z*Fhr_cJEmj=bQG@D&*99KuNkQB!C}ciL)p%aSBH@rDSVht^CpI_doN?fBO6P zze;oVTOL^LFChoj%hJblIEr6$1x)(6*{!%8+s50*NF@|j8UKaEMJ(=y;>DK zq+0`KszmL$mV05zc0GTRW`(8x`r!WWaE9@Dnb`1-uy*6WpV~=rwwC+G1B&*I3acd zKl5MVfs6BR3}#n_4iH~$x?o*1vEM;c8uHFFH-^>n4zKdsDJFc88hMXdp(vt37f)H9 zA&=M%bDMl1m?U15V|cI0`~WW2%UsjTIF9A!lHE5a+I4gOoB36a*Q2O68I041lY%Kl zdwLF%BPOw0H^Crn?sHIfgRwmu9k9yYT}IvKdIaizfaPs?KIS@9JuJu=*Z=`~i4d-( zT+Wi#%Q(nY2++k9jYS)xWKw7&oJicluU$?8pL?fmEbowKqD9dD)zg>SN~r_6g$%SjPUg}@ZGfSrrE>PBNpRD3~L-0wveyamhv z!bJRHCw%Y)O>J>cl*0&!6779gHY2LOfV6_F_16$hD=A~IQ&?ph>pprlWp5?40e~0| zM9aC4Z{(ja`7I{@l*vD1B0wxGB%Jg)?ltfO@*4&dkl%RzV=OI1}<~L`OKu79QZ{pn`QUh;_`VZJ6pD!_B$46uABIXrV{$U1l z4ks~#6E^2NS8~g~NDMA95rzmnq8FdR=Y)`#GQx*my<&_?_4vZUWb1%T`E_{x}*9>g!C?KL8@ z!tzLmPGtkA{1bdMHxI%tI*2($R4w=Fk&ftACYeUFPwDWDTY+yZJ|>3oDq>tIS3;hh zNI0Ea&mSB5b~ru{BU;!URS5GywI);1We zg!y6k5AzsF_zSt+%_xIwU0f^R8g^@uYedK|+6b`2H3nO0PxKc@Sv(*6UERkm9^ltr zGk{gkV$2zq3f>Pck41m(m`^dt2~VF=1#rp)aCgz=%Pd_&Y4@Hew7OJWmBFKN9bCK; zC{1H(s6AP}$7|EVg=bvflMav7E#r=ES&T|^Yz-^kC-hZuSC^@A4_zcIEOYpd5b}M{ zTtSd(81^=MYC(sYM65+2rg{bo+QBFbE@q>BN65IIYcNj)SkH5Fh}I_aAr||^V9g~g zksD6Eu;o{)DK41{R&dgCDc}ghj2d&Vuc5i1&J078cE4&zR)FN!xprl?ue*8#KR z*>;N8g!>ko<_Z*Om;D~bbw?J|@fu9T=C^o-dw{JsEzO-LOi00%LBE-{J;_sqD}RsA 
zV^g|`2nWvSae4HD&GrCdRT(vn$$2)3s6z_Vzv~kdm5#JG2 zA|&C@-;jaoIv)Mz=Px||vi>?007*lZp6jx$p^LWkm9QZTxKYrSHfcd;bWK`0U=!@N z1YMJ+tSpW}Q+UIT4$f{}yk12l0k*MNniE0|uT+6fMM@Z=$m(vPecmV@1$3taxIHSi7Xoj20&;i;+B?2Y0y55{*V^9vH~5P zXS3V|401=TFyF>CNF&IzBo=pl+b-xcSW=|yC_T^l14jX*-QTCn0OnMj>0LqC<|sXj z`&b~)m<0NMwSx^VIadl*Q3i2VDzh(!))7z+noj!;H=!xL+oA6Qe%QpuhunHE8u|v; z!eqeWWctg?aEA@CR9o*!q|aq95bpoM7G%7 z);jJ4z>OwOHWGxnr&x#_*Zct!*#JS7XiwmpQ?>{TM7x~g^@8qC!)$?j zOT$r$beDRMIAQ>X-+o#YRMu*BqB?pre$^hI~pzgJYC>9w(rL*SD}> z*T6^A*9!~{q5+ZfUI1j6N1euYQ4*l^gltMVksKz*KTbrdL9K^F3?*aNk)1G9f;z$2 z#)e~IHIY%sp23DQcB}U@90<1J1&Mg!kkASu*wd}yzj1CF=IaGSxv%?;2bOYRi3aQp z&;}%`NE{?Zi7zYobF%1a11YTyVCK@fL7f4%P{C{{fp)OG!yhLvS}5#FcvA0r_qxXi zZ;_@W^BNMTdO`L+SVkanLsZ6b92(W79a@GjN(TMQK{?|Xt<6EdhrL)~YWt69d8hNB z!iS1?KxQtM3Sm<`cj~f3+*qu=YzM@PNE=gbNnv*vNbNX~@fRF4Nf${!$=3fW>EISL zpgjPXj<9b*fAnL_w76gz)fT0`PZfDI;ZwyQ#@`%OKuUlvASIF#yO@Ihk~W{B*0>h8 zo(E^gmT{V^3(ZMz1*3mlS@NQ67l(~dC`)WC)*vUxeFH5(Hkkhcc*fm}hI$AzQCh_* z5B4Gwja!oh#yIBS~vLhZhz*`pLoabJv6mF{=7TjM(Kq; z08h2J8y~l~nj|PEaYplLkkRlkVl>=}=D^FsrfS?)WLpNtpQ@>)~|a3<<`c! 
z=m0^*MN4n51VhfhjLPOe=aamvm^`kSOW@|!t*o%?8U%RyhRhwr4ka@4j0OebPK z_j~0tp3vH|Y4mlnICA4$z8VR0{ldgBSD4VjF1`lBM`(300Cw{PrYZL#_#7Z8)+56G zr*m1!>GE(EdI7T+pMM6>RpPz2!M<{Cj2BxClo}Hze0)Vg1!UZS0s) zv)UknmB(`gx^5}#T#qR(cZEsKsCg-8sC~kAbXz-1?Uz#NP^klodz?uNp|%dG!&nJ+ zQ=mPsBwBGx>h^Y)T)?jgw|TSMzgr^hK?(4G>)WKvtypP?x-c0}}z) zf%j>^H47DYCApQ`Jc6M4xoa^5&>w`VoX5@+S*(ULsWfAi98~6$v6mAU!G53`GthZP zp(j}TcRR+4XLuIQj<{!d(>Pmn&+rCtb`)o6Y$uE{XN#)9)pywUA>Enhn8N{d$153e z+j5L+*y9TALa^Kwao8R^tJ&Qfhp+ee8%gOUu0+`8&oiNVsKcFV=T{K}wFavcBmbhH zIJzPH%}%MeZZ+&xt_8z>rrOk##46d9d*t8mKJY5Rgv*bA_yBn9pYU1VV)9>+41E^t zF9Z-CUoT^on%>q}2XqIOroYYFnU8lzm1IZIKK#DJ>5txZ_np`#%q@23!oAPc zOE4<;mK)0&tE0&AP>H+leh!=0_m|*~SVE+jg+<@|-P{-1VYryiih(c#{EDbdZsgVD z4tNaZ;@iXy@*D(4dpUU#qdH%a7RRIGWLb_&m=A@PFe^{>aW{($3S7uQe3OCu@f;|BUSKG~(h`b_hT?-)!@kt6QW~r%AB`&KbX@s zSX&!1b&jREW|5vKM)%OS%~dG$L#5ny=u;XV3j4PszsDET)Ea43dKiD!<8MYkfQNzt z&U#b?JZ&^w+4Mz!X}4GxT^~7@bSFDyv&(I)d$aIZ;XjyTMM7HPm0E$Q)eE>O`fvw& zCB5c7EM}%}0b&Ba?(jXF(X*>*H`U7hG$Ji*dW$j_U^#`?@;TZh|?`WBaU8e95VF~5BuconcM#N z%s69$@fVv@^9znTw!ndH2>d=GslvlE?O zId&xQ@z{;Eqo@(it>cdg9l%6M!cKqsjj&U1lzaodFrnBswi`zX5e)8RJ2=DQK8}g7 zB<`hwone7Hp~2XBFyW8;X9WZ*eFjEzxrg1(aOB@I-Tcs9@K?eg_eHAUuZq8^5${aH z{Fi$rCmIUb>Rw@TbHWPaojLAp0X?AvBS0p@1WcE-$;uv=E@vd$I zv1eeD)pe|_SFY+`t?&wK>M%ri{*w#+KpH>a5VfTv>yEwekYHZ$NN7v3xe z)@fFpgzpS*&|l1q@|SV)Bag4fx6I&M;cCqoVh+94nX)EW{49AaZsXdvcZy3`tC!)$ZBJq8 zd>4!Oq)*9AO9U9LOwsHp(;T|%S5f)Mo|3Zf&$rinl^t69?>mpx) z3Rm<;jPt=439C4Hqa((TU_vnmOLtwlWE6U7eco4*x$2Ee@LgA~uzJr`8(y{fA@>Vp zuY2syz(R#xz*?5gX$xI)1>4;B@ud`e9Hl4|)C>IoHh$f8tWlOdL?9GgHMH65*057+ z%N+D!ZkxEv^c|eMuQA@{`_a618&4SGUDSh#h{1aWf55?|e(4V`LPR}WK=Vvg-g_1O z!R5emfJIV#4VgDGxPDwH!9TyAhh1gxmlK~oWB1{nP$jW)z;iFKntOM+TPm_7>=mfP zb};aeJfX;=T%ZjhlZtEEI2xYl@_D+~O(X!Lr||Q!aGoD= z6KleCeoJn>wlTn0yjl%@qb|iX#I_UWP2{-?NUn!$&M3tx>LghsZq`U2*h`v+9V-GW zD5#zK?1Dr;nv6CyKgdMl#>WI>@>&#PXm)Ulw{~UknRD_+{)9Ah6eT^Hp;wJm)_fS$1Z}3BKv>nhI|?_(3a02IE5g;X_w-+y{fVer^JMJ_`R(Ju4@y37a`8*FW`i0 zv~$VgT71;xX_SKdOFn9n@;_?AEfR1Q>ShY&0?E4{HIW^#B;uG~amxCe>GJb(Uw~5$ 
zvE{OHLiEY9`wX03Tvbx0It$DKM&-vH8oNc7?^sBV4Lm?3yHOh{Aw)J6y@SBbxE>9F zeUR58pz|N_+8B9`i9zTfpm`wXYk=5ae7%;B#7?qiYq`BC$g@7R+&1wMJ9i_y$TF$0 z5HW8-?K1d+8~4Gl32OWD_|p?QILk$&k^Sd_-?m-6iy!ml>_i5eKwi<7?TT33QU?z{tA;1G5IPJ@jd-r=D1Ck`D;wR z!Q>w?`E4fOW%BQs{Cg(9&*Tr7{0AnlGWpL;{tFYJ)tHG7kK>U{_-$$PAd}me+|J}q zCMTHi%{e9a^SVo|Znx1GgM2aqwn!t7eCL?sY@J6UvHi*7A){*CZrp0T%g7jcqlW({ zjHAZ1@c_>D8uP}Salkl;+-XkkCDRE+)d1ZXfV zwC|*`7!V)drQxx@*ZX>JjlQ88NJf9c*G#bFboeg5?%b{4?qBl@*WB;@)|U8DVE22! w@@3BYoL^en$G(vGGV{88r&;Yi6cu}M_JfvChl95{*(L9CM!WJiz$IR=K0&LM{YPLKeVLx31aJh>*PT$1nmUv;rb zZILAs{r2Ur|NZ-W&(6(NEc`vW_ip>%$1Uq8tW5q~6uy98^mW^^l%?#x)iSx=vQc;X z&bHffZA-bzd)sPxs-TL<3#z2b$ct)5Rgjm|tg0d}t2tFeKBMN<0`iJlR7=Qb)d^Kc zUR5X6GV(cfO06KTsnhBV@_BWaI*WWk-L39HzNpTr2J$8KIdvZS33ac!fV{3Qs{4?i zRQIb($d^@9J%Id_dQe?PzM`(EtH@8QYw987XVk;$I`X^Ja{pH)Av9z}k)dQ3fz z{2uj$dJ_3L^?7vzc|$#=ZX*AjdRlz}`FZtA>KXOy+ji?-^@@5!Vtch|r$rs8olejJuH}#CQpWE& z{37Mx1Z-t*INxx__SlK7#M-oDXVX1!^ktSeY~|D~(;J4d$F>7oxm%vzN4X%S7g-9+ z>{IS(Yiy%blpe)WxWt}(uDxPmj1xA{D)Xc7=HuUGo0&g$a>(4VCy4jkgXA|^7APwMWdZeS*7qkY3zU_7 z1p{SKpv=~Y{%LB?zj3{>tTHYt+q1@{aXFq#Eae?I?>PDuS*dnlk7tq@VO zHy6@MFY1L+?1!Boo!ts_81&mwZ$C&&;m-D2prf?7DuvX=l4&^`(#=b|X)%g*PX%e& zh3PR%ac`g2&dL)p*2Mf`Fp^g~na7?pQ!xIBg{h$y`#~@HQxKi>unhSKQ(EX(nj}wAdc( z#KWCf2)y~JdeNtH>B$vl#lJK&4~$d1I@?z9J~-4QY~Ndtq@bEE=8_-aB824(}p zGZ++|L1N(q%eG@L<7CffKc4N9t()lWkgxg2PbVUt!!KGv0?a|y<7^yr!Xh7CV9Z%H zXz|j;Dei6yEy|jyx9!K^K;R2J=B96OIgEZF`q((dOK9Cw6<*iuBENIXnRiaF)L>nnjd8aH;BCHs+S7i(+ z5@k~4^cYehtelM6zL$8LZs;ZUrZ*<7fiL(wPH&W=LUj5(OT@gPrGkEdW%Ao0g{9?A z*AF2nBhcCvb`g;+x7+Tu+}(E1p!EMuT;2A7(+2u}E_I0sCD;^coiN9tf~f=fI);dh zv7D-1b&4b?=YoB~fsnBEL+H&9sEA5VNb9H^5)#Y;=fH)iaP=cV4-5|evXY0Ir#!=! zBs&_980`VU#jP?FAB>xEAuJ;I#)Y^bgvt4Z1CnKFHLY#*2Wx)6Jsb@By>R0Np;N=u zGo7HsJU4@#Cpc-Q`_k4oKoBl;_g5l z$K;&BWVYD`Mn>d%5_Q$~?0bMqFek97VQC*?@MmB}LpWq;&2JFso9;2dkArvSeSD{a zVcgyAMZpdhIb^^mjzdSGA%cPu#+>gIxjQgE9TBb*QQ@Q~dLUul9_j&X+Bg#y_hR

eBrmw?qVOAt!(bhLm z{`6~C%-TBW?;K1m{?uo(ct&K=U_IXUN8g@W|K{=aT{H+{Xe}6zVH39cX$xKh{DmX3 z-Rt+Fc%s%swx41@pmXEAI4L7!*;VlhZv&Nb|41gq9owB~&OY z z&h1BDwQesbUg8$utf3te@-b-l8)$viN{rD1+!WF(1yid zku*!k5%3~f)9SRTAvj6x-f^^44O+}D-^VngK*aX+V1bl@8rY}Kimg}CnI!Yk76 z!q;(^EtaFr7#^m0Kxn(mqGvU(jLs*Oq`YP6-%U#ImE)NN{-2OvC7F558dq+QUd8MU z*tD={%NsTKm(Q~qYHTw$KA^46Tykgz{0p`wvdke-g6OFfjF75`6+(DpU_SJr` z6Eu2JV^( zA-SX_xO#M2(h?S@vt%q4F05(b!S~`iAi56Bc$Wc7)x?`>=kdg||{)_?soSwCZ0e=UMv|MyeNfAd3^r!Po0n5Ylx zn2%MqH6t4QPG?8^oxOjU+Nu4aJAD#uo@rb}8oK{HwfGyy7w74S0H(w&E=P?S9 z95^4c(i^W@z|Q=E&G5x32!8}P-v!Loc$TdOV&HcXP%Wv__{2!(`>tc(e9v(`sY?LR z*8iC-0p5j7u`K`_CjtFQsG#}ege+r7llsBCfJ_7fx8_FP?&R z1ZaT6@k+9ilq59z-)=u<|jg7rk~-z211Lkry9qOy)}ufI9+NGv8(s-iG~yG3y*g zU<4@)cJE)#ha#YB+8ObVeV9IgtsnP>{eUoWh5fJRqt^!UU>hT0_6P`*&rrQL5g}j0 zoVC41=InA~nsU)F==9cmf#PC87$JylzHI-IeBBJdV+$i1*Xd@6%)6R%t+|YHGK=p7 z4ql6O)@6wr(b#x2{fy)DEK4->#O_0s9D#NEQO(nbcApTDCVfxYdt9-V#YhE&R?P%V zCnwQb$O5P_j6gZHw1SI*Nx3{Z%huv#Mq3BNzTXMdktU)ZO;wJ zWMBxRHPaak_hd|~hI8Z1M2t8jj-@3=D=^)f&&Kqkb}v-Hs8tI_Lm0}vY-DQpTN8gs zrXFLeG_w&hWd8>+5hX}qsJdkso^|}2I8fQ~P^vntSDgjPYYxM=Fk}&muj9AkEFiDj zHH_sy4O#~5%P29ZTXbqLf4L5OOuB@y{}N(Nb*#l!g!Dt~pUWhQAzqI>hhOyPNG1-K zc%&{gI4*;r!S6|2(dux$q>WnrI@;V^)4NF#Z7;+gE)@}A6}QZW3-!&U2)zmC=P^s# z5cW59IA3qU`GWTv!x=iTC$cti;ItKzlJX##P5keVSffv?N>7MCo8Tf5aXml0q84u!7+*t4RgRdEHY9_7vdIMpYDV9oS!Iik+8MR?k zrv>^#TXOBR?MH^)4moWT&^OCdymZJ$Lv-oeOoT@xIeRW5;z&ICNGWYB-dj`Z>jYqax1v^%;m}cMG7de z9Io&vt~z=!ygxD$=TJspmlHzdDF5D%iXvqeJePWzpF->30;}j4c@FRq(*4Je1Q^CD zqXq~sA!-04lR;#VmJ9NHHsP(6Ay0%-q^*?{Oq+sd53JtXcT$=9$0da}JZk8LoDaSIR9FHT6D=EeS{qi9~o zHEbM&@5l7RqMG;bpXje#^FcvQ1gYp92@ga$HYdkVs*8x)_Zuf#p;-q^j#8gysQ(R| zl-@vL|D3TLG7#TyjA$|{LhCs#wC1w0Q}GAg=RSn$LzrTV{W6IB&?<3>cC>#Cw21`H zHn>G{x`6Q{C-MgW$rRgaN;XdBwPDcb`NmG(uN?gQ7!I45>2o`}{K+=b8AkpVzTHO;524fSKgf-zEp-`>-}+25lTlXBF&uFe5oydcon-dEJ~Eu?X$KXp`Ig{ zWS%>ko2bON9nJ-iV+0V}=s+IWQo_a20R_|}F|Ld#pva+cI2_~%iYAB$Mhbl;giXQY zF%%lxr)IsPZYv_ z)6C_Hv@c+C2O%^D_|V5vupt{au3WiEP}>U65IQ6n zV}mAyAtQ2HJ4f~8 
zvAq)q^bPQ8;AxCH6mY2*f;LQ4ed?bEWKC&{5Bnp4`ylUiLID z#*;#w$_a5DT5NS|!-XO zh1d}V0QG>!h}4uqxzLd;gPB3Wc%UIvEn1Ym%?3@uf+zvdF}L2o^x;ZiXBhe0^y{Y- zK~C{tdlI{xlt}N?i}%2`;w2wp`-lyo!^5e=UZLGWk5PY@ zSequq9BbtLj1c*%!Vn}%P^-KuyAN%|7N3Da5V8c%%iHpz7a$Ignz}a0H{qig#Bg)- zbV9Cz1i{{e6VaI6DDj@BvD=Hgjn~Af7mo{;DDu~#*+|xhf^wR`HO(OLcyRm7a6sx} z+6Uq(^M0ZLL>2!fvBV21dOz~04ZbfeI})@0ZL}lahL=p{_kejcL5Z2p@YPlfu8PF) zG>>uQVZ~K+%TpJ5t)ZV_m2VH^k$~o9zP`zX+%H$Xa;+)%N}6Y&c}Js3!@AAn4JJO5 zH6|S7;_Ts= len(data): + continue + + d = data[idx] + assert len( + d.shape + ) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % ( + str(d.shape)) + + shape_old = d.shape + sq_axis = None + if idx == 0: + sq_axis = (0, 1) + elif idx == 1: + sq_axis = (0, 1, 2) + else: + continue + + data[idx] = np.squeeze(d, axis=sq_axis) + shape_new = data[idx].shape + return data + + def get_kernel_parameters(self, kind, params): + assert kind in ["Convolution", "Pooling", "Deconvolution", "ConvolutionDepthwise"] + [k_h, k_w] = [1, 1] + if isinstance(params.kernel_size, numbers.Number): + [k_h, k_w] = [params.kernel_size] * 2 + elif len(params.kernel_size) > 0: + k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[ + 0] + k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[ + len(params.kernel_size) - 1] + elif params.kernel_h > 0 or params.kernel_w > 0: + k_h = params.kernel_h + k_w = params.kernel_w + [s_h, s_w] = [1, 1] + if isinstance(params.stride, numbers.Number): + [s_h, s_w] = [params.stride] * 2 + elif len(params.stride) > 0: + s_h = params.stride_h if params.stride_h > 0 else params.stride[0] + s_w = params.stride_w if params.stride_w > 0 else params.stride[len( + params.stride) - 1] + elif params.stride_h > 0 or params.stride_w > 0: + s_h = params.stride_h + s_w = params.stride_w + [p_h, p_w] = [0, 0] + if isinstance(params.pad, numbers.Number): + [p_h, p_w] = [params.pad] * 2 + elif len(params.pad) > 0: + p_h = params.pad_h if params.pad_h > 0 else params.pad[0] + p_w = 
params.pad_w if params.pad_w > 0 else params.pad[len( + params.pad) - 1] + elif params.pad_h > 0 or params.pad_w > 0: + p_h = params.pad_h + p_w = params.pad_w + dila_h = dila_w = 1 + group = 1 + c_o = 1 + if kind in ["Convolution", "Deconvolution", "ConvolutionDepthwise"]: + if kind in ["Convolution", "Deconvolution"]: + c_o = params.num_output + dila_len = len(params.dilation) + if dila_len == 2: + dila_h = params.dilation[0] + dila_w = params.dilation[1] + elif dila_len == 1: + dila_h = dila_w = params.dilation[0] + else: + assert dila_len == 0, "invalid length[%s] of dilation in convolution" % ( + dila_len) + if kind in ['Convolution', 'Deconvolution']: + group = params.group + kernel = [k_h, k_w] + stride = [s_h, s_w] + pad = [p_h, p_w] + dilation = [dila_h, dila_w] + return c_o, kernel, stride, pad, dilation, group + + def get_input_name(self, node): + if hasattr(node, "index"): + return node.layer_name + "[{}]".format(node.index) + else: + return node.layer_name + + def Input(self, node): + self.pd_graph.add_layer( + "paddle.to_tensor", + inputs={}, + outputs=[node.layer_name], + data="x{}".format(self.input_index)) + shape = list(node.layer.input_param.shape[0].dim)[1:] + self.inputs_info["x{}".format(self.input_index)] = [[-1] + shape, "float32"] + self.input_index += 1 + + def Convolution(self, node): + if "conv" in self.nn_name2id: + self.nn_name2id["conv"] += 1 + else: + self.nn_name2id["conv"] = 0 + conv2d_name = "conv" + str(self.nn_name2id["conv"]) + output_name = node.layer_name + layer_outputs = [conv2d_name, output_name] + data = node.data + params = node.layer.convolution_param + out_channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters( + node.layer_type, params) + if data is None: + data = [] + print( + "The parameter of {} (type is {}) is not set. 
def Deconvolution(self, node):
    """Map a Caffe Deconvolution layer onto paddle.nn.Conv2DTranspose."""
    # Allocate a unique sub-layer name: conv0, conv1, ...
    if "conv" in self.nn_name2id:
        self.nn_name2id["conv"] += 1
    else:
        self.nn_name2id["conv"] = 0
    conv2d_name = "conv" + str(self.nn_name2id["conv"])
    output_name = node.layer_name
    layer_outputs = [conv2d_name, output_name]
    data = node.data
    params = node.layer.convolution_param
    out_channel, kernel, stride, pad, dilation, group = \
        self.get_kernel_parameters(node.layer_type, params)
    if data is None:
        data = []
        print(
            "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
            .format(node.layer_name, node.layer_type))
        data.append(
            np.zeros([out_channel, node.input_shape[0][1], kernel[0],
                      kernel[1]]).astype('float32'))
        data.append(np.zeros([out_channel, ]).astype('float32'))
    else:
        data = self.adjust_parameters(node)
    self.params[conv2d_name + ".weight"] = data[0]
    if len(data) == 2:
        self.params[conv2d_name + ".bias"] = data[1]
    assert len(node.inputs) == 1, \
        "The count of Deconvolution node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    layer_attrs = {
        "in_channels": node.input_shape[0][1],
        "out_channels": out_channel,
        "kernel_size": kernel,
        "stride": stride,
        "padding": pad,
        "dilation": dilation,
        "groups": group
    }
    if len(data) == 1:
        # Only a weight blob was found, so the layer has no bias.
        layer_attrs["bias_attr"] = False
    self.pd_graph.add_layer(
        "paddle.nn.Conv2DTranspose",
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs,
        **layer_attrs)

def ConvolutionDepthwise(self, node):
    """Map a Caffe ConvolutionDepthwise layer onto a grouped paddle.nn.Conv2D."""
    if "conv" in self.nn_name2id:
        self.nn_name2id["conv"] += 1
    else:
        self.nn_name2id["conv"] = 0
    conv2d_name = "conv" + str(self.nn_name2id["conv"])
    output_name = node.layer_name
    layer_outputs = [conv2d_name, output_name]
    data = node.data
    params = node.layer.convolution_param
    out_channel, kernel, stride, pad, dilation, group = \
        self.get_kernel_parameters(node.layer_type, params)
    # num_output falls back to the input channel count when unset.
    out_channel = params.num_output if params.num_output is not None \
        else node.input_shape[0][1]
    in_channel = node.input_shape[0][1]
    # Derive the group count from the channel ratio (depthwise convolution).
    group = int(in_channel / (in_channel / out_channel)) if in_channel > out_channel \
        else int(in_channel / (out_channel / in_channel))
    if data is None:
        data = []
        print(
            "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
            .format(node.layer_name, node.layer_type))
        data.append(
            np.zeros([out_channel, node.input_shape[0][1], kernel[0],
                      kernel[1]]).astype('float32'))
        data.append(np.zeros([out_channel, ]).astype('float32'))
    else:
        data = self.adjust_parameters(node)
    self.params[conv2d_name + ".weight"] = data[0]
    if len(data) == 2:
        self.params[conv2d_name + ".bias"] = data[1]
    # BUG FIX: the message previously named "Deconvolution".
    assert len(node.inputs) == 1, \
        "The count of ConvolutionDepthwise node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    layer_attrs = {
        "in_channels": in_channel,
        "out_channels": out_channel,
        "kernel_size": kernel,
        "stride": stride,
        "padding": pad,
        "dilation": dilation,
        "groups": group
    }
    if len(data) == 1:
        layer_attrs["bias_attr"] = False
    self.pd_graph.add_layer(
        "paddle.nn.Conv2D",
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs,
        **layer_attrs)

def Pooling(self, node):
    """Map a Caffe Pooling layer onto paddle.nn.MaxPool2D / AvgPool2D."""
    if "pool" in self.nn_name2id:
        self.nn_name2id["pool"] += 1
    else:
        self.nn_name2id["pool"] = 0
    pool2d_name = "pool" + str(self.nn_name2id["pool"])
    output_name = node.layer_name
    layer_outputs = [pool2d_name, output_name]
    params = node.layer.pooling_param
    # BUG FIX: the attribute is "ceil_mode"; the old "ceil_mod" lookup could
    # never find it and always returned the default.
    ceil_mode = getattr(params, "ceil_mode", True)
    global_pool = getattr(params, "global_pooling", False)
    assert not global_pool, "The global_pool must be False!"
    channel, kernel, stride, pad, dilation, group = \
        self.get_kernel_parameters(node.layer_type, params)
    assert len(node.inputs) == 1, "The count of Pooling node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    layer_attrs = {
        'kernel_size': kernel,
        'stride': stride,
        'padding': pad,
        'ceil_mode': ceil_mode,
    }
    # params.pool == 0 means MAX pooling in Caffe; anything else is AVE here.
    if params.pool == 0:
        self.pd_graph.add_layer(
            "paddle.nn.MaxPool2D",
            inputs={"input": self.get_input_name(input)},
            outputs=layer_outputs,
            **layer_attrs)
    else:
        layer_attrs["count_include_pad"] = True
        self.pd_graph.add_layer(
            "paddle.nn.AvgPool2D",
            inputs={"input": self.get_input_name(input)},
            outputs=layer_outputs,
            **layer_attrs)

def LRN(self, node):
    """Map a Caffe LRN layer onto fluid.layers.lrn."""
    assert len(node.inputs) == 1, "The count of LRN node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.lrn_param
    # Caffe requires an odd local_size window.
    assert params.local_size % 2 == 1
    # Caffe's alpha covers the whole window; paddle expects it per element.
    alpha = params.alpha / float(params.local_size)
    layer_attrs = {
        "n": params.local_size,
        "k": params.k,
        "alpha": alpha,
        "beta": params.beta,
    }
    self.pd_graph.add_layer(
        "fluid.layers.lrn",
        inputs={"input": self.get_input_name(input)},
        outputs=[node.layer_name],
        **layer_attrs)

def InnerProduct(self, node):
    """Map a Caffe InnerProduct layer onto paddle.nn.Linear."""
    if "linear" in self.nn_name2id:
        self.nn_name2id["linear"] += 1
    else:
        self.nn_name2id["linear"] = 0
    linear_name = "linear" + str(self.nn_name2id["linear"])
    output_name = node.layer_name
    layer_outputs = [linear_name, output_name]
    data = node.data
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.inner_product_param
    if data is None:
        print(
            "The parameter of {} (type is {}) is not set. So we set the parameters as 0."
            .format(node.layer_name, node.layer_type))
        data = []
        # BUG FIX: dropped the redundant double .astype("float32") calls.
        data.append(
            np.zeros([node.input_shape[0][1],
                      params.num_output]).astype("float32"))
        data.append(np.zeros([params.num_output]).astype("float32"))
    else:
        data = self.adjust_parameters(node)
        # Reshape the parameters to Paddle's (in_features, out_features) order.
        transpose_order = (1, 0)
        w = data[0]
        fc_shape = w.shape
        output_channels = fc_shape[0]
        w = w.reshape((output_channels, -1))
        w = w.transpose(transpose_order)
        data[0] = w

    self.params[linear_name + ".weight"] = data[0]
    if len(data) == 2:
        self.params[linear_name + ".bias"] = data[1]
    assert len(node.inputs) == 1, \
        "The count of InnerProduct node's input is not 1."
    assert params.axis == 1
    assert params.bias_term == True
    layer_attrs = {
        "in_features": data[0].shape[0],
        "out_features": params.num_output
    }
    if len(data) == 1:
        layer_attrs["bias"] = False
    if node.input_shape[0][-1] != data[0].shape[0]:
        # Flatten trailing dims first so the input matches in_features.
        self.pd_graph.add_layer(
            "paddle.reshape",
            inputs={"x": self.get_input_name(input)},
            outputs=[output_name],
            shape=[-1, data[0].shape[0]])
        self.pd_graph.add_layer(
            "paddle.nn.Linear",
            inputs={"input": output_name},
            outputs=layer_outputs,
            **layer_attrs)
    else:
        self.pd_graph.add_layer(
            "paddle.nn.Linear",
            inputs={"input": self.get_input_name(input)},
            outputs=layer_outputs,
            **layer_attrs)

def AbsVal(self, node):
    """Map a Caffe AbsVal layer onto paddle.abs."""
    assert len(node.inputs) >= 1, \
        "The count of AbsVal node's input is not more than 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    self.pd_graph.add_layer(
        "paddle.abs",
        inputs={"input": self.get_input_name(input)},
        outputs=[node.layer_name])

def Softmax(self, node):
    """Map a Caffe Softmax layer onto paddle.nn.Softmax."""
    if "softmax" in self.nn_name2id:
        self.nn_name2id["softmax"] += 1
    else:
        self.nn_name2id["softmax"] = 0
    softmax_name = "softmax" + str(self.nn_name2id["softmax"])
    output_name = node.layer_name
    layer_outputs = [softmax_name, output_name]
    assert len(node.inputs) == 1, "The count of Softmax node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.softmax_param
    axis = params.axis
    shape = node.input_shape[0]
    dims = len(shape)
    # Normalize a negative axis to a positive one.
    axis = axis + dims if axis < 0 else axis
    self.pd_graph.add_layer(
        "paddle.nn.Softmax",
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs,
        axis=axis)

def Slice(self, node):
    """Map a Caffe Slice layer onto paddle.split."""
    assert len(node.inputs) == 1, "The count of Slice node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.slice_param
    axis = params.axis
    slice_dim = params.slice_dim
    # Legacy Caffe models use slice_dim instead of axis.
    if slice_dim != 1 and axis == 1:
        axis = slice_dim
    output_shape = node.output_shape
    sections_list = []
    for s in output_shape:
        sections_list.append(s[axis])
    layer_attrs = {
        'num_or_sections': sections_list,
        'dim': axis,
    }
    self.pd_graph.add_layer(
        "paddle.split",
        inputs={"input": self.get_input_name(input)},
        outputs=[node.layer_name],
        **layer_attrs)
def Concat(self, node):
    """Map a Caffe Concat layer onto paddle.concat."""
    assert len(node.inputs) >= 1, \
        "The count of Concat node's input is not more than 1."
    inputs_dict = dict()
    for i in range(len(node.inputs)):
        input = self.graph.get_bottom_node(node, idx=i, copy=True)
        inputs_dict["input{}".format(i)] = self.get_input_name(input)
    params = node.layer.concat_param
    axis = params.axis
    # Collect the inputs into a python list first, then concat that list.
    self.pd_graph.add_layer(
        "prim.list",
        inputs=inputs_dict,
        outputs=[node.layer_name + "_list"])
    self.pd_graph.add_layer(
        "paddle.concat",
        inputs={"x": node.layer_name + "_list"},
        outputs=[node.layer_name],
        axis=axis)

def ReLU(self, node):
    """Map a Caffe ReLU layer onto paddle.nn.ReLU / LeakyReLU.

    Caffe's ReLU layer carries an optional negative_slope; a non-zero slope
    makes it a leaky ReLU.
    """
    if "relu" in self.nn_name2id:
        self.nn_name2id["relu"] += 1
    else:
        self.nn_name2id["relu"] = 0
    relu_name = "relu" + str(self.nn_name2id["relu"])
    output_name = node.layer_name
    layer_outputs = [relu_name, output_name]
    assert len(node.inputs) == 1, "The count of RelU node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.relu_param
    if params.HasField('negative_slope') and params.negative_slope != 0:
        negative_slope = float(params.negative_slope)
        self.pd_graph.add_layer(
            "paddle.nn.LeakyReLU",
            inputs={"input": self.get_input_name(input)},
            outputs=layer_outputs,
            alpha=negative_slope)
    else:
        self.pd_graph.add_layer(
            "paddle.nn.ReLU",
            inputs={"input": self.get_input_name(input)},
            outputs=layer_outputs)

def PReLU(self, node):
    """Map a Caffe PReLU layer onto paddle.nn.PReLU."""
    if "prelu" in self.nn_name2id:
        self.nn_name2id["prelu"] += 1
    else:
        self.nn_name2id["prelu"] = 0
    prelu_name = "prelu" + str(self.nn_name2id["prelu"])
    output_name = node.layer_name
    layer_outputs = [prelu_name, output_name]
    assert len(node.inputs) == 1, "The count of PReLU node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.prelu_param
    mode_bool = params.channel_shared
    output_shape = node.output_shape[0]
    # channel_shared means a single slope parameter; otherwise one per channel.
    if mode_bool:
        num_parameters = 1
    else:
        num_parameters = output_shape[1]
    data = node.data
    # BUG FIX: validate data before dereferencing data[0] (the old code
    # indexed first and asserted afterwards).
    assert data is not None, "The parameter of {} (type is {}) is not set. You need to use python package of caffe to set the default value.".format(
        node.layer_name, node.layer_type)
    self.params[prelu_name + '._weight'] = np.squeeze(data[0])
    self.pd_graph.add_layer(
        "paddle.nn.PReLU",
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs,
        num_parameters=num_parameters)

def Accuracy(self, node):
    """Map a Caffe Accuracy layer onto prim.accuracy."""
    assert len(node.inputs) == 2, "The count of Accuracy node's input is not 2."
    inputs_dict = dict()
    for i, shape in enumerate(node.input_shape):
        # The bottom whose channel dim is 1 is the label; the other is the
        # prediction. BUG FIX: the keys must be the string literals 'x'/'y'
        # (the old code used undefined bare names x and y).
        if shape[1] == 1:
            input = self.graph.get_bottom_node(node, idx=i, copy=True)
            inputs_dict['y'] = self.get_input_name(input)
        else:
            input = self.graph.get_bottom_node(node, idx=i, copy=True)
            inputs_dict['x'] = self.get_input_name(input)
    params = node.layer.accuracy_param
    top_k = params.top_k
    axis = params.axis
    ignore_label = params.ignore_label
    assert axis == 1, "PaddlePaddle can not support the situation when the axis is not 1."
    assert not ignore_label >= 0, "PaddlePaddle can not support the situation when the model has ignore label."
    self.pd_graph.add_layer(
        "prim.accuracy",
        inputs=inputs_dict,
        outputs=[node.layer_name],
        topk=top_k)

def Eltwise(self, node):
    """Map a Caffe Eltwise layer (PROD/SUM/MAX) onto paddle elementwise ops."""
    assert len(node.inputs) == 2, "The count of Eltwise node's input is not 2."
    params = node.layer.eltwise_param
    mode = params.operation
    input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
    input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
    input0_name = self.get_input_name(input0)
    input1_name = self.get_input_name(input1)
    if mode == 0:
        # PROD
        inputs_dict = {}
        inputs_dict['x'] = input0_name
        inputs_dict['y'] = input1_name
        self.pd_graph.add_layer(
            "paddle.multiply",
            inputs=inputs_dict,
            outputs=[node.layer_name])
    elif mode == 1:
        # SUM, optionally with per-input coefficients.
        if hasattr(params, 'coeff') and len(params.coeff) == 2:
            coeff = params.coeff
            self.pd_graph.add_layer(
                "prim.mul",
                inputs={"x": input0_name},
                outputs=[node.layer_name + '_mul0'],
                y=coeff[0])
            # BUG FIX: the second coefficient is coeff[1]; coeff[2] raised
            # IndexError on the length-2 coeff list checked above.
            self.pd_graph.add_layer(
                "prim.mul",
                inputs={"x": input1_name},
                outputs=[node.layer_name + '_mul1'],
                y=coeff[1])
            inputs_dict = {}
            inputs_dict['x'] = node.layer_name + '_mul0'
            inputs_dict['y'] = node.layer_name + '_mul1'
            self.pd_graph.add_layer(
                "paddle.add",
                inputs=inputs_dict,
                outputs=[node.layer_name])
        else:
            inputs_dict = {}
            inputs_dict['x'] = input0_name
            inputs_dict['y'] = input1_name
            self.pd_graph.add_layer(
                "paddle.add",
                inputs=inputs_dict,
                outputs=[node.layer_name])
    else:
        # MAX
        inputs_dict = {}
        inputs_dict['x'] = input0_name
        inputs_dict['y'] = input1_name
        self.pd_graph.add_layer(
            "paddle.max",
            inputs=inputs_dict,
            outputs=[node.layer_name])
def BatchNorm(self, node):
    """Map a Caffe BatchNorm layer onto paddle.nn.BatchNorm2D.

    Caffe stores mean/variance plus a scale factor; both statistics are
    divided by the scale factor before being handed to Paddle.
    """
    if "batchnorm" in self.nn_name2id:
        self.nn_name2id["batchnorm"] += 1
    else:
        self.nn_name2id["batchnorm"] = 0
    batchnorm_name = "batchnorm" + str(self.nn_name2id["batchnorm"])
    output_name = node.layer_name
    layer_outputs = [batchnorm_name, output_name]
    assert len(node.inputs) == 1, \
        "The count of BatchNorm node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.batch_norm_param
    if hasattr(params, "eps"):
        eps = params.eps
    else:
        eps = 1e-5
    if node.data is None or len(node.data) != 3:
        print(
            "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
            .format(node.layer_name, node.layer_type))
        mean = np.zeros([node.input_shape[0][1], ]).astype("float32")
        variance = np.zeros([node.input_shape[0][1], ]).astype("float32")
        scale = 0
    else:
        node.data = [np.squeeze(i).astype("float32") for i in node.data]
        mean, variance, scale = node.data
        # Prescale the stats by the stored scale factor.
        scaling_factor = 1.0 / scale if scale != 0 else 0
        mean *= scaling_factor
        variance *= scaling_factor
    self.params[batchnorm_name + "._mean"] = mean
    self.params[batchnorm_name + '._variance'] = variance
    layer_attrs = {
        "num_features": node.input_shape[0][1],
        "epsilon": eps,
        "weight_attr": False,
        "bias_attr": False,
    }
    self.pd_graph.add_layer(
        "paddle.nn.BatchNorm2D",
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs,
        **layer_attrs)

def Scale(self, node):
    """Map a Caffe Scale layer: elementwise multiply by a weight, add a bias."""
    if node.data is None:
        print(
            "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
            .format(node.layer_name, node.layer_type))
        self.params[node.layer_name + ".weight"] = np.zeros([
            node.input_shape[0][1],
        ]).astype("float32")
        self.params[node.layer_name + ".bias"] = np.zeros([
            node.input_shape[0][1],
        ]).astype("float32")
    else:
        self.params[node.layer_name + ".weight"] = np.squeeze(node.data[
            0]).astype("float32")
        self.params[node.layer_name + ".bias"] = np.squeeze(node.data[
            1]).astype("float32")
    params = node.layer.scale_param
    axis = params.axis
    if len(node.inputs) == 2:
        # Two bottoms: the second input supplies the scale tensor.
        input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
        input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
        inputs_dict = {}
        inputs_dict['x'] = self.get_input_name(input0)
        inputs_dict['y'] = self.get_input_name(input1)
        self.pd_graph.add_layer(
            "paddle.multiply",
            inputs=inputs_dict,
            outputs=[node.layer_name + "_mul"],
            axis=1)
    else:
        # Single bottom: load the learned scale parameter as a tensor.
        self.pd_graph.add_layer(
            "paddle.to_tensor",
            inputs={},
            outputs=[node.layer_name + "_cparam1"],
            data="params[{}]".format(string(node.layer_name + ".weight")))
        input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
        inputs_dict = {}
        inputs_dict['x'] = self.get_input_name(input0)
        inputs_dict['y'] = node.layer_name + "_cparam1"
        self.pd_graph.add_layer(
            "paddle.multiply",
            inputs=inputs_dict,
            outputs=[node.layer_name + "_mul"],
            axis=axis)
    # Add the bias term; both branches produced <name>_mul above.
    self.pd_graph.add_layer(
        "paddle.to_tensor",
        inputs={},
        outputs=[node.layer_name + "_cparam2"],
        data="params[{}]".format(string(node.layer_name + ".bias")))
    inputs_dict = {}
    inputs_dict['x'] = node.layer_name + "_mul"
    inputs_dict['y'] = node.layer_name + "_cparam2"
    self.pd_graph.add_layer(
        "paddle.add",
        inputs=inputs_dict,
        outputs=[node.layer_name],
        axis=axis)

def Reshape(self, node):
    """Map a Caffe Reshape layer onto paddle.reshape."""
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    top_count = len(input.layer.top)
    # A bottom feeding several tops cannot be reshaped in place.
    is_inplace = False if top_count == 1 else True
    output_shape = node.output_shape[0]
    # BUG FIX: removed a leftover debug print of output_shape.
    layer_attrs = {
        'shape': output_shape,
        'inplace': is_inplace,
    }
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name],
        **layer_attrs)

def ArgMax(self, node):
    """Map a Caffe ArgMax layer onto paddle.topk (+ concat when out_max_val)."""
    assert len(node.inputs) == 1 and len(node.outputs) == 1, \
        "The count of ArgMax node's input and output is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    input_shape = node.input_shape[0]
    params = node.layer.argmax_param
    # BUG FIX: hasattr takes attribute *names*; the old code passed undefined
    # bare identifiers (and misspelled "params" as "parmas").
    out_max_val = params.out_max_val if hasattr(params,
                                                "out_max_val") else False
    top_k = params.top_k if hasattr(params, "top_k") else 1
    axis = params.axis if hasattr(params, "axis") else -1
    if axis < 0:
        axis += len(input_shape)
    if out_max_val is True:
        # Emit both the top-k values and their indices, concatenated.
        self.pd_graph.add_layer(
            "paddle.topk",
            inputs={"x": self.get_input_name(input)},
            outputs=[node.layer_name + "_topk_var",
                     node.layer_name + "_index_var"],
            k=top_k)
        self.pd_graph.add_layer(
            "paddle.cast",
            inputs={"x": node.layer_name + "_index_var"},
            outputs=[node.layer_name + "_index_var"],
            dtype="{}_topk_var.dtype".format(node.layer_name))
        self.pd_graph.add_layer(
            "prim.list",
            inputs={"input0": node.layer_name + "_topk_var",
                    "input1": node.layer_name + "_index_var"},
            outputs=[node.layer_name + "_list"])
        self.pd_graph.add_layer(
            "paddle.concat",
            inputs={"x": node.layer_name + "_list"},
            outputs=[node.layer_name],
            axis=axis)
    else:
        # Indices only; the values output is discarded.
        self.pd_graph.add_layer(
            "paddle.topk",
            inputs={"x": self.get_input_name(input)},
            outputs=["_", node.layer_name],
            k=top_k)

def Axpy(self, node):
    """Map a Caffe Axpy layer: out = a * x + y (three bottoms)."""
    # BUG FIX: Axpy reads three bottoms (a, x, y); the old assert demanded 1.
    assert len(node.inputs) == 3 and len(node.outputs) == 1, \
        "The count of Axpy node's input is not 3 or output is not 1."
    input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
    input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
    input2 = self.graph.get_bottom_node(node, idx=2, copy=True)
    input0_name = self.get_input_name(input0)
    input1_name = self.get_input_name(input1)
    input2_name = self.get_input_name(input2)
    inputs_dict = {}
    inputs_dict['x'] = input1_name
    inputs_dict['y'] = input0_name
    self.pd_graph.add_layer(
        "paddle.multiply",
        inputs=inputs_dict,
        outputs=[node.layer_name + "_mul"],
        axis=0)
    inputs_dict = {}
    inputs_dict['x'] = node.layer_name + "_mul"
    inputs_dict['y'] = input2_name
    # BUG FIX: the final add must produce the node's own output name;
    # it previously overwrote the intermediate "_mul" result instead.
    self.pd_graph.add_layer(
        "paddle.add",
        inputs=inputs_dict,
        outputs=[node.layer_name])
def Crop(self, node):
    """Map a Caffe Crop layer onto paddle.crop."""
    assert len(node.inputs) == 2, "The count of Crop node's input is not 2."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    # The second bottom only provides the reference shape.
    example = self.graph.get_bottom_node(node, idx=1, copy=True)
    params = node.layer.crop_param
    axis = params.axis
    input_shape = node.input_shape[0]
    if axis < 0:
        axis += len(input_shape)
    offset_real = [0] * len(input_shape)
    if hasattr(params, "offset") and len(params.offset) > 0:
        offset = list(params.offset)
        assert (len(input_shape) - axis
                ) == len(offset), "invalid offset[%s] in crop layer" % (
                    str(offset))
        # Dimensions before `axis` are not cropped.
        offset_real = [0] * axis + offset
    self.pd_graph.add_layer(
        "paddle.crop",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name],
        shape=node.input_shape[1],
        offsets=list(offset_real))

def Flatten(self, node):
    """Map a Caffe Flatten layer onto paddle.reshape."""
    # BUG FIX: the message previously named "DetectionOutput".
    assert len(node.inputs) == 1, \
        "The count of Flatten node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name],
        shape=node.output_shape[0])

def Power(self, node):
    """Map a Caffe Power layer: out = (scale * x + shift) ** power."""
    # BUG FIX: the message previously named "Permute".
    assert len(node.inputs) == 1, "The count of Power node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.power_param
    layer_attrs = {
        'scale': params.scale,
        'bias': params.shift,
        'bias_after_scale': True
    }
    self.pd_graph.add_layer(
        "paddle.scale",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name],
        **layer_attrs)
    self.pd_graph.add_layer(
        "paddle.pow",
        inputs={"x": node.layer_name},
        outputs=[node.layer_name],
        exponent=params.power)

def Reduction(self, node):
    """Map a Caffe Reduction layer (SUM/ASUM/SUMSQ/MEAN) with a coeff scale."""
    assert len(node.inputs) == 1, \
        "The count of Reduction node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.reduction_param
    operation = params.operation
    axis = params.axis
    coeff = params.coeff
    assert operation >= 1 and operation <= 4, \
        "reduction reduction [%s] error" % (operation)
    input_len = len(node.input_shape[0])
    if axis < 0:
        axis += input_len + 1
    # Caffe reduces over all dimensions from `axis` to the end.
    dim = list(range(input_len))
    if operation == 1:  # SUM
        layer_attrs = {
            "dim": dim[axis:],
            "keep_dim": False,
        }
        self.pd_graph.add_layer(
            "paddle.sum",
            inputs={"input": self.get_input_name(input)},
            outputs=[node.layer_name],
            **layer_attrs)
    elif operation == 2:  # ASUM: sum of absolute values
        self.pd_graph.add_layer(
            "paddle.abs",
            inputs={"x": self.get_input_name(input)},
            outputs=[node.layer_name])
        layer_attrs = {
            "dim": dim[axis:],
            "keep_dim": False,
        }
        self.pd_graph.add_layer(
            "paddle.sum",
            inputs={"input": node.layer_name},
            outputs=[node.layer_name],
            **layer_attrs)
    elif operation == 3:  # SUMSQ: sum of squares
        self.pd_graph.add_layer(
            "paddle.pow",
            inputs={"x": self.get_input_name(input)},
            outputs=[node.layer_name],
            exponent=2.0)
        layer_attrs = {
            "dim": dim[axis:],
            "keep_dim": False,
        }
        self.pd_graph.add_layer(
            "paddle.sum",
            inputs={"input": node.layer_name},
            outputs=[node.layer_name],
            **layer_attrs)
    else:  # MEAN
        layer_attrs = {
            "dim": dim[axis:],
            "keep_dim": False,
        }
        self.pd_graph.add_layer(
            "paddle.mean",
            inputs={"input": self.get_input_name(input)},
            outputs=[node.layer_name],
            **layer_attrs)
    self.pd_graph.add_layer(
        "paddle.scale",
        inputs={"x": node.layer_name},
        outputs=[node.layer_name],
        scale=coeff)

def DetectionOutput(self, node):
    """Map a Caffe DetectionOutput layer onto fluid.layers.detection_output."""
    assert len(node.inputs) == 3, \
        "The count of DetectionOutput node's input is not 3."
    inputs_list = list()
    for i in range(len(node.inputs)):
        input = self.graph.get_bottom_node(node, idx=i, copy=True)
        if i == 1:
            # The confidence input must come straight out of a
            # Softmax/Sigmoid; walk up to find it, then take its bottom.
            input = self.graph.get_bottom_node(node, idx=i, copy=True)
            while input is not None \
                    and input.layer_type != 'Softmax' \
                    and input.layer_type != 'Sigmoid':
                input = self.graph.get_bottom_node(input, idx=0, copy=True)
            assert input is not None, \
                'This kind of DetectionOutput is not supported!'
            input = self.graph.get_bottom_node(input, idx=0, copy=True)
        inputs_list.append(self.get_input_name(input))
    params = node.layer.detection_output_param
    nms_param = params.nms_param
    # BUG FIX: check for a missing nms_param *before* reading its fields;
    # the old code dereferenced it first and applied the defaults too late.
    if nms_param is None:
        nms_param_dict = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
    else:
        nms_param_dict = {
            "nms_threshold": nms_param.nms_threshold,
            "top_k": nms_param.top_k,
            "eta": nms_param.eta,
        }
    # The prior-box bottom packs boxes and variances along dim 1.
    self.pd_graph.add_layer(
        "paddle.split",
        inputs={"input": inputs_list[2]},
        outputs=[node.layer_name + "_priorbox_list"],
        num_or_sections=2,
        dim=1)
    self.pd_graph.add_layer(
        "prim.getitem",
        inputs={"list": node.layer_name + "_priorbox_list"},
        outputs=[node.layer_name + "_pb"],
        index=0)
    self.pd_graph.add_layer(
        "prim.getitem",
        inputs={"list": node.layer_name + "_priorbox_list"},
        outputs=[node.layer_name + "_pbv"],
        index=1)
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": node.layer_name + "_pb"},
        outputs=[node.layer_name + "_pb"],
        shape=[-1, 4])
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": node.layer_name + "_pbv"},
        outputs=[node.layer_name + "_pbv"],
        shape=[-1, 4])
    self.pd_graph.add_layer(
        "prim.shape_dim",
        inputs={"input": node.layer_name + "_pb"},
        outputs=[node.layer_name + "_pb_dim"],
        dim=0)
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": inputs_list[0]},
        outputs=[node.layer_name + "_loc"],
        shape="[-1, {}_pb_dim, 4]".format(node.layer_name))
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": inputs_list[1]},
        outputs=[node.layer_name + "_conf_flatten"],
        shape="[0, {}_pb_dim, -1]".format(node.layer_name))
    inputs_dict = {}
    inputs_dict["loc"] = node.layer_name + "_loc"
    inputs_dict["scores"] = node.layer_name + "_conf_flatten"
    inputs_dict["prior_box"] = node.layer_name + "_pb"
    inputs_dict["prior_box_var"] = node.layer_name + "_pbv"
    layer_attrs = {
        "background_label": params.background_label_id,
        "nms_threshold": nms_param_dict["nms_threshold"],
        "nms_top_k": nms_param_dict["top_k"],
        "keep_top_k": params.keep_top_k,
        "score_threshold": params.confidence_threshold,
        "nms_eta": nms_param_dict["eta"]}
    self.pd_graph.add_layer(
        "fluid.layers.detection_output",
        inputs=inputs_dict,
        outputs=[node.layer_name],
        **layer_attrs)

def Normalize(self, node):
    """Map a Caffe Normalize layer: L2-normalize then multiply by a scale."""
    assert len(node.inputs) == 1, \
        "The count of Normalize node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.norm_param
    # BUG FIX: "self.parmas" was a typo for self.params (two occurrences).
    if node.data is None or len(node.data) != 1:
        print(
            "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
            .format(node.layer_name, node.layer_type))
        self.params[node.layer_name + ".scale"] = \
            np.zeros([1] if params.channel_shared else
                     [1, 1, 1, node.input_shape[0][1]]).astype("float32")
    else:
        self.params[node.layer_name + ".scale"] = \
            self.adjust_parameters(node)[0]
    self.pd_graph.add_layer(
        "paddle.nn.functional.normalize",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name + "_l2"],
        p=2,
        axis=1)
    # BUG FIX: "graph.add_layer" referenced an undefined name; the graph is
    # held in self.pd_graph.
    self.pd_graph.add_layer(
        "paddle.to_tensor",
        inputs={},
        outputs=[node.layer_name + "_param"],
        data="params[{}]".format(string(node.layer_name + ".scale")))
    inputs_dict = {}
    inputs_dict["x"] = node.layer_name + "_l2"
    inputs_dict["y"] = node.layer_name + "_param"
    self.pd_graph.add_layer(
        "paddle.multiply",
        inputs=inputs_dict,
        outputs=[node.layer_name],
        axis=-1 if params.channel_shared else 1)

def Permute(self, node):
    """Map a Caffe Permute layer onto paddle.transpose."""
    assert len(node.inputs) == 1, "The count of Permute node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.permute_param
    order = list(params.order)
    self.pd_graph.add_layer(
        "paddle.transpose",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name],
        perm=order)
def PriorBox(self, node):
    """Map a Caffe PriorBox layer onto fluid.layers.prior_box."""
    assert len(node.inputs) == 2, \
        "The count of PriorBox node's input is not 2."
    input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
    input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
    inputs_dict = {}
    inputs_dict["input"] = self.get_input_name(input0)
    inputs_dict["image"] = self.get_input_name(input1)
    params = node.layer.prior_box_param
    # A scalar step applies to both spatial dimensions.
    steps = tuple(params.step) if type(params.step) \
        is list or type(params.step) is tuple \
        else (params.step, params.step)
    layer_attrs = {
        "min_sizes": params.min_size,
        "max_sizes": params.max_size,
        "aspect_ratios": params.aspect_ratio,
        "variance": params.variance,
        "flip": params.flip,
        "clip": params.clip,
        "steps": steps,
        "offset": params.offset,
        "min_max_aspect_ratios_order": True}
    self.pd_graph.add_layer(
        "fluid.layers.prior_box",
        inputs=inputs_dict,
        outputs=[node.layer_name + "_box", node.layer_name + "_var"],
        **layer_attrs)
    # Flatten boxes and variances to [1, 1, -1] and concat them like Caffe.
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": node.layer_name + "_box"},
        outputs=[node.layer_name + "_box"],
        shape=[1, 1, -1])
    self.pd_graph.add_layer(
        "paddle.reshape",
        inputs={"x": node.layer_name + "_var"},
        outputs=[node.layer_name + "_var"],
        shape=[1, 1, -1])
    self.pd_graph.add_layer(
        "prim.list",
        inputs={"input0": node.layer_name + "_box",
                "input1": node.layer_name + "_var"},
        outputs=[node.layer_name + "_list"])
    self.pd_graph.add_layer(
        "paddle.concat",
        inputs={"x": node.layer_name + "_list"},
        outputs=[node.layer_name],
        axis=1)

def ReLU6(self, node):
    """Map a Caffe ReLU6 layer onto paddle.nn.ReLU6."""
    if "relu6" in self.nn_name2id:
        self.nn_name2id["relu6"] += 1
    else:
        self.nn_name2id["relu6"] = 0
    relu6_name = "relu6" + str(self.nn_name2id["relu6"])
    output_name = node.layer_name
    layer_outputs = [relu6_name, output_name]
    assert len(node.inputs) == 1, "The count of RelU6 node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    self.pd_graph.add_layer(
        "paddle.nn.ReLU6",
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs)

def ROIPooling(self, node):
    """Map a Caffe ROIPooling layer onto fluid.layers.roi_pool."""
    assert len(node.inputs) == 2, \
        "The count of ROIPooling node's input is not 2."
    input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
    input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
    inputs_dict = {}
    inputs_dict["input"] = self.get_input_name(input0)
    inputs_dict["roi"] = self.get_input_name(input1)
    params = node.layer.roi_pooling_param
    # Drop the batch-index column; roi_pool expects [x1, y1, x2, y2].
    self.pd_graph.add_layer(
        "paddle.slice",
        inputs={"input": self.get_input_name(input1)},
        outputs=[self.get_input_name(input1)],
        axes=[1],
        starts=[1],
        ends=[5])
    layer_attrs = {
        "pooled_height": params.pooled_h,
        "pooled_width": params.pooled_w,
        "spatial_scale": params.spatial_scale}
    self.pd_graph.add_layer(
        "fluid.layers.roi_pool",
        inputs=inputs_dict,
        outputs=[node.layer_name],
        **layer_attrs)

def ShuffleChannel(self, node):
    """Map a Caffe ShuffleChannel layer onto fluid.layers.shuffle_channel."""
    assert len(node.inputs) == 1, \
        "The count of ShuffleChannel node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.shuffle_channel_param
    self.pd_graph.add_layer(
        "fluid.layers.shuffle_channel",
        inputs={"x": self.get_input_name(input)},
        outputs=[node.layer_name],
        group=params.group)

def Upsample(self, node):
    """Map a Caffe Upsample layer onto nearest-neighbor interpolation."""
    assert len(node.inputs) == 1, \
        "The count of Upsample node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    params = node.layer.upsample_param
    layer_attrs = {
        "align_corners": False,
        "scale_factor": params.scale,
        "mode": "nearest"}
    # BUG FIX: "functioanl" was a typo; the generated code could never
    # resolve paddle.nn.functioanl.interpolate.
    self.pd_graph.add_layer(
        "paddle.nn.functional.interpolate",
        inputs={"input": self.get_input_name(input)},
        outputs=[node.layer_name],
        **layer_attrs)

def Select(self, node):
    """Map a Caffe Select layer onto a prim.update_end + paddle.split pair."""
    assert len(node.inputs) == 1, "The count of Select node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    input_shape = node.input_shape[0]
    params = node.layer.select_param
    layer_attrs = {
        "input_shape": input_shape,
        "point": params.slice_point,
        "axis": params.axis}
    self.pd_graph.add_layer(
        "prim.update_end",
        inputs={},
        outputs=[node.layer_name + "_end"],
        **layer_attrs)
    layer_attrs = {
        "axes": [params.axis],
        "starts": [params.slice_point[0]]}
    self.pd_graph.add_layer(
        "paddle.split",
        inputs={"input": self.get_input_name(input),
                "end": node.layer_name + "_end"},
        outputs=[node.layer_name],
        **layer_attrs)

def directly_map(self, node):
    """Map a one-to-one activation layer via the directly_map_ops table."""
    assert node.layer_type in self.directly_map_ops
    op_info = self.directly_map_ops[node.layer_type]
    prefix_name = node.layer_type.lower()
    if prefix_name in self.nn_name2id:
        self.nn_name2id[prefix_name] += 1
    else:
        self.nn_name2id[prefix_name] = 0
    first_output_name = prefix_name + str(self.nn_name2id[prefix_name])
    output_name = node.layer_name
    # BUG FIX: the old code referenced the undefined name "relu_name"
    # instead of the freshly generated first_output_name.
    layer_outputs = [first_output_name, output_name]
    assert len(node.inputs) == 1, \
        "The count of Activate node's input is not 1."
    input = self.graph.get_bottom_node(node, idx=0, copy=True)
    self.pd_graph.add_layer(
        op_info,
        inputs={"input": self.get_input_name(input)},
        outputs=layer_outputs)

# ---------------------------------------------------------------------------
# New file in this patch:
#   x2paddle/op_mapper/dygraph/caffe2paddle/caffe_shape.py
# ---------------------------------------------------------------------------
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numbers
from functools import reduce


def get_kernel_parameters(params):
    """Extract (dila_h, dila_w, p_h, p_w, k_h, k_w, s_h, s_w) from a Caffe
    ConvolutionParameter/PoolingParameter.

    Each of kernel/stride/pad may be a scalar, a repeated field, or the
    explicit *_h / *_w pair; the explicit pair wins when it is positive.
    """
    [k_h, k_w] = [1, 1]
    if isinstance(params.kernel_size, numbers.Number):
        [k_h, k_w] = [params.kernel_size] * 2
    elif len(params.kernel_size) > 0:
        k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[0]
        k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[
            len(params.kernel_size) - 1]
    elif params.kernel_h > 0 or params.kernel_w > 0:
        k_h = params.kernel_h
        k_w = params.kernel_w
    [s_h, s_w] = [1, 1]
    if isinstance(params.stride, numbers.Number):
        [s_h, s_w] = [params.stride] * 2
    elif len(params.stride) > 0:
        s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
        s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
            params.stride) - 1]
    elif params.stride_h > 0 or params.stride_w > 0:
        s_h = params.stride_h
        s_w = params.stride_w
    [p_h, p_w] = [0, 0]
    if isinstance(params.pad, numbers.Number):
        [p_h, p_w] = [params.pad] * 2
    elif len(params.pad) > 0:
        p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
        p_w = params.pad_w if params.pad_w > 0 else params.pad[len(params.pad)
                                                               - 1]
    elif params.pad_h > 0 or params.pad_w > 0:
        p_h = params.pad_h
        p_w = params.pad_w
    dila_h = dila_w = 1
    if hasattr(params, 'dilation'):
        dila_len = len(params.dilation)
        if dila_len == 2:
            dila_h = params.dilation[0]
            dila_w = params.dilation[1]
        elif dila_len == 1:
            dila_h = dila_w = params.dilation[0]
        else:
            assert dila_len == 0, \
                "invalid length[%s] of dilation in convolution" % (dila_len)
    return dila_h, dila_w, p_h, p_w, k_h, k_w, s_h, s_w


def get_strided_kernel_output_shape(params, input_shape, round_func):
    """Compute the NCHW output shape of a strided kernel op.

    round_func is math.floor or math.ceil depending on the layer's rounding
    convention (e.g. pooling ceil_mode).
    """
    i_h = input_shape[2]
    i_w = input_shape[3]
    dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = \
        get_kernel_parameters(params)
    o_h = (i_h + 2 * pad_h - (dila_h *
                              (kernel_h - 1) + 1)) / float(stride_h) + 1
    o_w = (i_w + 2 * pad_w - (dila_w *
                              (kernel_w - 1) + 1)) / float(stride_w) + 1
    o_h = int(round_func(o_h))
    o_w = int(round_func(o_w))
    has_c_o = hasattr(params, 'num_output')
    c = params.num_output if has_c_o else input_shape[1]
    return [[input_shape[0], c, o_h, o_w]]


def shape_convolution(layer, input_shape):
    """Output shape of a Convolution layer (floor rounding)."""
    params = layer.convolution_param
    return get_strided_kernel_output_shape(params, input_shape[0], math.floor)


def shape_deconvolution(layer, input_shape):
    """Output shape of a Deconvolution layer (transposed convolution)."""
    h_i = input_shape[0][2]
    w_i = input_shape[0][3]

    params = layer.convolution_param
    dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = \
        get_kernel_parameters(params)

    h_o = (h_i - 1) * stride_h - 2 * pad_h + dila_h * (kernel_h - 1) + 1
    w_o = (w_i - 1) * stride_w - 2 * pad_w + dila_w * (kernel_w - 1) + 1

    has_c_o = hasattr(params, 'num_output')
    # BUG FIX: input_shape is a list of shape lists, so the fallback channel
    # count is input_shape[0][1]; the old ".channels" attribute access would
    # raise AttributeError.
    c = params.num_output if has_c_o else input_shape[0][1]
    return [[input_shape[0][0], c, h_o, w_o]]


def shape_pooling(layer, input_shape):
    """Output shape of a Pooling layer; ceil_mode selects the rounding."""
    params = layer.pooling_param
    global_pool = getattr(params, 'global_pooling', False)
    if global_pool:
        return [[input_shape[0][0], input_shape[0][1], 1, 1]]

    ceil_mode = getattr(params, 'ceil_mode', True)
    if ceil_mode is True:
        method = math.ceil
    else:
        method = math.floor
    return get_strided_kernel_output_shape(params, input_shape[0], method)


def shape_convolutiondepthwise(layer, input_shape):
    """Output shape of a ConvolutionDepthwise layer (floor rounding)."""
    params = layer.convolution_param
    return get_strided_kernel_output_shape(params, input_shape[0], math.floor)


def shape_innerproduct(layer, input_shape):
    """Output shape of an InnerProduct (fully-connected) layer."""
    params = layer.inner_product_param
    return [[input_shape[0][0], params.num_output]]


def shape_lrn(layer, input_shape):
    """LRN preserves the input shape."""
    return input_shape


def shape_relu(layer, input_shape):
    """ReLU preserves the input shape."""
    return input_shape


def shape_softmax(layer, input_shape):
    """Softmax preserves the input shape."""
    return input_shape


def shape_input(layer, input_shape):
    """Shape of an Input layer as declared in its input_param."""
    return [list(layer.input_param.shape[0].dim)]


def shape_memorydata(layer, input_shape):
    """Shape of a MemoryData layer from its explicit NCHW fields."""
    params = layer.memory_data_param
    shape = []
    shape.append(int(params.batch_size))
    shape.append(int(params.channels))
    shape.append(int(params.height))
    shape.append(int(params.width))
    return [shape]


def shape_concat(layer, input_shape):
    """Concat sums the sizes of its inputs along the concat axis."""
    params = layer.concat_param
    axis = params.axis
    output_shape = None
    for shape in input_shape:
        if output_shape is None:
            output_shape = []
            for i in range(len(shape)):
                output_shape.append(shape[i])
        else:
            output_shape[axis] += shape[axis]
    return [output_shape]


def shape_slice(layer, input_shape):
    """Slice splits the input along `axis` at slice_point positions.

    With no explicit slice points the input is divided evenly among the
    layer's tops.
    """
    inshape = input_shape[0]

    top_len = len(layer.top)
    params = layer.slice_param
    axis = params.axis
    slice_dim = params.slice_dim
    # Legacy models use slice_dim instead of axis.
    if slice_dim != 1 and axis == 1:
        axis = slice_dim
    points = list(params.slice_point)
    count = inshape[axis]
    if len(points) == 0:
        assert count % top_len == 0, "the parameter of Slice is wrong"
        part = count / top_len
        t = part
        while t < count:
            points.append(int(t))
            t += part
    points = [0] + points + [count]
    output_shape = []
    for i in range(len(points)):
        shape = []
        for ii in range(len(inshape)):
            shape.append(inshape[ii])
        size = points[i + 1] - points[i]
        shape[axis] = size
        output_shape.append(shape)
        if i == len(points) - 2:
            break
    return output_shape


def shape_prelu(layer, input_shape):
    """PReLU preserves the input shape."""
    return input_shape


def shape_sigmoid(layer, input_shape):
    """Sigmoid preserves the input shape."""
    return input_shape


def shape_absval(layer, input_shape):
    """AbsVal preserves the input shape."""
    return input_shape


def shape_accuracy(layer, input_shape):
    """Accuracy reduces everything to a single scalar."""
    return [[1]]


def shape_tanh(layer, input_shape):
    """TanH preserves the input shape."""
    return input_shape


def shape_eltwise(layer, input_shape):
    """Eltwise outputs match the first input's shape."""
    return [input_shape[0]]


def shape_batchnorm(layer, input_shape):
    """BatchNorm preserves the input shape."""
    return input_shape


def shape_scale(layer, input_shape):
    """Scale preserves the input shape."""
    return input_shape
params.axis if hasattr(params, 'axis') else 0 + num_axes = params.num_axes if hasattr(params, 'num_axes') else -1 + if inshape[0] == -1: + inshape[0] = 1 + input_count = count(inshape) + + input_num_axes = len(inshape) + + input_start_axis = axis + start_axis = input_start_axis if input_start_axis >= 0 \ + else input_num_axes + input_start_axis + 1 + + assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis) + assert start_axis <= input_num_axes, "[Reshape]axis %d out of range for %d-D input data"\ + % (input_start_axis, input_num_axes) + + assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all" + + end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes + assert end_axis <= input_num_axes, "end_axis[%d] = axis[%d] + num_axes[%d] is out of range"\ + % (end_axis, start_axis, num_axes) + + num_axes_replaced = end_axis - start_axis + num_axes_retained = input_num_axes - num_axes_replaced + num_new_axes = len(list(params.shape.dim)) + output_shape = [] + + for i in range(start_axis): + output_shape.append(inshape[i]) + + for i in range(num_new_axes): + output_shape.append(params.shape.dim[i]) + + for i in range(end_axis, input_num_axes): + output_shape.append(inshape[i]) + + assert len(output_shape) == num_axes_retained + num_new_axes,\ + "[Reshape]invalid dims of output shape[%s]" % (str(output_shape)) + + inferred_axis = -1 + copy_axes = [] + constant_count = 1 + for i in range(num_new_axes): + top_dim = params.shape.dim[i] + if top_dim == 0: + copy_axes.append(i) + copy_axis_index = start_axis + i + output_shape[copy_axis_index] = inshape[copy_axis_index] + elif top_dim == -1: + assert inferred_axis == -1, "[Reshape]new shape contains multiple -1 dims" + inferred_axis = i + else: + constant_count *= top_dim + + if inferred_axis >= 0: + explicit_count = constant_count + l = inshape[0:start_axis] + if len(l) > 0: + explicit_count *= count(l) + l = inshape[end_axis:] + if len(l) > 0: + explicit_count *= count(l) + for i 
in range(len(copy_axes)): + explicit_count *= output_shape[start_axis + copy_axes[i]] + assert input_count % explicit_count == 0, "[Reshape]botom count[%d] "\ + "must be divisible by product of the specified dimensions[%d] "\ + % (input_count, explicit_count) + output_shape[start_axis + inferred_axis] = int(input_count / explicit_count) + + output_count = count(output_shape) + assert output_count == input_count, "[Reshape]output count[%d] must match input count[%d]" % ( + output_count, input_count) + output_shape[0] = -1 + return [output_shape] + + +def shape_argmax(layer, input_shape): + inshape = input_shape[0] + params = layer.argmax_param + out_max_val = params.out_max_val if hasattr(params, out_max_val) else False + top_k = params.top_k if hasattr(params, top_k) else 1 + axis = parmas.axis if hasattr(params, axis) else -1 + if axis < 0: + axis += len(inshape) + assert (axis + 1 == len(inshape) + ), 'only can be applied on the last dimension[axis:%d, %s] now,'\ + 'make sure you have set axis param in xxx.prototxt file' \ + % (axis, str(inshape)) + + output_shape = inshape + output_shape[-1] = top_k + if out_max_val is True: + output_shape[-1] *= 2 + return [output_shape] + + +def shape_crop(layer, input_shape): + assert len(input_shape) == 2, "the number of crop's inputs must be 2" + return [input_shape[1]] + + +def shape_flatten(layer, input_shape): + assert len(input_shape) == 1, "the number of flatten's inputs must be 1" + inshape = input_shape[0] + params = layer.flatten_param + start_axis = params.axis + end_axis = params.end_axis + if start_axis < 0: + start_axis += len(inshape) + if end_axis < 0: + end_axis += len(inshape) + 1 + assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params'\ + % (start_axis, end_axis) + output_shape = inshape[0:start_axis] + if len(inshape[start_axis:end_axis]) != 0: + flat_sz = reduce(lambda a, b: a * b, inshape[start_axis:end_axis]) + output_shape += [flat_sz] + output_shape += inshape[end_axis:len(inshape)] 
+ output_shape[0] = -1 + return [output_shape] + + +def shape_power(layer, input_shape): + return input_shape + + +def shape_reduction(layer, input_shape): + params = layer.reduction_param + axis = params.axis + if axis < 0: + axis += len(input_shape[0]) + 1 + assert axis <= len(input_shape[0]), 'invalid axis[%d] error' % (axis) + return [input_shape[0:axis]] + +def shape_axpy(layer, input_shape): + assert len(input_shapes) == 3, "not valid input shape for axpy layer" + assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims' + output_shape = input_shapes[1] + assert (input_shapes[2] == output_shape),\ + "shape not consistent for axpy[%s <--> %s]" \ + % (str(output_shape), str(input_shapes[2])) + return [output_shape] + +def shape_detectionoutput(layer, input_shape): + return [[-1, 6]] + +def shape_normalize(layer, input_shape): + return input_shape + +def shape_permute(layer, input_shape, order=None): + inshape = input_shape[0] + output_shape = [] + order = list(order) + for ii in order: + assert ii < len(inshape), "invalid order for permute[%s]" % (name) + output_shape.append(inshape[ii]) + return [output_shape] + +def shape_priorbox(layer, input_shape, max_size=None, aspect_ratio=None): + fc_shape = input_shape[0] + N = 1 + if not max_size == None: + N += 1 + if not aspect_ratio == None: + N += 2 * len(aspect_ratio) + N_bbx = fc_shape[2] * fc_shape[3] * N + output_shape = [1, 2, 4 * N_bbx] + return [output_shape] + +def shape_relu6(layer, input_shape): + return input_shape + +def shape_roipooling(layer, input_shape, pooled_w=None, pooled_h=None): + base_fea_shape = input_shapes[0] + rois_shape = input_shapes[1] + output_shape = base_fea_shape + output_shape[0] = rois_shape[0] + output_shape[2] = pooled_h + output_shape[3] = pooled_w + return [output_shape] + +def shape_shufflechannel(layer, input_shape): + return input_shape + +def shape_upsample(layer, input_shape, scale): + assert len(input_shapes) == 1, "not valid input shape for upsample 
layer" + assert type(scale) is int + input_shape = input_shapes[0] + new_h = scale * input_shape[2] + new_w = scale * input_shape[3] + + output_shape = [input_shape[0], input_shape[1], new_h, new_w] + return [output_shape] + +def shape_select(layer, input_shape, slice_point, axis): + input_shape = input_shapes[0] + start = slice_point[0] + if len(slice_point) == 2: + end = slice_point[1] + else: + end = input_shape[axis] + assert end > start, "invalid slice_point with [start:%d, end:%d]"\ + % (start, end) + output_shape = input_shape + output_shape[axis] = end - start + return [output_shape] + diff --git a/x2paddle/op_mapper/dygraph/prim2code.py b/x2paddle/op_mapper/dygraph/prim2code.py new file mode 100644 index 0000000..27e0620 --- /dev/null +++ b/x2paddle/op_mapper/dygraph/prim2code.py @@ -0,0 +1,447 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License" +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +NO_OUTPUT_COUNT = 0 + +def gen_codes(code_list, indent=0): + indent_blank = " " * indent + codes = [] + for code_line in code_list: + if code_line.strip() == "": + codes.append('\n') + else: + codes.append(indent_blank + code_line + '\n') + return codes + + +def get_value(layer, key, layer_id=None, different_attrs=None): + """ 进行optimizer后可能把inputs的value直接用数值代替(ConstantFuser), + 会把input换成attr,所以需要此处的操作。 + """ + if key in layer.inputs: + return layer.inputs[key] + else: + if different_attrs is None: + return str(layer.attrs[key]) + else: + key_name = "{}_{}".format(layer.outputs[0], key) + if key_name in different_attrs: + return key_name + else: + if layer_id is None: + return str(layer.attrs[key]) + key_name = "{}_{}".format("layer_id/{}".format(layer_id), key) + if key_name in different_attrs: + new_key_name = "attr_{}".format(NO_OUTPUT_COUNT) + NO_OUTPUT_COUNT += 1 + diff_index = different_attrs.index(key_name) + different_attrs[diff_index] = new_key_name + return new_key_name + else: + return str(layer.attrs[key]) + + +def prim_add(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} + {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_add_(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} + {} * {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + layer.attrs["alpha"], + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_and(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} and {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_append(layer, indent=1, init_func=[], 
forward_func=[], layer_id=None, different_attrs=None): + line = "{}.append({})".format( + get_value(layer, "list", layer_id, different_attrs), + get_value(layer, "element", layer_id, different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_assert(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + if layer.attrs["type"] == "eq": + values = get_value(layer, "key") + if "value" in layer.attrs: + values = layer.attrs["value"] + if isinstance(values, list): + s = "" + for v in values: + s += "{} == {} or ".format(get_value(layer, "key"), v) + if len(s) > 0: + s = s[:-4] + line = "assert {}, \'The {} must be {}!\'".format( + s, get_value(layer, "key"), get_value(layer, "value")) + else: + line = "assert {} == {}, \'The {} must be {}!\'".format( + get_value(layer, "key"), + get_value(layer, "value"), + get_value(layer, "key"), get_value(layer, "value")) + else: + raise Exception("Not implement yet!") + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_check_dim(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + lines = [] + dim = get_value(layer, "dim", different_attrs) + lines.append("if {} < 0:".format(dim)) + lines.append(" {} = {} + {}".format(layer.outputs[ + 0], dim, get_value(layer, "len", different_attrs))) + lines.append("else:") + lines.append(" {} = {}".format(layer.outputs[0], dim)) + forward_func.extend(gen_codes(lines, indent=indent)) + + +def prim_constant(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}".format(layer.outputs[0], layer.attrs["value"]) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_contain(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} in {}".format(layer.outputs[0], + get_value(layer, "element", different_attrs), + get_value(layer, "input", different_attrs)) + 
forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_dict(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = dict()".format(layer.outputs[0]) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_dict_construct(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + lines = list() + line = "{} = dict()".format(layer.outputs[0]) + lines.append(line) + for i in range(len(layer.inputs)): + line = "{}[{}] = {}".format(layer.outputs[0], + get_value(layer, "key{}".format(i), different_attrs), + get_value(layer, "value{}".format(i), different_attrs)) + lines.append(line) + forward_func.extend(gen_codes(lines, indent=indent)) + + +def prim_div(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} / {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_eq(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} == {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_equal(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_exception(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "raise RaiseException({})".format(get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_float(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = float({})".format(layer.outputs[0], 
get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_floor(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = math.floor({})".format(layer.outputs[0], + get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_floordiv(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} // {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_getitem(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}[{}]".format(layer.outputs[0], + get_value(layer, "list", different_attrs), + get_value(layer, "index", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_gt(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} > {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_if(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "if {} :".format(get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + block = layer.blocks[0] + b_init_lines, b_forward_lines = block.gen_dygraph_code(indent=indent + 1) + init_func.extend(b_init_lines) + forward_func.extend(b_forward_lines) + block = layer.blocks[1] + if len(block.layers) > 0: + b_init_lines, b_forward_lines = block.gen_dygraph_code( + indent=indent + 1) + if len(b_forward_lines) != 0: + line = "else:" + forward_func.extend(gen_codes([line], indent=indent)) + init_func.extend(b_init_lines) + forward_func.extend(b_forward_lines) + + +def prim_int(layer, 
indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = int({})".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_is(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} is {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_isinstance(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = isinstance({}, {})".format(layer.outputs[0], + get_value(layer, "input", different_attrs), + layer.attrs["cls"]) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_isnot(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} is not {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_le(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} <= {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_len(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = len({})".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_len2list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + lines = [] + lines.append("{} = []".format(layer.outputs[0])) + lines.append("for i in range({}):".format(get_value(layer, "len", different_attrs))) + lines.append(" {}.append(i)".format(layer.outputs[0])) + 
forward_func.extend(gen_codes(lines, indent=indent)) + + +def prim_lt(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} < {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + input_len = len(layer.inputs) + len(layer.attrs) + inputs_list = list() + for i in range(input_len): + inputs_list.append(get_value(layer, "input{}".format(i), different_attrs)) + inputs_str = ', '.join(inputs_list) + line = "{} = [{}]".format(layer.outputs[0], inputs_str) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_list_unpack(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}".format(", ".join(layer.outputs), get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_loop(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + loop_range = get_value(layer, "input", different_attrs) + line = "for {} in range({}):".format(layer.outputs[1], loop_range) + forward_func.extend(gen_codes([line], indent=indent)) + block = layer.blocks[0] + b_init_lines, b_forward_lines = block.gen_dygraph_code(indent=indent + 1) + init_func.extend(b_init_lines) + forward_func.extend(b_forward_lines) + + +def prim_min(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = min({})".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_mul(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} * {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + 
forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_ne(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} != {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_neg(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = -{}".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_not(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = not {}".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_or(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} or {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_replaceitem(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{}[{}] = {}".format( + get_value(layer, "list", layer_id, different_attrs), + get_value(layer, "index", layer_id, different_attrs), + get_value(layer, "item", layer_id, different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_requires_grad(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = not {}.stop_gradient".format(layer.outputs[0], + get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_rsub(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} - {} * {}".format(layer.outputs[0], + get_value(layer, "y", different_attrs), + 
get_value(layer, "x", different_attrs), + get_value(layer, "alpha", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_select(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}[".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + for dim in range(layer.attrs["dim"]): + line += ":, " + line += (get_value(layer, "index", different_attrs) + "]") + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_set_attr(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_set_item(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{}[{}] = {}".format( + get_value(layer, "dict", different_attrs), + get_value(layer, "key", different_attrs), get_value(layer, "value", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_shape_dim(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = fluid.layers.shape({})[{}]".format(layer.outputs[0], + get_value(layer, "input", different_attrs), + get_value(layer, "dim", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_slice(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}[{}: {}: {}]".format(layer.outputs[0], + get_value(layer, "input", different_attrs), + get_value(layer, "start", different_attrs), + get_value(layer, "end", different_attrs), + get_value(layer, "step", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_str(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = str({})".format(layer.outputs[0], get_value(layer, "input", 
different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_sub(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {} - {}".format(layer.outputs[0], + get_value(layer, "x", different_attrs), + get_value(layer, "y", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_tuple(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + input_len = len(layer.inputs) + len(layer.attrs) + inputs_list = list() + for i in range(input_len): + inputs_list.append(get_value(layer, "input{}".format(i), different_attrs)) + inputs_str = ', '.join(inputs_list) + line = "{} = ({})".format(layer.outputs[0], inputs_str) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_tuple_unpack(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + outputs_str = ', '.join(layer.outputs) + line = "{} = {}".format(outputs_str, get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_type(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + line = "{} = {}.dtype".format(layer.outputs[0], get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_update_end(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + lines = [] + input_shape = get_value(layer, "input_shape", different_attrs) + point = get_value(layer, "point", different_attrs) + axis = get_value(layer, "axis", different_attrs) + lines.append("if len{} == 2:".format(point)) + lines.append(" {} = {}[1]".format(layer.outputs[0], point)) + lines.append("else:") + lines.append(" {} = {}[]".format(layer.outputs[0], dim)) + forward_func.extend(gen_codes(lines, indent=indent)) + + +def prim_var2list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, 
different_attrs=None): + line = "{} = {}.numpy().tolist()".format(layer.outputs[0], + get_value(layer, "input", different_attrs)) + forward_func.extend(gen_codes([line], indent=indent)) + + +def prim_warnings(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None): + lines = ["import warnings"] + line = "warnings.warn({}, stacklevel={})".format( + get_value(layer, "input", different_attrs), layer.attrs["stacklevel"]) + lines.append(line) + forward_func.extend(gen_codes(lines, indent=indent)) diff --git a/x2paddle/op_mapper/static/__init__.py b/x2paddle/op_mapper/static/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/x2paddle/op_mapper/static/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/static/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce9f7f6cef160188c07b7f9b0c4f8ab282c4409d GIT binary patch literal 175 zcmZ?b<>g`kf|ZT4;z9Id5CH>>K!yVl7qb9~6oz01O-8?!3`HPe1o10EKR2&LzqmB7 zGBGbLF)!V~P(Q*bATcE+CpA7fKP453mzb888lRV1u3uqP09K%%Ul5<0SWu8!q+eW; kSdy8nA0MBYmst`YuUAlci^C>2KczG$)edCiXCP((0Qb)>B>(^b literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/__init__.py b/x2paddle/op_mapper/static/caffe2paddle/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/x2paddle/op_mapper/static/caffe2paddle/__pycache__/__init__.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b20d50c877d33e5b093373e6110969a04ef2275 GIT binary patch literal 188 zcmZ?b<>g`kf}W;X@gVv!h=2h`Aj1KOi&=m~3PUi1CZpdLs7FA>OwIqiZ`t#OImHXD@qxWYV`K$ndzRM z?%t{z?rgJKrnwX)z@b7%POQj8G#faIqbNomWFQFyM?vC1FpvPTV?ZgujuHEXxd=%f z1V=!SeBXcS(%sV|E^YZKGw8aWbLv!`|Ns8$IZu_#1p|Ly`|WQw-uq3%_!FK)e`y>% zfuGyMMj6WN8S8p)uA6dht*5ZJdg-0addB3vR4==eThGaHx@TYIwM;L+Q&=yA*NSp2 z+bivq*UP5ybB4+(`&C2P?cA-@dPSL6jMe;Gc%pG>)eH(xH#augKj$=uTR5{nJ^WO2 
zIBYvOHXp*K*y8PmyVV@Fd3w32R8JRgv5Dx9ufjhUy)fDayT5KJQ&}pd(r>2LQ(rbV zOqEgDSMBw*?$CNhJ)sJ!_^P>{RV7t^)mYD|imGC7tD0KCKCc$l5$p?UNgc($sFu|V z_9b;p9ml?`PN`_t-z`Y`r)tH;$xu&;mFNE_?-;6pzdl;s0Q-*r?N$`@Q~UUNsM!sC8_ZDY6J z@;ZZlv$y7UHg^UcH7bU8y=H%FH5Fu^yyiaF>;<_i`hJkU+U$RKP5DM6?PYEm*k^H`8(SS7+g=_tgrR!f!dbyLy6J8a+d`ux z$7MOLcvY3!uXzh{ZSj_Q+q`9(#@=`I`7_1~`(r$zo8CX-r*50$w4d%Ssr0txyrD9F zy0OIbamG998-C`N<@~m9>@WN2+i1gE(bvXVKkKL2>h+yqviEC#hCO}7ygvLa>URtKcVV05bLH$bwl>d{ zLB6;YWL>Z6by}-dP#QL!cHe9Cnm5}{P}n4oG`yR`_NaPw;5B>o{y??e$LcSQgQaKM zLvQOw$8A47=)XAV?Q$%GrHPZcz*VqOspB?UyRJ9bX$*#=2d{3m>w_WMsdxJI9W>gj zcU-jY)!p6UaNu}tRlnKx)>iKhGPu%q9FBHiHyWLO$7?hqE|nez8NExZPRk2&H`<-e zEzb>Xr|k}UFXBZOcipy%9xw0aI$3FNs2aL%vC|*!dJTEMAS)+sP#Wxd$<>@bbA!y# z>GZuIyD@NfnqH9VwfjNd8w>{x7d;Q`&9-hRD9E$q0|vRRrrY#9C#bmLZ4EI&4#nk0 zIs87lgTaDoH+xf^4r;37v|C>9W@D#0#CJpwC$9wgkM6ZFDjW!T3P!WJ)Arhq8&vt! z@B!q!)m)Hv+r5n-r`j!if5%zj2g&mTH2SC&qoywy(D+I3 z%ue4srvHv1dR{^r?pXFhjcV`YO4PMisz_S-kk?48#Per7P_8(>(^Xwo|)m>b>I z034TD8{Tvl(Ybr@b05XVFe|2w|1AD1X2~pB%d#)Xm4d1NZG8`CCCf5vI2$cb0n=JU zOf6uC{1Hv~dm2CYHEcZNmN7Q>9tX#mT?-t4ehkj*n)@l=0>`KPR5v{a!hoC8w=B2j zr@icU&Z+t4elFz6+Gh>V_Q8+J1Ts9QtSbi4C$(SDS3^u$@=df}LRUA#Iv2flIIbucMe2Ly1CWgf2gI<8l5y7g=A-r&Z?)nbss*gH#Tz~Lu!-o?$k zxw)5{2e1iJfM}=A)6?ACjm>JtS;H=hZw=ec=*y!il*f&#{^wbgEI zx1FbO?w-KL2nQ-_F3O;!tt^HO17nVkO%F*@`C^N>&c7ml@u>W5oB+v?Vftx8fSo~M%TjV0PV&(luLIre&)C_w!N&M!&&Z@xo`VGzcIe^eje8fe*U;Y zG`zk9v`qVEqo_>OEbW(X8!N`R;9JfYy$VMV_X_LkL7KxAqyQg5rqfsLy&y}@=&Klgq|G1% zC^?bQLqmfbZD)O{HP``2wHqN==@R$jQ4YV_YEgz!4_J*?*)``GYyz_rSpA(K52g+g zE9LDBgH%WD1$i>PAi>NCJNKdLv-rhBo5rw$3$b0YvR1`RgX1u?r>2K?8unr-o<~dZ z=zbR`GP)MV)w*Slp-^p-&%k}k0{=Ydr}k3{(iU>~H9v)M|03nCmuBhhjPu0xHQ(~n z81W3QWx5&g9VD{52l@AuozWoMvvTb?lOThtRrk97JkHhUda zm(ltBeeMhO4QH@Z7pSY>*lPFdP1S{bY$TYyYDojmC(*!aDmi|O3J^qUy1CbJR|`Q} zHNB?u5nR+9zse-&T*%2Fj%SbzGJD?l0X$q^D(wJmR7(iX_c05mjjye+ZA;W z(e{fxi~ETC{XKa`G4A818o)LHE*@UWRSB|9PU$ zkJui#7&XVXveL#lKQ1V1T*R-0(a%6*wwx{8uZ&C5Y8A**_RHjY+~L?)r6+kgvM^$W 
z{#o1y;^6+X(S6-=e;IdxDQ^%}43+iEA)Z7}+C%x;xag;Re6Mi@*pbD)>gRkL`=of40@gZW>9_xN?NRC3nDn529XOW04HAH zaRZytdn2HMQo8AF3DOKU>Z;R&ia+R6-JWO(g6axt1%-~==|dIkx7tAwlDpsTHQdgq z9poUuu2Fx?UXlZiD|T39D~OJ6(1>t4P#A5!Z{S{dbR$SX?St0bRJba;8$kgLhY~BW zqux=GAM6fS7q#Y*-Uj*vSwTcMNNuAwHfYY}k;|hYkA^tPvJIT*{SBwj*Gjb-1KoOq zI(k-j+u7jP>Tt8e&5Qzbl zI|bPZ+}o$V0=c;Yn~zsQ%`$b#c@m{Z>CeA3eql8&8ewF%&O)c2-{kZnE7NC zb}lraQ0ZTwoJ$Ng@J5T)tRgU7IDQ2x0WolY0VjL~s@>g`GLOT|2AEjN_!8L@K9+F` ztPIr;pad{lF>q#fvpCPZ32=uRnCsdbU{4tQxDIg2y$Oc=RqLyf;cIK9T9_%wZS)3B z?~(Iw{a^fn4Ks>XuR}Elh7jIqw5Uy&HEpo0QRYG@zk~u>p}Gr4kfS1oKg?>O!X_l7 z8%n9pj^>m8rZZ;E<1Cnm?yW8EQ03O&n9p{8QCiYwB#lIOmRd6(r``HHDCSbdG|W>b zwQCqZB2O)h!05;{B}t9=vsY1xI+#$-DUp}Zu1PKU^pH*lv}tO(soU1`*>RdW_815c z{TYL?Z9Ct99u9e@%zg2&u@UFhYIzL^!{g5g!E1F9@ExgX*_+Kun$ z!uw{oh7v%1_eb7>f&_&YXml65M<69El~;w=Q;-nw;Xv1yCv=bEtf=3NmYOPoxab{P z?yi8i${i!Q3lgh<#LAk)bQx7eiCR>D(?o0W8CqL-J&pH;W#Zd@-h&ND`$R3inc*|` zkNKtBX3~CAQyoDs;GL59r3^Ej@2e$9rK1;&{$mqQUY>e#d=@=f#aH|a>&EZ30?CBF zOuv!XIL9uHexAOY7#QmiE-*zOB)ias^U`=V_T<2SQ@=8(-)PsN``5k6YAy}bvx!dY z`~pZYC^fY2ra@8{0T&b+*E&sCE{x9b$*sX|-{Vs!1f{s89&jPtzzfmi&#herW{KXK zZViSvNBNh=Vf$cQGL~?q^9qjch`UF%5CSgKt@0$gII6@=N)G{`(|{n2gR3cVf*h+u znv2v4vLfnST_P2ju>17O2gQkxa52ay?_a{xlCzUxZ>C+T6z=EM>T!o^ydyxgFd^`@ zdExlH`EG7vb6*1z%|fx;ozLJX$c);~0Ky;kA>r_O-d*OV&W#w7)E~sU4XOloZPkh& z&G`~<35*fvoY%PdA~vhl2~i^b9O7z_Yi%`QJ@xcAWF_+=np<4*dTbht&aO z)CftmzhzFL-&s}c1GhyM0P6*(ZNcfI-DTpmssK+{h^HO;c8F}%1gizvYJaK7W{1e* zYE;VzAB#IL0kkO{|7{@6`6ZCb#Z@~fMqVFi>sqn>>$sy8+t+zScys;+HbI)$F}m+i z*l2i8v+oWEZu`7)zKR+L^_yb}tT=y@4c{S&duN9h8Qsm3$&S*z-;bC9K17F(5%m`4S(0Y0uXd+j!sT9`_!;bu{%Q= z!tQ^H1G0M;s*OkG2I|NyYPdW`G!e&eaybusDFufX+!$F!lM-&{eK;)P;GDT zgW46bP6ez}PFSb^U|h2D(@P4IRWAkkR=d+{FywPJsBHEI*AV0wYBTrZ=rkq##Krm! zTw-u!*Dp4EZu=h5)kp7{QUgL0`4E1I3}mVS!ZQ%jO&*JQ5mxyoPDgjo<%*r=p7iZ7 zemJCB^To}u8YG>F;Bxqeq4>R@JrKc}@K2rBM52nTn2ZplSxtNW?)JR5;gv%ou9(|Frr8^AH37|t0cuV3!MT*! 
z8??YD2xtxmeHQ^bL>>=;=mn>auQ;Kb2?6GNPG1urtb1vTJ#hX3iUt|6eRYIF@b_D6 z;5WDtB#tfBga}u&6L2i_$CeI-*&v0F^m!5y)lGwXS86d0&xl2xOZ2aVU>2qc1~p@1 zDWP&qEWe5N2$~bNsgt+E(T2(8%# zg1OSxD*VVGQiAbMC;bmu+TL+K8Lb)mh<$*LrTd^Q%%Lhn$C7diJwVwC7>HRDC%PxW zKvh-4w>ae&LI%S3QVU?9;sgUNUK)LZyz{fM9@l2fM_79dmFTN4^XS*O5hi>SN29Y- zd^;JbIfQ|y*!CjqxWCT^sZk+dic|w+w!jk9*6J_w2}eTa4V^?Z&04}02UV%N5>+ZV z(qYu7G1OtBP)N*JW@4?0dDdemp5lf!+N^3tdn?ERDJ1lhk9E4acH3nZ))ppWlsgj` zFi!_7O)!0Iwk29tP>mnf7&u5N2+SLSz0nHu9IBDcrmZ~pP_%G9g26YMXG?2_~o@c zIwWIDmpnuURc2*StkQ()Q23yb(&b@7+K0*;dxG>tRT;8_Qc(yVsXsgJJPkj{h2P=V zNl>Ry%K$0SX-~4D#Dr1h53jJ6Wijp>4^2r*rJA18N87g7$C0UDq?H6zrKrVv7s zSe|KM&}dT5^KghVP2_n$i#wo+aSlSt-aFyv5b9opD6!!_ML0T4S?D4BY$I_j(rL1A z3^JWf>of>ii%t{K4t1J5Ah|F>JB3iES)IbhD?J2#@0fDnFRw~C$7#4oPePcbs<^hfph`t&&hz77&yMl`+_(wPfv-7b|hb zDKG8BbVmH4W-QhrE!YV<%OqMc6_~w8y;O8p5@GHa9hiwP5dK6atpmdqZ0MvUI@zy3 z1j+$ANq9E{Is*lIMO-NXfifWimAe%Pb|fPDmbgtypdTCbGl@O@~Fx5)ft&_^kjGr$VS zsyqV}NM6uo;2W4Nql&MmkVslW__{ngTPW1SzsE!~5(0QEx#+aLU8kR1(B_mRgT`+C zXWO6r%v{1br}e!lnH5y}?M(!>UqrrkZ-6w_yXRX^?dJAPSWmrOO-Tpb^uL48IyyeL zE^8go-Y&raucOX{nKg}AL4ox&p#}EGn(oC;yQdsJ$N5Lx9OvdWZiGB(V@*Z>5-O8A zGX$3ASNfxvyt3L3rz~SwnX(LoC9Y`XVJ%LR7pum^HKpkM8+78G6PFfc&J)O<%Jh-w zE`rMzk!Eb?-3^%VHZtuX%=sS?p8H3>_Vxnk||J0q@Cv)2>1$*BL1td_qv*sb^5$YAycbz~mbhc~?~q+Sf0 zt?lL}@}4)sv@hI_0%M_*rj8`B-S(OoE|98ey2+g1B$d$U7@e3)p~Hl*4+Ny&Mz97& zsT|U2B@Dn&VRza}8z^`4>EZ$q3q+znU*ffC3Z4Yb1R{ex-vB`&xUqhhmBehA2bmce zn;(;;A4I`OkP;~&LZQbKKPyZy1aTiOMx~XQx*1Q2q4S40nNQtb3R%9^oLlxktYa$|Hj5^FnNT7m4=qtM1uR_LgSLTt&_2O9-|jHQ)_@2QW&*q96qq5rlBFfX zzRf$eGBve3Dh>e72LA$LWL8FR0c;aoO8jH_2w2wlqClA+MTtd~}wA8|6 zdG>MgU66LDpNThZw{HYk9m#gBCvq zfrv*MgKD^PigH>@Yw_rF;tlICKqNVnKjj`vA^ThEC#|xz05b-+ZTAb(ULK%h!;Jd22i+A88Gr|V79iPr`ACI(cM{H0g(%85dgOw>)V88WS~vCk(NSz`@K{8$5TzOwRH)xs<|9K*Pu(a zhwkS8-1?KR{MMiS>EmyuFqA*jgtK2jgYk4PkhV-cxq`j81~-A^FD#8s&)CNRAQ*&z z(rxpXt?QVGwQ5R^8X)8|DDFJR&97jCS(Db!*}lVfIhFF)*)u|H4dMSHPReCfUm17vq+byQ-XQ5qy-Io z8p!?#jcH4O&JmqroLCs5m&rH&8t?KO>)6FSt7F!ur(JpS0A_!{g5pQBqj$%Q7{pOA 
zBZfPErVWH~2F_f^aXucH>l{3GE3rx#Ye*xfJBmsOLZvVb3K0lqZ$mQ+!{Q8=>kP5b z#k_LJ@W&Wd!GmH9OM?Z#ui#tc+IBq?kxhzr$;plJMsjH-&|rC4IQcM>1;mRvAW*Fg z2E&m;t#f1#yNl2Raz~#*ZCdG=e^0gSkQQ_a*hL3QNGqt1)Dbu#Q?P0nX{|`46}X2x z%)iZvx^@)^Cia%ZJ`n`*(L+h@CTcAZ$B?9g@CV$NQ0l@fsqji33JvptkNB9&=ND8# z6v`u1@NiZXWfJIxl$3%hN4f9f(yE9{>%X8cIs^G_pRAuDkxsU#GkRqVs4#lV$jc4S z@NSaP3)Ri?q4xPXZyC=?sUs5Du{ot5F;mVl*T{;g{c;~~-Z9m&s2$+zao}r7yXg_YK-_Tw_`jUiovNya-m2 z9z_|FP;l@|1~#Ki%{@#Fz%$M7U>oat1;y`n2kj7 z9b#dW!)x#-3iTci0A9#b1UM6(Q;c~tOOZYUiEY9;2AlsOm4S+Cg#a&#Fs8vl{Y>%_ zk#9(K6%ia0upbDrRa?>#R_q!Zb56CNL`NU{tBmOsE01(b=Os>!IaGQ{0$IQWSqqp#3TG$G8hCSjnn^ueE8-dr8(z!d+RI)ElQ=C-poV9M z`3_%)D1<-gmia;Yf-;~H8y=gp7=O6*zLeO1_Jn$x08eF={?Ed&h2*%{VALU&RQ zLMN^R(OHe_*JOH)e#Qc7Eb2Sbx{hcriW~Kas08=U?G2O47nA2lo(v`$jov?H>^$jg zerh(|qS;q2L+Q zs+=u>1<9&N3Adad;2M}3u#m#s_8%YxK+Z73Tgv*GWAvaP9s$t!=Zs7s)dXjH#xvXW zGfTLSbux0OizIWfi>>JT%p-{aDYadlL;#%}+B#;eg#eHE1?STF`3ZoJVi?m7!lxaq z6U3!xX6JVyeu1wyqok29gRE9iM)hbev_6-)(47Cw7G+8$Z1W^}`XXD0fDyCvInMBt3HlPyB z7!AcslC~YuY!oxIKoLlTlB5!*qeS$86%J4z>*9cN>^Q0wC6_!h1+>pVSCuTIPs{2| zbBSY~Z)r4F890E3R3$JDb?heMM#(7888*=ppeyqu>&d$uy92WRBIe+G?f%C&9*Zcz z@z8jq(FH-A85tkeh(G(3jQ@4CaL1p0Ixmr~P0XH_%rh)cpwb>qE<`TG#LGc9qY(}% z4&6*=AY%E5kafAx12f~paJw!C=Ox_?MadIF3Bb!#a8l&ubbjJxB%ET|nmZmo=SO&7 zh@St7o%46G32N75YUl<`7i~_>B%D!^ozKOwzkWElaX^}3VK4CtVe&?u&k{4^3Xc*Z z5LPduHq&+~!mzFZPo#HTccru89j-P`Bp6LZ2C;IXq2I2;qj~(Cxy*-LApt#v4n+*D z4GFB30)n!TY8J7u_PldDI|p3aiOFh8Ce@&d{tU<}4Mj79wKK4i-L?t8In+Tvt>uQa zULdc-WS}}oY4x(Kv4iyqpdw~GPSb!i!F?MoFeNfzqNASv4=Kbf5r+O?B#4x$c55J2 zEoByBQsrc&2`Ph6BG%+nW$`~DbH9sPTz(OfX{L?@*zW9k=!X#f%(MF6#*=FDq<<1V zX<^Qju-Kz0seiF&J$?t@2J_hY&F1wr>>qqvlO+HP%P|tK%=Pa-+vYMyTvlYBfS!lJ z&LvRo=rpoR!ivd}h-Yxnz_|!0eHG=nx=4G7>AX9dv7?0~-Q68wn*-hnodXdS!+Mj# z{my^lGo!0u(@mW7?}-Df?-ap#u@*qz@hDDTC7K+pKs3U7C3Wl$2a0O^mZ4(6TokR< z{sE6D(Y0dmFL@-6ZmI(_D#8KnKr02)jRko1@;D*qCajXC;tAD|xe2Z&6VOnIgI0(#96TV?gjl_{9M>Y_~hIc*hd=)qV zA%+;8O>qe9$?x4SNCb^nQfrstbGH40%*82l9iQXynqT)mYakJ~xc5CNE4U=3zb!E| 
zU<`7GfOe9IOMi4mPyy>KNm7I9T+sYCS!(Z5zr;SlNM80!@WulE zblcbe82LqEJ@+B3a1rGVu@4jF{Ng?%dPg{^Q<8!I9@Z#a+CS>&@b2Zk=l$~j@@)fI zP?z!bD&wlR;vMs`W)kpLW)eBdJB~F~bAAP1u-Zj(x?f?kgPffV&&qOkDm+8UH_Yn_ z{WH3Ebx#AUtNV8or!i|tD-B$th4Y0XwSt;reLIIY{jtwUeRUt>CFb|yC4mFsnkhS4 z(gT9(jDG0+Gti8~B~ElU(%RK6q$qJ|A!g#u6pqaFc*VneM&~ccqL=jtTC$Gi@Y?yc zOM^a4{m#zr&K0ay3g40o47^M=fNs9Yp4~=yk$MY1)`gVT#{~i}hXW^d<1G z*L->GrLh3V0We#=M^92utPva^6j3- zyJG=iE+-nUjp_)_)cH-8xtZ~#in8+*Z-}Y=OE|(ZO2A^Sqk$zCyf!9VQ0ouUvM$6? z7Cj2C)VB6Aj}kRmEcIh3g3O3SLq3lS>&0+IQdxoY5w=3J9V_BEo8ofC(i7V^!q;!+8uk5<^L5o)V9|Px?}?@7&>%@1m=q~8o2WyF8>w*kLoBV7SZP6KS4`F(jq*u zQJS`1)vy%$W|><{AFZS)2s{$N2xG{xomrmlz|yTTkfrh97|3X95XeB@T%xWf^I65a z+U2rwWC862h7M?j!^^IU9F4RJvtad!;X=N${^%TjDDl-b<_m6M?;3Mh)AI8s|VaL9oJhZMDkK5*EjD9jZOTU_AxeXqMU zQoLptqN#p=zxUqP-~0ZzzA`&o)$sSq?nm7pU(~c8QK$Gb(D(&B@sCjmP3Qydo~reG zI{L=I7@GG?T@$9TzM;v&W<>+_jF=Nm)KxJr7Ess3 zqG+M6i(_I5^{iMHE2tadxHy4&PMj2{P&dVM;xy`caYnRJFNiOQv#1xv^Wq%pmN+k7 zKz&TSC|*LnBs$_{)XU-(aRK#;xF{~6J}xecE2vM1tKu5!lj2oz9rY>kbK;ArpA)Z% z8>mkU{avl|`V+jpw%XA%NBUyB=l66Pz#mc0#q(7>u>iG%F7$QdePg0ej6_SdO+7I- z&3!{&p!T{hjFzTm!!WUEY+o1VmL>Plwz>5VwL+KXgn3+>=x8}SBd8TF&`h~jU(o<# zgl&xS`uGn@_>X8zt)CbrX6D3V%qlr%p8%>-Vuso9A@5#Mm_5?v3Lsp@;9sf5xK$W8 z6a5*Q0hB>)#;#JXSJ+wMxnhS$dVFPS%}pAg@GcVzJY+KM?1{q|RuzWWdlPWg5=YF3 zV?Lu=;`lKjYB?s~rZ!_)Wh`y<>-pTDQ=74@mOCgc9mX=>BmTkEnjg}B6W(Rw2;I^q zm5H0orkb$!jYo#O%{w*r^~p>+BaAH?qx)#^=@oA2xFt8df_a_i-zetQZs8JUOqz*r zCv(D1XO^^m{X;#iq;6`bPO7IyYNl4-1wT5gPly4ZkaS9FWv&PYUJ^v%-o+q%=nVqV z9{AyUvT^@>{6RbFw~P695Vm_!_%Ir56L_bYRf9MPaEGH**6$xbzMMxOMBab|8|4GL@$Q!A#{h*~4G9C{^mRK24}&;^Qrp~*AY_`fzT z55wegydCa(Vc!eauUx(S8`o}oLJa(FFB1M#&+GU7Zs_k^9$y2d0Jt2Dx2rEp4W=X=B}5 zCw1$rN(*oCgx*h5kyXKp-Do=*Z70m{9h)hT+5JJ}C7Bt7iCiS;=TUU#^A^)#Gjqb;GP4O;F5ORb;WU`6=PJ;;O+WSzJN`cSV!Mw zo_QN$Qfi7{BP0kL4V)@pMP1@~i2wc?GYUKAy&TihSP(brsr6KDG=#OHr8V%NL0pM8 zaV5rx^MtjmffM0oV7$&S;7GKIBd1}A3*m<3-1eQ++B8Ee)iImN|c#S zo~0I1Uh$GGFH@W3?SR8Fx3}SiP?Rxv?HWxHAv1HM8)W8AH&7(~shMj7?1W{Jyg*yQ z1yc%ainSKibqIgzLcR_-)-jq<*XxEu3}u|t&lyk>x_lk8j 
zgJ{hgbVpG%2*UMS%%@7IXL^1x=nkQY-|INpY!0EAHmg9G*hX9dx|v0w@+z9l%^l}} z>ba~>9V369mN-K{*@(nZ#-3G_%nw@!WK2>|OkLM4{WNF^;RH1$iEBwgVh9pT7DbX_ zMZqbCGT_}AtiB3iG_u72Od`7@8Nq2xa&deft2^eA7dVRMfSjq0nsYsZ*6iwxrwU(Kjv1~S~kM9hjumv5o{|KGc+_Llx&`?seyfBSRU zJio>`>L){Q{NdF8w~p>_Vt@%l_E@>(n{bAYYjDfqkRQ^X)*y(J!f>$m%xr#*%oM@! zS(L057Y38_%_($#Wf~p2no61nHTW0>)u{C)MB5 zK6nS-wk0&^m|q2TtnrP+fD?gt->2DyiIZB7E%J+xYeby~*Y9c%mQpJfrUysK zQdeYAr+g2~l!|8W&Vr41K^k~~#)>ZQVzwkq6P=PUFF}}IaMm|{|aGKOb`Kt)R_ltb7dKSD5JE@{+`Hxq<#9AaSXq(-63 zq&hyER#SINlYf*}9=pj*iU5n*ubR%hrcJ63#&@xL9bO}RMt5C@O|IaAYU3oSs&n{) z#H5bV+3{H(HOgyiq_dA*;MK^l?NeM%C9Ml%nXV133VQTlgt9LNv0?A*9n4by5cM&2r`Spu%ir?D;mb}PXHd@Kmm^uW}hMm2V$rA ztd2N?xUGt~qt2rX%zG4nxEO5$Rtl^wdGP;iMsl1+r0f5laH5?wUHYHP?uD)o)`w9b zKAGCT|GDg+i>$X6BXItQsqM)-csg+?+9EM1%m>*RzNlxJh=x)PUaz+;z25FWPMx&z z6Hg*@@zYxK+~OjX-}rQD^Wf;_bUUnoN#iL!Q@jXy5&!FpmmL_n1?JVk;F!a+pn>u=Tl|PzX`a8~Fb`wfkUt_v7f4D%rplldt>@1YpWLW`&%G zGZfs)7oe1l&=6416yi__BH!Tf!w?RXHL_dqryZ!TJEB58C^+PFNL3?$L^U!!oIx_T zY$?;35UMzxLFfZzJE>COy=BMWNo;}9JUc|nT95w{V^H6z+c)xY2j|%F-(egI99bml z8}gIXMc`dc9qd<2YrOW~07Gqz_F)>no=vO-_I9(f>8fxZs(o(UR>lF= zxP`!K?r{~_sisovk94xS<8y#O0ss&zIRx)91$6^ZX8{FF_Klyg({J6?K%KdLoe~7g zQ2xjuegvHBNsUGo#-Q&!uv!q6$>La+=k{@KdFG^*wm8G4%YRQ7fNwK5Y)w$(7_dJE z6Ev3|yQzgQ0}}K?7O$Spt5)pbOr$nI%jRSrxy8k_^}04$#Jm0t4ADH@4d6db7bi<} zw`uivDD|=o+?Vqlr6E_-CEPgrSP7`KCbXwOCDTyjmy;Rnv5=e46L_N~U|CHUh>m17 zX|G4dgAdu1F2T5~EaJz&w*iy7RPaPnr`}22Ft49qhIa&PK*PyOx{_8n_xPV!HAlT- zm2lEI=F~Y>5zW;y^-ZdR^d6Zu_WP1tKvHerfL%6M4`~8YIt)s>Glkk|p?eUcWV)B> zYe$Wpdvo9o*M#@xzhKdS;L-LjDdix2t}opjL=fWm%}Z=f54y#|4)W4ihSa@FIVAG$ zj~pVqnEw%;T#4kYS=wHhJ$t35txV8mE+U;=%O+ zZi6Hk4Sd3hcG&xR3B492(GZYudl-e{HAL_bDd;tnG+b!-g zn=e*$r9T>YJzpFeBGO~1PEfcL=90_>UHH=2tITGDu#b$Nuec+d=|!Vm1=zslQT{(c zx{q8^_FO^kY%Yfh;%*QMf1EY^@d(~>P(bQ~GAQDuM%>Ckk1X z1|HN#DMt56GrVzLTxio5qA=RIF!Z({aNE*v??&704eud(e!?0=g;ezIXgnTYf{Z3n zGEUn4VBo)}LaLdp?#XrJVy5IOtAPda8D-psW1ZO)5N+|touL;inL7|S%5HVsDSgM_nR(h;{$znFzyUrPKj(Oh_HT)1+K2Sjz-^ekG-kLyA3- 
zPvNn9sjbEY!h5hSU{f1jrUV&~en{;DR)wEqu?IunYms(OZR)YVW6HmUFTwu}Ff~KW z?GXDaallM|7OkT(c*gkF4c2!G7AGHL@1?_J?&D);;)kE5^i{UcdoLD7tTcbHl7%*9 z)Y9#GDMNIGhTT5ytN0rsN2Xy)(*v$Wi>PM0^A`2VYQNvpCylB6ZAcGG-yJlj(}HlPNt3>mnn;jB zBALOEF#@}Qr4PoyqHiVSdKX~~U{jMAtB_cF0p+JHeTG3`8}Vh#%I5e#Jm_=u^mUY% z_1WYoE2YC6k>wF=xmBNDypGc;H%cUL>8Eu0UGzI=Z{Ww zntqiE`liF5B_z>_)AaIfYTcrOGR>T9<-{Uqb~pheDN76P4q{wkl;McIBo@*(oOlM?tWh;S#$b z0M7v4j{+}Q`|6WVIjxSl=ImeK@-M(Ox#%zCpwKO_&EUYqnYo}NlS{%nu|yX!gglQoycMA0K1XP*ah|?kFf{rM?Pa8 zxEJ*p_ke>aU>pGVqdwz4@E{s69sqAd8;m!AhtZJn5O@@ggr&rp6>Z9q-2B3duE;BL z3(qZiRc_;XRbG?V@!Xa-2)HyB?Tvj^#aivBrczqT zI7?2HHkcUbA}xy|OY?nNi2-YbrQd!^Fzx>A5wq^i`UN{hUxtFo>j>gR=?Cs}$* z^S)6T=7BcNte#FWm?e3xGDxmZiv05;t7*4XWi>lW4efE0@V#dYvH8U!aaV_I3HNg?k>E_X+-0l+4@c+ zqI+-I7+JtEy=7m%bIgAf^Vi4l@1S;NTM5AeV|3cOH-7pEulWJDvFVz=&2Z-8g zIjb?@{Z7m~Iv65^Rg&f=OsYz!dpHym=1NH=!@ZMmdJhwNzn-hSGOX<*RnmL^eiw$wrCn`+J|;_BzcZ+lkcNL*dIzuj z-|_A|uXatHpCtJ-$@hQp%iX^``ePzxrr3zJQ8JyXI9Erz$B#;0uv?VzJSoBST>~4^ z$?k;KT+H$yp46r)=3UC{#&MdbRUAJoPuc*iA?M6>*kLxw`Fchx zimMa2MA7NG+dyp}@$Q`XDTaoV%+ap{uM&=|>Y=fTHVl?RTA77}6X)VNis7Mc!X=dKK7zY))!GjYq3gF{dMSN0b!ns@y7 z%A<8i;XnP=Uigb%<4XZuUn6OI^!8sP$v>;|`xyDpE&UBr+Ls%*tR3sT$78Q?rF)as zd8GA+`;hXO=Q-Hicyc(k*}iSEb!q!jRs{_TgtX2a9M0&erjw0EQms`oO}9xa;;|ip z;j04Iddyx;K^)tyOGQ$L7@T^aKKXg{thHWH+%<85<>P*9Yt?#LainzX9u#TbI+;Re z>brIEQYqIzA&qy@wF5SR)4dg@4XSy$9Hgy1nO&yE zfNq8^=N@3Yc@v!_0?^kNL;k-GLc16T7`fsW_gffk*`W9%tX@%^_~W24e?9@ALcI`; zsKlXfZdW!+ws^%l6aRpu5j$WM$~TpBXW^ho6I1Mk3nti&duD?N-onQ$vB7S7Q=Ir$ zf{OO(LsX(UZX(5%xx1A@GfPTECGG!G<@7E`>C-pNGZgNs%POkk>nly`v4(N4i{@)^ zpxu08jM8*@eW-PzHN}XX=HzZS(i~RA-M)YhTq@oj4OkY_1<-A}LF)Qc&$#~So22Pi z)Llwc+?ecI+_}{${}2n!Hag3pbD^{FQD!#L>)$}SIs>{eS7-1TgCBPW(vfb4Q)rxX zw?Sq!HeZk4>EjRt;V4r@M~O-&voNI#o9>%#f(zubU|rmJT%vwl+`ITtmf`Mr8GHQ} zB;b2)N?em|A9k3c))O^$_1l1g0&j{F;*FT1FnTK5c)lg zpROvQN3&?mH70aM-w z9*d{r8*Om$F{rs9k&Buf*2Eu8?rZw|;!37Thwf8HWlwKVM*-9$>L^b*8~8Hml7lG8 sA>~)DS;?ov_fhb8UdTFAzolYmsB{gTuHani9p4GI13%aaHiM1t0Z^C;UjP6A literal 0 HcmV?d00001 diff --git 
a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/axpy.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/axpy.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d02bb43450d48c860da1c49bca18d48429635b10 GIT binary patch literal 1117 zcmZ8h&2Q5%6t|tE&APUm&_Ej!QV#48Bo>%BP7~wfS|Q+ohEQe2ZksJlBHP*8DZQ{= zcjd|v@h|zxss92e-g7sYaAdzvKfm95PkGhrbqK6)KfVv&dxZQ#;dZrvc?ny;fkO~M zb225zgc8Aom(!0#_#(I>$4vA@TZC8i*b{xRCOW|Rl8AN5q$hpRz4qZw>jQ~5mJ~w8 z)P_oqGi{_=f{Y6KBt;W01{@UC` zpdGBBTOe)ikOpl6Y;D6D3U}B_e@RE|0o(+CI;N0dI9Ngz$i?HLG|?H)GZAG)Rhvjp zcqOAzsUkkB=26b)QYmy*nD)heJt^y4L=%1{Bh9BW64_KQ5z0lpx{DFhvd{>-Fn>dR z-qq2;)2D~guKp5x*2`sK`$t+!WwNq(t5m71r;Uo4?YdNkE}XXAvNn*}O8iMw7vB19 zl4)TQT^Dm+jCe7A_B{Et|Befh%VAmyc}L=-ksKECJeloR&LJtQ;gnaEREaj+WNDJ3 z&#l!7!?f0>oDLoS1WU13&20ynRU!doq5E(Ux=E>)?Od~Cz z8Bo{O3Zo6Db#AgMpIZ+=$8FnkMb&owPI1*H1hE0ns|mMsW+`|>wOxrkh1hXc9Oe+1F2S5t_z-NtcFR*y*fwf? zhE|gvx)rxd{0FobR4&}Oa^w%-I9Iq-LWlz=gb?31c6R%V1GfC;&71e;jmI;e@x^kv zVBz}V@i*PeE0*;ORTftn$_}1*7lK=y4Xu%N-C`EExie%}sC0++b(@!Yj(hjnb%$4Y zp8L>TzRZ_+0a`A!crkQBH_Y+U0~gWmXI7{BgrU<8)4mM*QJjSG3F>5Er+0fL(s8Zf zaq(Qj6W45vf^pWjZ`kwFyqs9dP1wGDXva!(aZxuXig zQ!aPbta(WljqQpmiVAv3y;&Ox=Vj4OBjQ_`*IiYhhW-d{FaShEy+LUS3dGr;*i~rq)!9TL(*ct3MPIg={qRR_#p3NL5=kRI`t7j+Nx?N?J-! 
zusSU1K-@VKiCe)i;+w-z^pnBoZ^d71j`ucsGz^kxEH)#t*&B;nzFDuxMuza9unIr3i=JIJy&RFHU5{L+vj{s&nw698}D- zpE(bXG}zJ@I4!i9c4AZ^piM}dP%QP?rMK{mHpoi&MC2He<2V&Fxr`~%toNNIPec3b z{`N>D+woM~4#Zv{`e)zWzIN`Dfb(J4?TvZ(R4v%s3%eqmZGU+VgNDQQc+wpOlSwGI zF}X?9+wPIiqSaKoy=k0`N8Mp?8*Wd18y-%Q!7Pfyt;ub@{<4O2Vm*v9z6@cNO02}H zc8t^gV}OK~BOoyVNdPZYew+b`7WXnxL$({w0LeB$sv01nHNKm9 zeFY#Pf>4m*5Sts>u~|*_?+uXB?6mE#Ef9kYzX30l>8A`xlzj!LA)9^%NNM((SWhG3 ze^M`OO%F&y3=o8QK>TWkCg5V;z|#ac3})o{3`q@?gqJ4J1TTZ8Pt!J_sR2917-Yo* zgQj|hrV?c^v8^LC+%sr6ZfUEQY9F{5+2bS!9kmU7vO^{+rjGWsK3_|`{rtYazob?T zLVzt*?b~Wa)gadt4l=;5%snIX5B&MMS_fqOU4s@w8kxgF_@9u`es>l|{Xr7zT=!-a z#GSlsp&QwR(0SS_k?3o$FUQjfu#-A#mt3_gf>GEhNS^}oB$i0fdXz;HRS4~+`%T+I z=)&Gq^axbJPm+E=%M&DCAyFgo9tks$ zlcc>$Vw1#c5IScv$k)m628lPx>h`*k(77IwPv^`lf;H2`z$F!Lli$lXxPiiXw8z&V ztV+67|9gwq%;xs7k!kZkzsc8ug%a9q=9xP!eI_rU?;f2YbkUJ;^xr}6(6{EcvL9Ns z;9J%s#&G(Zn!TuDkvO~Z41DDl1b$>ZqMqO#$4r`bd5{FUlr4iUebeVB;KWM_mqpCb zYfLk5;kAfS@*;FyzjS<*eYn77$JOpZB)HC{=T6Q{qRwYsb&9*kSEqceL+h~Jjwfj% zbZx=#t=?FMTR1I;>8B?&nh7&Qcc6V4@>kVg_b2^+z);k$KgNp=V}H=% zR&dQe#@3u-2Ml<@R&2&N0}JeepD}R2eZ^)LL=Zsl6`$E~03q~o<_HD@VF_C}FuZoq z$NPefM_Z1e#$5YSWO<{6+~S%P{7FhC5*jURpJHnW9dNKRE8`fzo^Jm^*?Oq{h5UMX zS+7;xERtGC0&Kd!zn!c~HEGu6RZ`BAGJE>bI%cI(C zm{fJVN@|QZX;h-}bdu7ZTdP^b>AF$XDlU>M+y+94ROMA!tremXZzfQ)s z?w6}3RtqVbMOA=y6`tr)+d?Ih$hD;tKTWIbO2+5arS9*i@ry+2=sZc6nXJ|&AVJPWVf=O1pfXT`r4V(zgEcfk z@4)TQKEyiqwYOs=0;-E#=?-N?M1O>Mw6hdfjdth7ItT3;CN?_S^Df?zaXiwVxg*UE zK%K~Br61gf80Rn>U~nm7Ob%IJmGV9M%fqxP=Q)Tn6>l&~;;DAZWF=+?D=YzCs&XFB zi$p0=&iY1Wih{P;=s(N2z>7b#>hqahpTE$)F;YQ0Am+)sP}-a4qJT#8xwbKhH`iKr z>9_y8S|)WeHOJU7-{&Fs@rz79;v*d2#^P~n|9O133b}lW{;9d3I3a2f zU(6+>j5D^eS_{d?rv}v6mQPTv9Y|E7!Fsm!nR!me4jy%0!tT^vldbOD_>VhNL*X;oLtja6#&nAK#;&9^^yv0gJ0$8J vu2o)`m&ask{&xqdc|T2!{o_>y>q0!I{G0c&!;;#*i}l!l!3TkRTf~0>G?c6D literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/normalize.cpython-37.pyc 
b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/normalize.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b3dc6250ae13189d75d5be0dd4552b0007171e8 GIT binary patch literal 1268 zcmZ8h&yUnL6t)v5lli&JsuBfpm`kOmhaH$RpaO;C6bTO1ULq^@%x3U7k?n{bl)V(L z^DmS$sDF{Koc15+p_leO4?Aq@$d2vzzL)3kea_SIctD{2_2yHkreO8#1jbI?uWH2sD+`-m|j+};reTGR#1$`6Q zV+V}Br@zyVi1-ev+w&1R3s_NN*F-+p)_OEXg|UBJ)(R71bwTho_4XC)gdb z0?fJxUKyY=K=pT240iD)?)AGKAiIDZ-M~;W+(p4-(51oW4>8gaL>>b&>oTB!vUu@s z{;O8oscoC4Hg3vGQ(Ifu1}Nid`smU01y@#1m$jKLt_dKr|$sAt!?(%KJ}tuE2TmG2+w<fpkEX<``r5U*h@+eBAep z+^3XnTIH%nZS%fQgBI=CdMm5dxw8cp=>J*WI#5`Ab-M@1a90T(Q1c_k=^sRpGtb^s zT7>HPBpk%|z9MtBfCkq)PfU==$@RTFEo&oBTUV)YkHX19=o{&TKR{qAd$_4ZtK?%8 WTcBt}vp7rOR|3!AXpls7|NIL}Hc8+B literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/permute.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/permute.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7aef9647aea78b0ced9f809e3909b449f6af541 GIT binary patch literal 1000 zcmZ8f&2AGh5cb&KY?3Bz6$#V|A@u-cfs)$e@&vzOTb_e z*tU|>OdF{#(RK&0J5I3zzXFtQKgaE}H<*2!%;}WO+38*bFUaBUoHyK%hMvPQ(L)1* z8hV?Y!xw1Sch=B`Z<8MYEicd+NiJejjsdgr%+Nq?P)keO* zuMZ<`SuP8^{#0wJOjZ^zl`56xMKY7Nl~k21L=@Pytf*=e>!YNSHt?Zrm=z1VSJvkG zmS)*cqSi6@-;>d-Fe69k;zTmkC6 zSs`q&B#5uohRc_j9O*b7yMLe@@$FzW=3!bYIjl{V`+@j&eDE9PxTci{qy1=BiaM8% UU6Xo%q6cl(?R3L#Kw8a=;N`Z9}1n8yc7Aer95uiB~ivWUFq!jJSl0eFh z9nh!rx(|??(qkW?kI+}(sX$MA>$#V9hARhd(IGgR8I8yxzi;$pJRTB|FE($JHvu7k zptF+^7~jCu0+b+vR%A)e2_=FFucFV0xFdWK{6x>07>H2tf{25HiXn^fzyd159G#S|dXkIdYYuDj?@9c@4TY^@YC|Q9Qg>3_fR6!sf021;gA8DD znBT(G&p{ce?SyPS!M5B`L9Uqo)NsMRB;YgWKL+3Hh+(hkjJUWbyc1$P!B5DH7(OFg zAI~Lx? 
zqg1yt>k^fAWn;rU>oe%W`itb^Vr`>l)xH1n7ow)H>|g8ovhL=3Rd3QdPwV2r!}$-7 zo~A-nGRYbt_q;UEWm3zR^Yx?FCCr;PS*C3()m(S5Z8py^XZO}QNw(5mvrMXV18J=) z8+FmFPuq>1?3~*|eW*qsgCaDf943w_ozV$Z7%uP?N^ksr)*FAGeOJ7FgYxTHZ~S(a zA)!!?U`}8<0N;Vp12}>w10W*+ioT{Dy$bcc-ey;!deAY9*MXsi=4?Ceyi0!Rn~{l( zXSnfAXaX}ZW5di4pyb!lbGDsq4>9juU?u=C6Mo_S4ADMzxEFJImjg2~hXrN^*K9jA zQy{@$j|9)+p*>iZwS!oL*`uxPcpq+U!yi*srgbJQ&#SVvJVWgRty^t_CeO9(EU(k0 zw0ChmTm~4TEK>@-~`Aq19u;&c0QE6i!4;|Ik4s$N>M+{Q<&ZYN3>dvL; z0&8~|$o&E(_#BCwU} z@CF$!oWf7xb|47U3d5$iM);6X{v5S@{P3ofbV z*0(YSAY1n>MY4&`C;Af2%KsHDr;I_%C$ZYH<|FM~S0*+!L*8f23ZBk%Z9>a&e1l|Skpr{aW;Qbuwr>6mPqx^G^& zruT`+^rCl?Ku>1>>(#ejp`8xX#kY6WF&R)r=ty$PrVua}i>t#~=$5`ssPt{@WpiGZ zZrCV2gHQ%hWUfLKm1_q|acMgKOv+4_E<{**=FXJsNmNZ(WMNe3m!8Q`nyHqqA!gi4 zJp7L2Dx7|ub;d$>)I?0F=u?qAeAIcf^W1hfGTmL`Xur=R!B0D9J5~ETSssmPo^#nz zn(8#}#Kxx9TINbTo%$(wkWb5?W||O(eQnT68@HVm95@cX;j}PNTyYMT>tGebUMv{C z>a8#tX#^9AyCIh5WwWU*ku{K&w-T*d)4dr#9*if5YO@4;LdMo@r*kF literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/relu6.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/relu6.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..301b7ed66f6dda94bca49c7718c169d00b3cf761 GIT binary patch literal 784 zcmZ8fOK;Oa5Z;#)HxG&`aX{h^7@-(?=m}K_IHCygkWeqy%DYJ&?6tMKRvRg~_1fRS znF9y@&R#k7FK}XJlR(8tJD%Bj?Dx(1^>jKSAkTh$D}GW!exY-a0T{2~xVNAbQM4uv zxgeA%rg%+15hYY|LoS$_s-a46=ml2~)R7v2B{WfE%{13SO>PC$3_g=vg&s}fU(k!G| zK7WyaIejmcs&!GeO5gM3VxbG8uk!2DE+*t{S2VJNe!26~S7lye&LJ9|qU@b-o1&I$ zNVB@`&t}~^jH9(fgJ2v3MW{_d&r6OD2u9G3gr0!0fmuE#Tkgqed% z^0~!ioXK@2!e9q7Fk`}K(VNm&t&w#|;?r3Y25}^V3aWW2N-y_$p8QV literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/roipooling.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/roipooling.cpython-37.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3ab18417451020413d93f87d9552f72fdb393aee GIT binary patch literal 1141 zcmZ8g&2H2%5Vm6{o2I*MRUz?nKmv(FI5f}$rwXAWA#qtM;DES9CiZ4ivPon+YKyWr zu6+j19C#sLIrSAdG2`7WD{OiG;_=LUGdZ115&~=b>qnk)LVlyNSrG`Y;Fvc+B$2cx z3-XyzA_K{4`jN=Tv##$ViGeuYhy^(N8;*gKW6(<9 zzZKcXTia;*0BALLfRG5$yhkG%tanWHW3U%EYq)USaQHl?7_%hHUiXUza5eDTLJ&^%6MQIn|L*PCY7|oL_}8kDfD+ER3w`!( zw02H|=@kADT#!u~YE0T4ZieuvXtg@(ZB_d*@pbtSTY2F~X%A9-vS?*rtCv`n}P Qj3v8Jz2C_s2{$191u5_zxc~qF literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/select.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/select.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cd158cdde33528afa43defb82f50d8f6e0dc1cb GIT binary patch literal 1265 zcmYjQ&2A($5O%rk?#^VA4Xag>y^TN|dRb*4NP9pl#Q%ZAXr)C01?@Jiwlitc(>>Up zO@cf^VmPwTz;fik3;D_^ufQc#c_t`sRdv}_E|b=q9xoEG5Gq)y1jk)a5b&r$3Ts63wjkS!C>m)!Gc3&%WPf#T=f zJi@gE{j$qF^Ast)^x|CDuN^4ig_oy%pRM3m@NAiq?~wV_!&B5#z`Eqb(Ban{m7x-U zey{0XRc)h2wry%%&ikfwql?C!jlQ?8GVY^y)#H)w6#icnJS45|!tSTmYU7%|`_h=+ z6j|7Ay7|JD_NZ8TQGOXY8jZ-!h*g&^cqSYVMCG8=qA>_$m4=;`1;K4w0#D~8B0Bq?p)Bpeg literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/shufflechannel.cpython-37.pyc b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/shufflechannel.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad6f5acc02546f09c125f2a263b1282798b50f95 GIT binary patch literal 849 zcmZ8fOK#gR5G5s2RvafTf__k-7f=gWZPr0iG+i47(naBoK!}vBR6mdu5(9Eq+2#Vh zL9fJZSGhu0ouTbE^$4Vf`P2a3VQ;25y z_Fek*{IigD*NpH_PACR_PR*MeurN#^8yEv{jCf5)%H_(M43QkZ{`^42T`XSrjX)@)`WYrWz1o)Ga$ACgIrgn=q~pFZ9QU~0XqNyK zoX??y60Q-pJKL9jmbY5XdRtb$K%SX@;1M$4WL}*3qHbkhsrSg&pb8TjhcO#RV;<1$ F_Xn*a&jkPg literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/upsample.cpython-37.pyc 
b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/__pycache__/upsample.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7052c15853d0cdcbeb413aa25a901704e90d1825 GIT binary patch literal 1394 zcmZ{kPjA~c6u?P|vQ2h^M4|+e zY7)b_G`;pS^t{VH5U)MuE9}%qD$d-sqrvC*9{>2g_j~ly!5~5~Uj6+c69GaWt+5_G zP#(inFF`OgLku&o#1)=ljF`_z>HUUSz}{}qk5m;2F^Rv*@Kn(WJBuA}D_Ey`(FUsJJo{F)Z5-OcMN+NYYsQZm__ z?4^76(#a&b`(S$SesqZ;s4+I7CL{1$6(g)gx7k3;o?273jm-9hKzU3rY$K_hhTJ3VkZ0WYkB#320@S&;K!6)Z5M z)>}S`A_z8_HdN9oDMZuiOdZmOtLf%~%4x~3Jd*2Hiq4QWspXHN*2x=M7OcA=aW^FM zS|)9yXw{TFDd`!P(ueJrHrq?9jfwF~F3j+WQe5hy7SE-ur6KyP;l?k79`{VV8I%cJ zBxY;FX#&AdvcvA{#Kd*0ue10I$#39?{y9x6p;Oh0Gb-i)Z0~OR$K)5vSjn@zW_-h< z^EuDp#`JX3xD9FDWEE{1E>oqcF7hF5#&F;GSctWDIx9ffctw7<_7cKeKua1trgM%?XfSUEQjMaITN&A+} z&bn3MA4u}uRu`pfC|3*j6B|+4_@ygl_^_&3Tk=O+;8G(H#EWt0hda@BwB_OM^9dU! BZ0-O6 literal 0 HcmV?d00001 diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/axpy.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/axpy.py index 6e0f843..9760b46 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/axpy.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/axpy.py @@ -16,8 +16,8 @@ def axpy_layer(inputs, input_shape=None, name=None): alpha = inputs[0] x = inputs[1] y = inputs[2] - out = fluid.layers.elementwise_mul(x, alpha, axis=0) - out = fluid.layers.elementwise_add(out, y, name=name) + out = paddle.multiply(x, alpha, axis=0) + out = paddle.add(out, y, name=name) return out diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/convolutiondepthwise.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/convolutiondepthwise.py index 670d543..9088834 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/convolutiondepthwise.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/convolutiondepthwise.py @@ -15,15 +15,6 @@ def convolutiondepthwise_shape(input_shape, kernel_w=None, 
stride_h=None, stride_w=None): - [k_h, k_w] = [1, 1] - if isinstance(kernel_size, numbers.Number): - [k_h, k_w] = [kernel_size] * 2 - elif len(kernel_size) > 0: - k_h = kernel_h if kernel_h > 0 else kernel_size[0] - k_w = kernel_w if kernel_w > 0 else kernel_size[len(kernel_size) - 1] - elif kernel_h > 0 or kernel_w > 0: - k_h = kernel_h - k_w = kernel_w [s_h, s_w] = [1, 1] if isinstance(stride, numbers.Number): [s_h, s_w] = [stride] * 2 @@ -79,15 +70,6 @@ def convolutiondepthwise_layer(inputs, input_shape=None, name=None): import numbers - [k_h, k_w] = [1, 1] - if isinstance(kernel_size, numbers.Number): - [k_h, k_w] = [kernel_size] * 2 - elif len(kernel_size) > 0: - k_h = kernel_h if kernel_h > 0 else kernel_size[0] - k_w = kernel_w if kernel_w > 0 else kernel_size[len(kernel_size) - 1] - elif kernel_h > 0 or kernel_w > 0: - k_h = kernel_h - k_w = kernel_w [s_h, s_w] = [1, 1] if isinstance(stride, numbers.Number): [s_h, s_w] = [stride] * 2 @@ -122,16 +104,14 @@ def convolutiondepthwise_layer(inputs, c_out = num_output if num_output is not None else input_shape[0][1] group = int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in / (c_out / c_in)) - out = fluid.layers.conv2d( + out = paddle.nn.functional.conv2d( input, dilation=[dila_h, dila_w], - filter_size=[k_h, k_w], stride=[s_h, s_w], padding=[p_h, p_w], groups=group, - num_filters=c_out, - param_attr=name + '_weights', - bias_attr=name + '_bias', + weight=name + '_weights', + bias=name + '_bias', name=name) return out diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py index e8463d5..7e29c31 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/detectionoutput.py @@ -14,30 +14,18 @@ def detectionoutput_layer(inputs, confidence_threshold=0.1, input_shape=None, name=None): - nms_param_str = 
nms_param - nms_param = {} - part = nms_param_str.split(',') - for s in part: - if s == '': - break - else: - name, obj = s.split(': ') - if name == 'top_k': - nms_param[name] = int(obj) - else: - nms_param[name] = float(obj) if nms_param is None: nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} mbox_conf_flatten = inputs[1] mbox_priorbox = inputs[2] - mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1) + mbox_priorbox_list = paddle.split(mbox_priorbox, 2, dim=1) pb = mbox_priorbox_list[0] pbv = mbox_priorbox_list[1] - pb = fluid.layers.reshape(x=pb, shape=[-1, 4]) - pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4]) + pb = paddle.reshape(x=pb, shape=[-1, 4]) + pbv = paddle.reshape(x=pbv, shape=[-1, 4]) mbox_loc = inputs[0] - mbox_loc = fluid.layers.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4]) - mbox_conf_flatten = fluid.layers.reshape( + mbox_loc = paddle.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4]) + mbox_conf_flatten = paddle.reshape( x=mbox_conf_flatten, shape=[0, pb.shape[0], -1]) default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/normalize.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/normalize.py index 19c583a..806d59a 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/normalize.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/normalize.py @@ -13,14 +13,14 @@ def normalize_layer(inputs, name=None): assert across_spatial == False, "Only support across_spatial == False for Normalize" input = inputs[0] - l2_norm = fluid.layers.l2_normalize(input, axis=1, name=name + '_l2') - scale_param = fluid.layers.create_parameter( + l2_norm = paddle.nn.functional.normalize(input, axis=1, p=2, name=name + '_l2') + scale_param = paddle.static.create_parameter( shape=[1] if channel_shared else [1, 1, 1, input_shape[0][1]], dtype=input.dtype, - attr=fluid.ParamAttr(name=name + '_scale')) - scale_param = 
fluid.layers.reshape(x=scale_param, \ + attr=paddle.ParamAttr(name=name + '_scale')) + scale_param = paddle.reshape(x=scale_param, \ shape=[1] if channel_shared else [input_shape[0][1]]) - out = fluid.layers.elementwise_mul( + out = paddle.multiply( x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1) return out diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/permute.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/permute.py index 617af1f..2e6befd 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/permute.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/permute.py @@ -14,7 +14,7 @@ def permute_shape(input_shape, order=None): def permute_layer(inputs, order=None, input_shape=None, name=None): input = inputs[0] order = list(order) - out = fluid.layers.transpose(input, perm=order, name=name) + out = paddle.transpose(input, perm=order, name=name) return out diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/relu6.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/relu6.py index 4e94fcf..e757aa1 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/relu6.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/relu6.py @@ -8,7 +8,7 @@ def relu6_shape(input_shape): def relu6_layer(inputs, input_shape=None, name=None): input = inputs[0] - out = fluid.layers.relu6(x=input) + out = paddle.nn.functional.relu6(x=input) return out diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/roipooling.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/roipooling.py index 5e0d459..09a2e72 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/roipooling.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/roipooling.py @@ -20,7 +20,7 @@ def roipooling_layer(inputs, name=None): input = inputs[0] roi = inputs[1] - roi = fluid.layers.slice(roi, axes=[1], starts=[1], ends=[5]) + roi = 
paddle.slice(roi, axes=[1], starts=[1], ends=[5]) out = fluid.layers.roi_pool( input, roi, diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/select.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/select.py index 497a3c2..c1c9bad 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/select.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/select.py @@ -30,7 +30,7 @@ def select_layer(inputs, out = [] for i in range(len(slice_point)): out.append( - fluid.layers.slice( + paddle.slice( input, axes=[axis], starts=[slice_point[i]], diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/upsample.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/upsample.py index 44ce7f5..a32452a 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/upsample.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_custom_layer/upsample.py @@ -42,8 +42,8 @@ def upsample_layer(inputs, scale, input_shape=None, name=None): :return: """ x = inputs[0] - out = fluid.layers.resize_nearest( - x, align_corners=False, scale=scale, name=name) + out = paddle.nn.functioanl.interpolate( + x, align_corners=False, scale_factor=scale, name=name) return out diff --git a/x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py b/x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py index 4283bfe..7efea61 100644 --- a/x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py +++ b/x2paddle/op_mapper/static/caffe2paddle/caffe_op_mapper.py @@ -13,19 +13,21 @@ # limitations under the License. 
import numbers +import copy import numpy as np from x2paddle.decoder.caffe_decoder import CaffeGraph from x2paddle.core.op_mapper import OpMapper from x2paddle.core.util import * -from x2paddle.op_mapper import caffe_shape -from x2paddle.op_mapper.caffe_custom_layer import * +from x2paddle.op_mapper.static.caffe2paddle import caffe_shape +from x2paddle.op_mapper.static.caffe2paddle.caffe_custom_layer import * +from x2paddle.core.program import PaddleGraph class CaffeOpMapper(OpMapper): directly_map_ops = { - 'AbsVal': 'abs', - 'Sigmoid': 'sigmoid', - 'TanH': 'tanh', + 'AbsVal': 'paddle.abs', + 'Sigmoid': 'paddle.nn.functional.sigmoid', + 'TanH': 'paddle.tanh', } def __init__(self, decoder): @@ -34,6 +36,9 @@ class CaffeOpMapper(OpMapper): self.weights = dict() resolver = decoder.resolver self.used_custom_layers = {} + self.pd_graph = PaddleGraph(parent_layer=None, graph_type="static") + self.pd_graph.inputs = self.graph.input_nodes + self.pd_graph.outputs = self.graph.output_nodes print("Total nodes: {}".format(len(self.graph.topo_sort))) for node_name in self.graph.topo_sort: @@ -46,7 +51,7 @@ class CaffeOpMapper(OpMapper): func = getattr(self, op) func(node) elif op in custom_layers: - self.set_node_shape(node, is_fluid_op=False) + self.set_node_shape(node, is_custom_op=True) self.deal_custom_layer(node) elif op in self.directly_map_ops: self.set_node_shape(node) @@ -54,6 +59,9 @@ class CaffeOpMapper(OpMapper): else: raise Exception( "The op {} in model is not supported yet.".format(op)) + self.pd_graph.set_parameters(self.weights) + self.pd_graph.set_custom_func(self.used_custom_layers) + def op_checker(self): unsupported_ops = set() @@ -71,7 +79,7 @@ class CaffeOpMapper(OpMapper): print(op) return False - def set_node_shape(self, node, is_fluid_op=True): + def set_node_shape(self, node, is_custom_op=False): inputs = node.inputs input_shape = [] for i, nm in enumerate(inputs): @@ -83,11 +91,11 @@ class CaffeOpMapper(OpMapper): node.input_shape = input_shape 
func_name = 'shape_' + node.layer_type.lower() - if is_fluid_op: + if is_custom_op: + node.output_shape = compute_output_shape(node) + else: node.output_shape = getattr(caffe_shape, func_name)(node.layer, input_shape) - else: - node.output_shape = compute_output_shape(node) def adjust_parameters(self, node): data = node.data @@ -189,27 +197,16 @@ class CaffeOpMapper(OpMapper): def Input(self, node): shape = list(node.layer.input_param.shape[0].dim)[1:] dtype = 'float32' - attr = { - 'dtype': string(dtype), - 'shape': shape, - 'name': string(node.layer_name) - } - node.fluid_code.add_layer( - "data", inputs=None, output=node, param_attr=attr) - - def MemoryData(self, node): - # TODO(syf): Paddlepaddle can't fully support - shape = node.output_shape[0][1:] - dtype = 'float32' - attr = { - 'dtype': string(dtype), - 'shape': shape, - 'name': string(node.layer_name) + layer_attrs = { + "dtype": string(dtype), + "shape": [-1] + shape, + "name": string(node.layer_name) } - node.fluid_code.add_layer( - "data", inputs=None, output=node.layer_name + '0', param_attr=attr) - node.fluid_code.add_note('{} = [{}]'.format(node.layer_name, - node.layer_name + '0')) + self.pd_graph.add_layer( + kernel="paddle.static.data", + inputs={}, + outputs=[node.layer_name], + **layer_attrs) def Convolution(self, node): data = node.data @@ -219,7 +216,7 @@ class CaffeOpMapper(OpMapper): if data is None: data = [] print( - 'The parameter of {} (type is {}) is not set. So we set the parameters as 0' + "The parameter of {} (type is {}) is not set. 
So we set the parameters as 0" .format(node.layer_name, node.layer_type)) input_c = node.input_shape[0][1] output_c = channel @@ -229,28 +226,53 @@ class CaffeOpMapper(OpMapper): data.append(np.zeros([output_c, ]).astype('float32')) else: data = self.adjust_parameters(node) - self.weights[node.layer_name + '_weights'] = data[0] + self.weights[node.layer_name + '_weight_attr'] = data[0] if len(data) == 2: - self.weights[node.layer_name + '_bias'] = data[1] + self.weights[node.layer_name + '_bias_attr'] = data[1] assert len(node.inputs ) == 1, 'The count of Convolution node\'s input is not 1.' input = self.graph.get_bottom_node(node, idx=0, copy=True) - - attr = { - 'filter_size': kernel, - 'num_filters': channel, + layer_attrs = { + "dtype": string("float32"), + "shape": data[0].shape, + "name": string("{}_weight".format(node.layer_name)) + } + self.pd_graph.add_layer( + kernel="paddle.static.data", + inputs={}, + outputs=["{}_weight".format(node.layer_name)], + **layer_attrs) + self.pd_graph.add_layer( + kernel="paddle.ParamAttr", + inputs={}, + outputs=["{}_weight_attr".format(node.layer_name)], + name = string("{}_weight_attr".format(node.layer_name))) + + layer_attrs = { 'stride': stride, 'padding': pad, 'dilation': dilation, 'groups': group, 'name': string(node.layer_name), - 'param_attr': string(node.layer_name + '_weights'), - 'bias_attr': False - if len(data) == 1 else string(node.layer_name + '_bias'), } - node.fluid_code.add_layer( - "conv2d", inputs=input, output=node, param_attr=attr) - + inputs_dict = {"x": self.get_input_name(input), + "weight": "{}_weight".format(node.layer_name), + "weight_attr": "{}_weight_attr".format(node.layer_name)} + if len(data) == 1: + layer_attrs["bias_attr"] = False + else: + self.pd_graph.add_layer( + kernel="paddle.ParamAttr", + inputs={}, + outputs=["{}_bias_attr".format(node.layer_name)], + name = string("{}_bias_attr".format(node.layer_name))) + inputs_dict["bias_attr"] = "{}_bias_attr".format(node.layer_name) + 
self.pd_graph.add_layer( + kernel="paddle.nn.functional.conv2d", + inputs=inputs_dict, + outputs=[node.layer_name], + **layer_attrs) + def Deconvolution(self, node): data = node.data params = node.layer.convolution_param @@ -275,48 +297,52 @@ class CaffeOpMapper(OpMapper): assert len(node.inputs ) == 1, 'The count of Deconvolution node\'s input is not 1.' input = self.graph.get_bottom_node(node, idx=0, copy=True) - attr = { + layer_attrs = { 'output_size': None, - 'filter_size': kernel, - 'num_filters': channel, 'stride': stride, 'padding': pad, 'dilation': dilation, 'groups': group, 'name': string(node.layer_name), - 'param_attr': string(node.layer_name + '_weights'), - 'bias_attr': False + 'weight': string(node.layer_name + '_weights'), + 'bias': False if len(data) == 1 else string(node.layer_name + '_bias') } - node.fluid_code.add_layer( - "conv2d_transpose", inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.nn.functional.conv_transpose2d", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def Pooling(self, node): params = node.layer.pooling_param ceil_mode = getattr(params, 'ceil_mode', True) global_pool = getattr(params, 'global_pooling', False) + assert not global_pool, "The global_pool must be False!" kernel_default = [1, 1] channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters( node.layer_type, params) - if params.pool == 0: - pool_type = 'max' - else: - pool_type = 'avg' assert len( node.inputs) == 1, 'The count of Pooling node\'s input is not 1.' 
input = self.graph.get_bottom_node(node, idx=0, copy=True) - attr = { - 'pool_size': kernel, - 'pool_stride': stride, - 'pool_padding': pad, + layer_attrs = { + 'kernel_size': kernel, + 'stride': stride, + 'padding': pad, 'ceil_mode': ceil_mode, - 'pool_type': string(pool_type), - 'exclusive': False, - 'global_pooling': global_pool, - 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "pool2d", inputs=input, output=node, param_attr=attr) + if params.pool == 0: + self.pd_graph.add_layer( + kernel="paddle.nn.functional.max_pool2d", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) + else: + self.pd_graph.add_layer( + kernel="paddle.nn.functional.avg_pool2d", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def LRN(self, node): assert len(node.inputs) == 1, 'The count of LRN node\'s input is not 1.' @@ -329,15 +355,18 @@ class CaffeOpMapper(OpMapper): # We'll account for that here. alpha = params.alpha / float(params.local_size) input = self.graph.get_bottom_node(node, idx=0, copy=True) - attr = { + layer_attrs = { 'n': params.local_size, 'k': params.k, 'alpha': alpha, 'beta': params.beta, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "lrn", inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="fluid.layers.lrn", + inputs={"input": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def InnerProduct(self, node): data = node.data @@ -374,7 +403,7 @@ class CaffeOpMapper(OpMapper): assert params.axis == 1 assert params.bias_term == True input = self.graph.get_bottom_node(node, idx=0, copy=True) - attr = { + layer_attrs = { 'size': params.num_output, 'name': string(node.layer_name), 'act': None, @@ -382,8 +411,11 @@ class CaffeOpMapper(OpMapper): 'bias_attr': False if len(data) == 1 else string(node.layer_name + '_bias') } - node.fluid_code.add_layer( - "fc", inputs=input, output=node, param_attr=attr) + 
self.pd_graph.add_layer( + kernel="paddle.static.nn.fc", + inputs={"input": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def Softmax(self, node): assert len( @@ -394,9 +426,12 @@ class CaffeOpMapper(OpMapper): shape = node.input_shape[0] dims = len(shape) axis = axis + dims if axis < 0 else axis - attr = {'axis': axis, 'name': string(node.layer_name + '_softmax')} - node.fluid_code.add_layer( - "softmax", inputs=input, output=node, param_attr=attr) + layer_attrs = {'axis': axis, 'name': string(node.layer_name + '_softmax')} + self.pd_graph.add_layer( + kernel="paddle.nn.functional.softmax", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def Slice(self, node): assert len( @@ -412,27 +447,33 @@ class CaffeOpMapper(OpMapper): sections_list = [] for s in output_shape: sections_list.append(s[axis]) - attr = { + layer_attrs = { 'num_or_sections': sections_list, 'dim': axis, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "split", inputs=input, output=node.layer_name, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.split", + inputs={"input": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def Concat(self, node): assert len( node.inputs ) >= 1, 'The count of Concat node\'s input is not more than 1.' 
- inputs = [] + inputs_list = [] for i in range(len(node.inputs)): input = self.graph.get_bottom_node(node, idx=i, copy=True) - inputs.append(input) + inputs_list.append(self.get_input_name(input)) params = node.layer.concat_param axis = params.axis - attr = {'axis': axis, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "concat", inputs=inputs, output=node, param_attr=attr) + layer_attrs = {'axis': axis, 'name': string(node.layer_name)} + self.pd_graph.add_layer( + kernel="paddle.concat", + inputs={"x": inputs_list}, + outputs=[node.layer_name], + **layer_attrs) def ReLU(self, node): """ @@ -447,12 +488,16 @@ class CaffeOpMapper(OpMapper): params = node.layer.relu_param if params.HasField('negative_slope') and params.negative_slope != 0: negative_slope = float(params.negative_slope) - - attr = {'alpha': negative_slope} - node.fluid_code.add_layer( - 'leaky_relu', inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.nn.functional.leaky_relu", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + negative_slope=negative_slope) else: - node.fluid_code.add_layer('relu', inputs=input, output=node) + self.pd_graph.add_layer( + kernel="paddle.nn.functional.relu", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name]) def PReLU(self, node): assert len( @@ -467,44 +512,40 @@ class CaffeOpMapper(OpMapper): data = node.data assert data is not None, 'The parameter of {} (type is {}) is not set. 
You need to use python package of caffe to set the default value.'.format( node.layer_name, node.layer_type) - import paddle - pd_version = paddle.__version__ - if pd_version.startswith("1.8.4") or pd_version.startswith("1.8.3"): - self.weights[node.layer_name + '_weights'] = data[0].reshape(1, -1) - else: - self.weights[node.layer_name + '_weights'] = data[0] - attr = { + self.weights[node.layer_name + '_weights'] = data[0] + layer_attrs = { 'mode': string(mode), 'param_attr': string(node.layer_name + '_weights'), 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "prelu", inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.nn.functional.prelu", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def Accuracy(self, node): assert len( node.inputs) == 2, 'The count of Accuracy node\'s input is not 2.' - inputs = [] - inputs[0] = None - inputs[1] = None - i = 0 - for shape in node.input_shape: + inputs_dict = dict() + for i, shape in enumerate(node.input_shape): if shape[1] == 1: input = self.graph.get_bottom_node(node, idx=i, copy=True) - inputs[1] = input + inputs_dict["label"] = self.get_input_name(input) else: input = self.graph.get_bottom_node(node, idx=i, copy=True) - inputs[0] = input - i += 1 + inputs_dict["input"] = self.get_input_name(input) params = node.layer.accuracy_param top_k = params.top_k axis = params.axis ignore_label = params.ignore_label assert axis == 1, 'PaddlePaddle can not support the situation when the axis is not 1.' assert not ignore_label >= 0, 'PaddlePaddle can not support the situation when the model has ignore label.' 
- attr = {'k': top_k} - node.fluid_code.add_layer( - "accuracy", inputs=inputs, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.metric.accuracy", + inputs=inputs_dict, + outputs=[node.layer_name], + k=top_k) def Eltwise(self, node): assert len( @@ -518,79 +559,68 @@ class CaffeOpMapper(OpMapper): inputs.append(input1) if mode == 0: inputs_dict = {} - inputs_dict['x'] = inputs[0] - inputs_dict['y'] = inputs[1] - attr = {'act': None, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "elementwise_mul", + inputs_dict['x'] = self.get_input_name(inputs[0]) + inputs_dict['y'] = self.get_input_name(inputs[1]) + self.pd_graph.add_layer( + kernel="paddle.multiply", inputs=inputs_dict, - output=node, - param_attr=attr) + outputs=[node.layer_name]) elif mode == 1: if hasattr(params, 'coeff') and len(params.coeff) == 2: coeff = params.coeff input1_name = self.get_input_name(inputs[0]) - attr = { + layer_attrs = { 'shape': [1], - 'value': coeff[0], + 'fill_value': coeff[0], 'dtype': '{}.dtype'.format(input1_name) } - node.fluid_code.add_layer( - "fill_constant", - inputs=None, - output=node.layer_name + '_const1', - param_attr=attr) - attr = {'act': None, 'name': string(node.layer_name + '_mul1')} - node.fluid_code.add_layer( - "elementwise_mul", - inputs=input1_name + ', ' + node.layer_name + '_const1', - output=node.layer_name + '_mul1', - param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.full", + inputs={}, + outputs=["{}_const1".format(node.layer_name)], + **layer_attrs) + self.pd_graph.add_layer( + kernel="paddle.multiply", + inputs={"x": input1_name, + "y": "{}_const1".format(node.layer_name)}, + outputs=["{}_mul1".format(node.layer_name)]) input2_name = self.get_input_name(inputs[1]) - attr = { + layer_attrs = { 'shape': [1], - 'value': coeff[1], + 'fill_value': coeff[1], 'dtype': '{}.dtype'.format(input2_name) } - node.fluid_code.add_layer( - "fill_constant", - inputs=None, - output=node.layer_name + '_const2', - 
param_attr=attr) - attr = {'act': None, 'name': string(node.layer_name + '_mul2')} - node.fluid_code.add_layer( - "elementwise_mul", - inputs=input2_name + ', ' + node.layer_name + '_const2', - output=node.layer_name + '_mul2', - param_attr=attr) - - attr = {'act': None, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "elementwise_add", - inputs='{}_mul1, {}_mul2'.format(node.layer_name, - node.layer_name), - output=node, - param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.full", + inputs={}, + outputs=["{}_const2".format(node.layer_name)], + **layer_attrs) + self.pd_graph.add_layer( + kernel="paddle.multiply", + inputs={"x": input2_name, + "y": "{}_const2".format(node.layer_name)}, + outputs=["{}_mul2".format(node.layer_name)]) + self.pd_graph.add_layer( + kernel="paddle.add", + inputs={"x": "{}_mul1".format(node.layer_name), + "y": "{}_mul2".format(node.layer_name)}, + outputs=[node.layer_name]) else: inputs_dict = {} - inputs_dict['x'] = inputs[0] - inputs_dict['y'] = inputs[1] - attr = {'act': None, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "elementwise_add", + inputs_dict['x'] = self.get_input_name(inputs[0]) + inputs_dict['y'] = self.get_input_name(inputs[1]) + self.pd_graph.add_layer( + kernel="paddle.add", inputs=inputs_dict, - output=node, - param_attr=attr) + outputs=[node.layer_name]) else: inputs_dict = {} - inputs_dict['x'] = inputs[0] - inputs_dict['y'] = inputs[1] - attr = {'act': None, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "elementwise_max", - inputs=inputs_dict, - output=node, - param_attr=attr) + inputs_dict['x'] = self.get_input_name(inputs[0]) + inputs_dict['y'] = self.get_input_name(inputs[1]) + self.pd_graph.add_layer( + kernel="paddle.add", + inputs=inputs_dict, + outputs=[node.layer_name]) def BatchNorm(self, node): assert len( @@ -619,7 +649,7 @@ class CaffeOpMapper(OpMapper): variance *= scaling_factor self.weights[node.layer_name + '_mean'] = mean 
self.weights[node.layer_name + '_variance'] = variance - attr = { + layer_attrs = { 'is_test': True, 'param_attr': None, 'bias_attr': None, @@ -628,8 +658,11 @@ class CaffeOpMapper(OpMapper): 'epsilon': eps, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "batch_norm", inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.nn.functional.batch_norm", + inputs={"input": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def Scale(self, node): if node.data is None: @@ -659,73 +692,87 @@ class CaffeOpMapper(OpMapper): input0 = self.graph.get_bottom_node(node, idx=0, copy=True) input1 = self.graph.get_bottom_node(node, idx=1, copy=True) inputs_dict = {} - inputs_dict['x'] = input0 - inputs_dict['y'] = input1 - attr = {'axis': axis, 'name': string(node.layer_name + '_mul')} - node.fluid_code.add_layer( - "elementwise_mul", + inputs_dict['x'] = self.get_input_name(input0) + inputs_dict['y'] = self.get_input_name(input1) + self.pd_graph.add_layer( + kernel="paddle.multiply", inputs=inputs_dict, - output=node.layer_name + '_mul', - param_attr=attr) + outputs=["{}_mul".format(node.layer_name)], + axis=axis) else: bias_shape = node.input_shape[0][axis:axis + num_axes] input0 = self.graph.get_bottom_node(node, idx=0, copy=True) input0_name = self.get_input_name(input0) - attr = { + self.pd_graph.add_layer( + kernel="paddle.ParamAttr", + inputs={}, + outputs=["{}_scale".format(node.layer_name)], + name = string("{}_scale".format(node.layer_name))) + layer_attrs = { 'dtype': '{}.dtype'.format(input0_name), 'shape': bias_shape, 'name': string(node.layer_name + '_cparam1'), - 'attr': string(node.layer_name + '_scale'), 'is_bias': True, 'default_initializer': 'Constant(value=1.0)' } - node.fluid_code.add_layer( - "create_parameter", inputs=None, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.static.create_parameter", + inputs={"attr": node.layer_name + '_scale',}, + 
outputs=["{}_cparam1".format(node.layer_name)], + **layer_attrs) inputs_dict = {} - inputs_dict['x'] = input0 - inputs_dict['y'] = node - attr = {'axis': axis, 'name': string(node.layer_name + '_mul')} - node.fluid_code.add_layer( - "elementwise_mul", + inputs_dict['x'] = self.get_input_name(input0) + inputs_dict['y'] = "{}_cparam1".format(node.layer_name) + self.pd_graph.add_layer( + kernel="paddle.multiply", inputs=inputs_dict, - output=node.layer_name + '_mul', - param_attr=attr) + outputs=["{}_mul".format(node.layer_name)], + axis=axis) scale_shape = bias_shape input0_name = self.get_input_name(input0) - attr = { + self.pd_graph.add_layer( + kernel="paddle.ParamAttr", + inputs={}, + outputs=["{}_offset".format(node.layer_name)], + name = string("{}_offset".format(node.layer_name))) + layer_attrs = { 'dtype': '{}.dtype'.format(input0_name), 'shape': scale_shape, 'name': string(node.layer_name + '_cparam2'), - 'attr': string(node.layer_name + '_offset'), 'is_bias': True, 'default_initializer': 'Constant(value=1.0)' } - node.fluid_code.add_layer( - "create_parameter", - inputs=None, - output=node.layer_name + '_offset_param', - param_attr=attr) - attr = {'axis': axis, 'name': string(node.layer_name + '_add')} - node.fluid_code.add_layer( - "elementwise_add", - inputs='{}_mul, {}_offset_param'.format(node.layer_name, - node.layer_name), - output=node, - param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.static.create_parameter", + inputs={"attr": node.layer_name + '_offset'}, + outputs=["{}_cparam2".format(node.layer_name)], + **layer_attrs) + inputs_dict = {} + inputs_dict['x'] = "{}_mul".format(node.layer_name) + inputs_dict['y'] = "{}_cparam2".format(node.layer_name) + self.pd_graph.add_layer( + kernel="paddle.add", + inputs=inputs_dict, + outputs=[node.layer_name], + axis=axis) + def Reshape(self, node): input = self.graph.get_bottom_node(node, idx=0, copy=True) top_count = len(input.layer.top) is_inplace = False if top_count == 1 else True 
output_shape = node.output_shape[0] - attr = { + layer_attrs = { 'shape': output_shape, 'inplace': is_inplace, 'act': None, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "reshape", inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) def ArgMax(self, node): assert len(node.inputs) == 1 and len( @@ -741,33 +788,29 @@ class CaffeOpMapper(OpMapper): if axis < 0: axis += len(input_shape) if out_max_val is True: - attr = {'k': top_k, 'name': string(node.layer_name + '_topk')} - node.fluid_code.add_layer( - "topk", - inputs=input, - output='{}_topk_var, {}_index_var'.format(node.layer_name, - node.layer_name), - param_attr=attr) - attr = {'dtype': '{}_topk_var.dtype'.format(node.layer_name)} - node.fluid_code.add_layer( - "cast", - inputs='{}_index_var'.format(node.layer_name), - output='{}_index_var'.format(node.layer_name), - param_attr=attr) - attr = {'axis': axis, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "concat", - inputs='{}_topk_var, {}_index_var'.format(node.layer_name, - node.layer_name), - output=node, - param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.topk", + inputs={"input": self.get_input_name(input)}, + outputs=["{}_topk_var".format(node.layer_name), + "{}_index_var".format(node.layer_name)], + k=top_k) + self.pd_graph.add_layer( + kernel="paddle.cast", + inputs={"x": "{}_topk_var".format(node.layer_name)}, + outputs=["{}_topk_var".format(node.layer_name)], + dtype="{}_topk_var.dtype".format(node.layer_name)) + self.pd_graph.add_layer( + kernel="paddle.concat", + inputs={"x": "[{}_topk_var, {}_index_var]".format(node.layer_name, + node.layer_name)}, + outputs=[node.layer_name], + axis=axis) else: - attr = {'k': top_k, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "topk", - inputs=input, - output='_, {}'.format(node.layer_name), - param_attr=attr) + 
self.pd_graph.add_layer( + kernel="paddle.topk", + inputs={"input": self.get_input_name(input)}, + outputs=["_", node.layer_name], + k=top_k) def Crop(self, node): assert len( @@ -786,24 +829,25 @@ class CaffeOpMapper(OpMapper): ) == len(offset), "invalid offset[%s] in crop layer" % ( str(offset)) offset_real = [0] * axis + offset - attr = {'offsets': list(offset_real), 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "crop", - inputs={'x': input, - 'shape': node.input_shape[1]}, - output=node, - param_attr=attr) - + layer_attrs = {"offsets": list(offset_real), + "shape": node.input_shape[1]} + self.pd_graph.add_layer( + kernel="paddle.crop", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) + def Flatten(self, node): assert len( node. inputs) == 1, 'The count of DetectionOutput node\'s input is not 1.' input = self.graph.get_bottom_node(node, idx=0, copy=True) - shape = node.output_shape[0] - attr = {'shape': shape, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "reshape", inputs=input, output=node, param_attr=attr) - + self.pd_graph.add_layer( + kernel="paddle.reshape", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + shape = node.output_shape[0]) + def Power(self, node): assert len( node.inputs) == 1, 'The count of Permute node\'s input is not 1.' 
@@ -812,17 +856,22 @@ class CaffeOpMapper(OpMapper): power = params.power scale = params.scale shift = params.shift - attr = { + layer_attrs = { 'scale': scale, 'bias': shift, 'bias_after_scale': True, 'name': string(node.layer_name + '_scale') } - node.fluid_code.add_layer( - "scale", inputs=input, output=node, param_attr=attr) - attr = {'factor': power, 'name': string(node.layer_name)} - node.fluid_code.add_layer( - "pow", inputs=node, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.scale", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) + self.pd_graph.add_layer( + kernel="paddle.pow", + inputs={"x": node.layer_name}, + outputs=[node.layer_name], + factor=power) def Reduction(self, node): assert len( @@ -839,46 +888,63 @@ class CaffeOpMapper(OpMapper): axis += input_len + 1 dim = list(range(input_len)) if operation == 1: ## operation = SUM - attr = { + layer_attrs = { 'dim': dim[axis:], 'keep_dim': False, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "reduce_sum", inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.sum", + inputs={"input": self.get_input_name(input)}, + outputs=[node.layer_name], + **layer_attrs) elif operation == 2: ## operation = ASUM - attr = {'name': string(node.layer_name + '_abs')} - node.fluid_code.add_layer( - "abs", inputs=input, output=node, param_attr=attr) - attr = { + self.pd_graph.add_layer( + kernel="paddle.abs", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name]) + layer_attrs = { 'dim': dim[axis:], 'keep_dim': False, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "reduce_sum", inputs=node, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.sum", + inputs={"input": node.layer_name}, + outputs=[node.layer_name], + **layer_attrs) elif operation == 3: ## operation = SUMSQ - attr = {'factor': 2.0, 'name': string(node.layer_name + '_pow')} - 
node.fluid_code.add_layer( - "pow", inputs=input, output=node, param_attr=attr) - attr = { + self.pd_graph.add_layer( + kernel="paddle.pow", + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name], + factor=2.0) + layer_attrs = { 'dim': dim[axis:], 'keep_dim': False, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "reduce_sum", inputs=node, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.sum", + inputs={"input": node.layer_name}, + outputs=[node.layer_name], + **layer_attrs) else: ## operation = MEAN - attr = { + layer_attrs = { 'dim': dim[axis:], 'keep_dim': False, 'name': string(node.layer_name) } - node.fluid_code.add_layer( - "reduce_mean", inputs=node, output=node, param_attr=attr) - attr = {'scale': coeff} - node.fluid_code.add_layer( - "scale", inputs=node, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel="paddle.mean", + inputs={"input": node.layer_name}, + outputs=[node.layer_name], + **layer_attrs) + self.pd_graph.add_layer( + kernel="paddle.scale", + inputs={"x": node.layer_name}, + outputs=[node.layer_name], + scale=coeff) def deal_custom_layer(self, node): op = node.layer_type @@ -893,7 +959,7 @@ class CaffeOpMapper(OpMapper): weights_name = deal_weights(node) for i in range(len(data)): self.weights[weights_name[i]] = data[i] - inputs_node = [] + inputs_list = [] for i in range(len(node.inputs)): input = self.graph.get_bottom_node(node, idx=i, copy=True) if i == 1 and op == 'DetectionOutput': @@ -904,13 +970,19 @@ class CaffeOpMapper(OpMapper): input = self.graph.get_bottom_node(input, idx=0, copy=True) assert input is not None, 'This kind of DetectionOutput is not supported!' 
input = self.graph.get_bottom_node(input, idx=0, copy=True) - inputs_node.append(input) - node.fluid_code.add_layer( - func.__code__.co_name, - inputs=inputs_node, - output=node, - param_attr=kwargs, - is_custom_layer=True) + inputs_list.append(self.get_input_name(input)) + kwargs_tmp = copy.deepcopy(kwargs) + for k, v in kwargs_tmp.items(): + if str(type(v)) == "": + kwargs[k] = dict() + kwargs[k]["nms_threshold"] = v.nms_threshold + kwargs[k]["top_k"] = v.top_k + kwargs[k]["eta"] = v.eta + self.pd_graph.add_layer( + kernel="combination_layer:{}".format(op), + inputs={"inputs": inputs_list}, + outputs=[node.layer_name], + **kwargs) if op not in self.used_custom_layers: self.used_custom_layers[op] = custom_code @@ -918,6 +990,8 @@ class CaffeOpMapper(OpMapper): assert node.layer_type in self.directly_map_ops op_info = self.directly_map_ops[node.layer_type] input = self.graph.get_bottom_node(node, idx=0, copy=True) - attr = {'name': string(node.layer_name)} - node.fluid_code.add_layer( - op_info, inputs=input, output=node, param_attr=attr) + self.pd_graph.add_layer( + kernel=op_info, + inputs={"x": self.get_input_name(input)}, + outputs=[node.layer_name]) + \ No newline at end of file diff --git a/x2paddle/optimizer/__pycache__/__init__.cpython-37.pyc b/x2paddle/optimizer/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1de2d1b7e44fa429b1a81ce0e5322e62904837b4 GIT binary patch literal 168 zcmZ?b<>g`k0=wc_@gVv!h=2h`Aj1KOi&=m~3PUi1CZpdN5~C003Z_D-Hkv literal 0 HcmV?d00001 diff --git a/x2paddle/optimizer/__pycache__/caffe_optimizer.cpython-37.pyc b/x2paddle/optimizer/__pycache__/caffe_optimizer.cpython-37.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b13eac48e809d0167368652047c72728b769407 GIT binary patch literal 1933 zcmZuy&5zqe6rb^T94Fh|f|dd;P{cz)`a4TGxtauQkn0Qd;t7oPX9jCzOEyk~D7fpma(D=C+<$lsxvouP&m!t?b=F z7z>H}giA3aexw?A!(wo-oyy5`RPTMI9G0V(^0QEe6BZVQR4z-0!(0@qu^)wbrStc3 
zCQD#y$5c*~WYZ#7PC6|>kgww!D-u5x)hN;|WqB$K<&8uEQ#gc=!($;?ahwUYg4+GG zDAGw~G-Ht{w+fXje;7|AuBo6LJuR14Ah2FUIe8(YWHb+$gcG6cK`cV48bA@ICv-g7 z)T!XwO#4_mjVA0;By~>hC}&*6q5v1najiC1>+3+u)*Dr;wdutnX)agL#--X5AxDDk zC)Ihr3A+4K07Ba|pgz3>-Jq|5?C1yI0kuHugJkIEi|dX@yg5trv3Rw=~w_Q_IuEJL2WdPu%84ik$SzSCH zW<{|zO)>cvD&9u83ZUBH8$AiJq_H&1LfS7 zMgK7>as)t*V2K=SK!}%!as8FVxDa%^<6!|rvNNW9#wJk4p`_Avu289#GXYJkxta>$f|$x;8J&T@HT|E@ZbV%k@ZN4OVLl kr$H)37k)i46psC|fJ?yzUbV9FHmm$^W0pC(1=KVD1C4v_jQ{`u literal 0 HcmV?d00001 -- GitLab