From c63fe589343c9fe1b737c56a2fe6d0df3737c7d5 Mon Sep 17 00:00:00 2001 From: Renwb1991 <1534609090@qq.com> Date: Thu, 17 Jan 2019 20:27:54 +0800 Subject: [PATCH] X2Paddle: add caffe2fluid --- caffe2fluid/.gitignore | 2 + caffe2fluid/README.md | 87 + caffe2fluid/convert.py | 81 + caffe2fluid/examples/imagenet/README.md | 41 + caffe2fluid/examples/imagenet/compare.py | 102 ++ caffe2fluid/examples/imagenet/data/65.jpeg | Bin 0 -> 109527 bytes caffe2fluid/examples/imagenet/infer.py | 328 ++++ caffe2fluid/examples/imagenet/tools/cmp.sh | 24 + .../examples/imagenet/tools/cmp_layers.sh | 48 + caffe2fluid/examples/imagenet/tools/diff.sh | 83 + caffe2fluid/examples/imagenet/tools/run.sh | 79 + caffe2fluid/examples/imagenet/tools/test.sh | 12 + caffe2fluid/examples/mnist/README.md | 10 + caffe2fluid/examples/mnist/evaluate.py | 83 + caffe2fluid/examples/mnist/run.sh | 75 + caffe2fluid/kaffe/__init__.py | 5 + caffe2fluid/kaffe/caffe/__init__.py | 1 + caffe2fluid/kaffe/caffe/resolver.py | 60 + caffe2fluid/kaffe/custom_layers/__init__.py | 114 ++ caffe2fluid/kaffe/custom_layers/argmax.py | 73 + caffe2fluid/kaffe/custom_layers/axpy.py | 51 + caffe2fluid/kaffe/custom_layers/crop.py | 77 + .../kaffe/custom_layers/detection_out.py | 79 + caffe2fluid/kaffe/custom_layers/flatten.py | 66 + caffe2fluid/kaffe/custom_layers/normalize.py | 56 + caffe2fluid/kaffe/custom_layers/permute.py | 40 + caffe2fluid/kaffe/custom_layers/power.py | 40 + caffe2fluid/kaffe/custom_layers/priorbox.py | 103 ++ caffe2fluid/kaffe/custom_layers/reduction.py | 67 + caffe2fluid/kaffe/custom_layers/register.py | 37 + caffe2fluid/kaffe/custom_layers/reshape.py | 133 ++ caffe2fluid/kaffe/custom_layers/roipooling.py | 53 + caffe2fluid/kaffe/custom_layers/select.py | 67 + caffe2fluid/kaffe/errors.py | 34 + caffe2fluid/kaffe/graph.py | 371 +++++ caffe2fluid/kaffe/layers.py | 250 +++ caffe2fluid/kaffe/net_template.py | 161 ++ caffe2fluid/kaffe/paddle/__init__.py | 2 + caffe2fluid/kaffe/paddle/network.py | 576 +++++++ caffe2fluid/kaffe/paddle/transformer.py | 391 +++++ caffe2fluid/kaffe/protobuf_to_dict.py | 185 +++ caffe2fluid/kaffe/shapes.py | 160 ++ caffe2fluid/kaffe/transformers.py | 414 +++++ caffe2fluid/proto/caffe.proto | 1411 +++++++++++++++++ caffe2fluid/proto/compile.sh | 24 + 45 files changed, 6156 insertions(+) create mode 100644 caffe2fluid/.gitignore create mode 100644 caffe2fluid/README.md create mode 100755 caffe2fluid/convert.py create mode 100644 caffe2fluid/examples/imagenet/README.md create mode 100644 caffe2fluid/examples/imagenet/compare.py create mode 100644 caffe2fluid/examples/imagenet/data/65.jpeg create mode 100644 caffe2fluid/examples/imagenet/infer.py create mode 100755 caffe2fluid/examples/imagenet/tools/cmp.sh create mode 100755 caffe2fluid/examples/imagenet/tools/cmp_layers.sh create mode 100755 caffe2fluid/examples/imagenet/tools/diff.sh create mode 100755 caffe2fluid/examples/imagenet/tools/run.sh create mode 100755 caffe2fluid/examples/imagenet/tools/test.sh create mode 100644 caffe2fluid/examples/mnist/README.md create mode 100644 caffe2fluid/examples/mnist/evaluate.py create mode 100755 caffe2fluid/examples/mnist/run.sh create mode 100644 caffe2fluid/kaffe/__init__.py create mode 100644 caffe2fluid/kaffe/caffe/__init__.py create mode 100644 caffe2fluid/kaffe/caffe/resolver.py create mode 100644 caffe2fluid/kaffe/custom_layers/__init__.py create mode 100644 caffe2fluid/kaffe/custom_layers/argmax.py create mode 100644 caffe2fluid/kaffe/custom_layers/axpy.py create mode 100644 
caffe2fluid/kaffe/custom_layers/crop.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/detection_out.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/flatten.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/normalize.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/permute.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/power.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/priorbox.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/reduction.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/register.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/reshape.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/roipooling.py
 create mode 100644 caffe2fluid/kaffe/custom_layers/select.py
 create mode 100644 caffe2fluid/kaffe/errors.py
 create mode 100644 caffe2fluid/kaffe/graph.py
 create mode 100644 caffe2fluid/kaffe/layers.py
 create mode 100644 caffe2fluid/kaffe/net_template.py
 create mode 100644 caffe2fluid/kaffe/paddle/__init__.py
 create mode 100644 caffe2fluid/kaffe/paddle/network.py
 create mode 100644 caffe2fluid/kaffe/paddle/transformer.py
 create mode 100644 caffe2fluid/kaffe/protobuf_to_dict.py
 create mode 100644 caffe2fluid/kaffe/shapes.py
 create mode 100644 caffe2fluid/kaffe/transformers.py
 create mode 100644 caffe2fluid/proto/caffe.proto
 create mode 100755 caffe2fluid/proto/compile.sh

diff --git a/caffe2fluid/.gitignore b/caffe2fluid/.gitignore
new file mode 100644
index 0000000..0289f96
--- /dev/null
+++ b/caffe2fluid/.gitignore
@@ -0,0 +1,2 @@
+proto/caffepb.py
+proto/caffe_pb2.py
diff --git a/caffe2fluid/README.md b/caffe2fluid/README.md
new file mode 100644
index 0000000..8520342
--- /dev/null
+++ b/caffe2fluid/README.md
@@ -0,0 +1,87 @@
+### Caffe2Fluid
+This tool is used to convert a Caffe model to a Fluid model.
+
+### Key Features
+1. Converts a Caffe model to a Fluid model together with the code that defines the network (useful for re-training)
+
+2. Pycaffe is not required if you only want to convert the model without running Caffe inference
+
+3. Caffe's custom layers can also be converted by extending this tool
+
+4. A set of tools in `examples/imagenet/tools` is provided to compare the outputs of the Caffe and the Fluid model
+
+### HowTo
+1. Prepare `caffepb.py` in `./proto` if your Python environment has no `pycaffe` module; two options are provided here:
+   - Generate `caffepb.py` from caffe.proto
+     ```
+     bash ./proto/compile.sh
+     ```
+
+   - Download one from GitHub directly
+     ```
+     cd proto/ && wget https://raw.githubusercontent.com/ethereon/caffe-tensorflow/master/kaffe/caffe/caffepb.py
+     ```
+
+2. Convert the Caffe model to a Fluid model
+   - Generate the fluid code and weight file
+     ```
+     python convert.py alexnet.prototxt \
+             --caffemodel alexnet.caffemodel \
+             --data-output-path alexnet.npy \
+             --code-output-path alexnet.py
+     ```
+
+   - Save the weights as a fluid model file
+     ```
+     # only infer the last layer's result
+     python alexnet.py alexnet.npy ./fluid
+     # infer these two layers' results
+     python alexnet.py alexnet.npy ./fluid fc8,prob
+     ```
+
+3. Use the converted model for inference (a minimal loading sketch follows this list)
+   - See more details in `examples/imagenet/tools/run.sh`
+
+4. Compare the inference results with Caffe's
+   - See more details in `examples/imagenet/tools/diff.sh`
+
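For step 3, the full inference flow lives in `examples/imagenet/infer.py` and `tools/run.sh`; the snippet below is only a minimal sketch of how a directory produced by step 2 could be loaded with the standard Fluid inference-model API. The `model`/`params` filenames and the 227x227 AlexNet input shape are assumptions (they follow the `fluid/model` and `fluid/params` layout mentioned in the custom-layer notes below); adapt them to your own network.

```python
# minimal sketch: load the model saved by `python alexnet.py alexnet.npy ./fluid`
# the filenames and the input shape are assumptions -- see
# examples/imagenet/infer.py for the loader that actually ships with this tool
import numpy as np
import paddle.fluid as fluid

exe = fluid.Executor(fluid.CPUPlace())

# returns the inference program, the names of its feed variables and its fetch targets
program, feed_names, fetch_targets = fluid.io.load_inference_model(
    dirname='./fluid', executor=exe,
    model_filename='model', params_filename='params')

# dummy NCHW input; replace with a real preprocessed image (e.g. data/65.jpeg)
img = np.random.rand(1, 3, 227, 227).astype('float32')
results = exe.run(program,
                  feed={feed_names[0]: img},
                  fetch_list=fetch_targets)
print(results[0].shape)
```

If the conversion used custom layers, remember to export `CAFFE2FLUID_CUSTOM_LAYERS` (see the section below) before loading the generated `xxxnet.py`; loading an already-saved `./fluid` directory as above does not need it.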
+### How to convert a custom layer
+1. Implement your custom layer in a file under `kaffe/custom_layers`, e.g. `mylayer.py` (a minimal sketch is shown after this list)
+   - Implement `shape_func(input_shape, [other_caffe_params])` to calculate the output shape
+   - Implement `layer_func(inputs, name, [other_caffe_params])` to construct a fluid layer
+   - Register these two functions with `register(kind='MyType', shape=shape_func, layer=layer_func)`
+   - Note: more examples can be found in `kaffe/custom_layers`
+
+2. Add `import mylayer` to `kaffe/custom_layers/__init__.py`
+
+3. Prepare your pycaffe as your customized version (the same environment preparation as above)
+   - (option 1) replace `proto/caffe.proto` with your own caffe.proto and compile it
+   - (option 2) change your `pycaffe` to the customized version
+
+4. Convert the Caffe model to a Fluid model
+
+5. Set the environment variable $CAFFE2FLUID_CUSTOM_LAYERS to the parent directory of 'custom_layers'
+   ```
+   export CAFFE2FLUID_CUSTOM_LAYERS=/path/to/caffe2fluid/kaffe
+   ```
+
+6. Use the converted model by loading `xxxnet.py` and `xxxnet.npy` (not needed if the model has already been saved as `fluid/model` and `fluid/params`)
+
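As an illustration of steps 1 and 2, here is a minimal sketch of such a module. The layer kind `MyScale`, the file name `myscale.py`, its `scale` parameter, and the use of `fluid.layers.scale` are all hypothetical; the `from .register import register` import follows the pattern of the existing modules in `kaffe/custom_layers` (see `axpy.py` or `power.py` for real, complete examples).

```python
# kaffe/custom_layers/myscale.py -- hypothetical example, not part of this patch
import paddle.fluid as fluid

from .register import register


def myscale_shape(input_shape, scale=1.0):
    """ an element-wise scale keeps the shape of its input unchanged """
    return input_shape


def myscale_layer(inputs, name, scale=1.0):
    """ build the fluid op(s) that implement the Caffe layer type 'MyScale' """
    # single-input layers receive one variable, multi-input layers a list
    x = inputs[0] if isinstance(inputs, (list, tuple)) else inputs
    return fluid.layers.scale(x, scale=scale, name=name)


# make the converter aware of the Caffe layer type 'MyScale'
register(kind='MyScale', shape=myscale_shape, layer=myscale_layer)
```

With `import myscale` added to `kaffe/custom_layers/__init__.py`, any layer whose `type` is `MyScale` in the prototxt would then be mapped through these two functions during conversion.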
+### Tested models
+- Lenet:
+[model addr](https://github.com/ethereon/caffe-tensorflow/blob/master/examples/mnist)
+
+- ResNets (ResNet-50, ResNet-101, ResNet-152):
+[model addr](https://onedrive.live.com/?authkey=%21AAFW2-FVoxeVRck&id=4006CBB8476FF777%2117887&cid=4006CBB8476FF777)
+
+- GoogleNet:
+[model addr](https://gist.github.com/jimmie33/7ea9f8ac0da259866b854460f4526034)
+
+- VGG:
+[model addr](https://gist.github.com/ksimonyan/211839e770f7b538e2d8)
+
+- AlexNet:
+[model addr](https://github.com/BVLC/caffe/tree/master/models/bvlc_alexnet)
+
+### Notes
+Some of this code comes from [caffe-tensorflow](https://github.com/ethereon/caffe-tensorflow)
diff --git a/caffe2fluid/convert.py b/caffe2fluid/convert.py
new file mode 100755
index 0000000..b0252e3
--- /dev/null
+++ b/caffe2fluid/convert.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import numpy as np
+import argparse
+
+from kaffe import KaffeError, print_stderr
+from kaffe.paddle import Transformer
+
+
+def fatal_error(msg):
+    """ fatal error encountered
+    """
+    print_stderr(msg)
+    exit(-1)
+
+
+def validate_arguments(args):
+    """ validate args
+    """
+    if (args.data_output_path is not None) and (args.caffemodel is None):
+        fatal_error('No input data path provided.')
+    if (args.caffemodel is not None) and (args.data_output_path is None):
+        fatal_error('No output data path provided.')
+    if (args.code_output_path is None) and (args.data_output_path is None):
+        fatal_error('No output path specified.')
+
+
+def convert(def_path, caffemodel_path, data_output_path, code_output_path,
+            phase):
+    """ convert caffe model to fluid model
+    """
+    try:
+        transformer = Transformer(def_path, caffemodel_path, phase=phase)
+        print_stderr('Converting data...')
+        if caffemodel_path is not None:
+            data = transformer.transform_data()
+            print_stderr('Saving data...')
+            with open(data_output_path, 'wb') as data_out:
+                np.save(data_out, data)
+        if code_output_path:
+            print_stderr('Saving source...')
+            with open(code_output_path, 'wb') as src_out:
+                src_out.write(transformer.transform_source())
+        print_stderr('set env variable before using converted model '\
+            'if used custom_layers:')
+        custom_pk_path = os.path.dirname(os.path.abspath(__file__))
+        custom_pk_path = os.path.join(custom_pk_path, 'kaffe')
+        print_stderr('export CAFFE2FLUID_CUSTOM_LAYERS=%s' % (custom_pk_path))
+        print_stderr('Done.')
+        return 0
+    except KaffeError as err:
+        fatal_error('Error encountered: {}'.format(err))
+
+    return 1
+
+
+def main():
+    """ main
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument('def_path', help='Model definition (.prototxt) path')
+    parser.add_argument('--caffemodel', help='Model data (.caffemodel) path')
+    parser.add_argument('--data-output-path', help='Converted data output path')
+    parser.add_argument(
+        '--code-output-path', help='Save generated source to this path')
+    parser.add_argument(
+        '-p',
+        '--phase',
+        default='test',
+        help='The phase to convert: test (default) or train')
+    args = parser.parse_args()
+    validate_arguments(args)
+    return convert(args.def_path, args.caffemodel, args.data_output_path,
+                   args.code_output_path, args.phase)
+
+
+if __name__ == '__main__':
+    ret = main()
+    sys.exit(ret)
diff --git a/caffe2fluid/examples/imagenet/README.md b/caffe2fluid/examples/imagenet/README.md
new file mode 100644
index 0000000..ad965cd
--- /dev/null
+++ b/caffe2fluid/examples/imagenet/README.md
@@ -0,0 +1,41 @@
+A demo showing how to convert Caffe models trained on 'imagenet' with caffe2fluid
+
+---
+
+# How to use
+
+1. Prepare the Python environment
+
+2. Download a Caffe model to "models.caffe/xxx", which contains "xxx.caffemodel" and "xxx.prototxt"
+
+3. Convert the Caffe model to a Fluid model
+   - generate the fluid code and weight file
+     ```
+     python convert.py alexnet.prototxt \
+             --caffemodel alexnet.caffemodel \
+             --data-output-path alexnet.npy \
+             --code-output-path alexnet.py
+     ```
+
+   - save the weights as a fluid model file
+     ```
+     python alexnet.py alexnet.npy ./fluid
+     ```
+
+4. Do inference
+   ```
+   python infer.py infer ./fluid data/65.jpeg
+   ```
+
+5. Convert the model and do inference together
+   ```
+   bash ./tools/run.sh alexnet ./models.caffe/alexnet ./models/alexnet
+   ```
+   * Assumes the Caffe model is stored in '*./models.caffe/alexnet/alexnet.prototxt|caffemodel*'
+   * The converted model will be stored as '*./models/alexnet/alexnet.py|npy*'
+
+6.
test the difference with caffe's results(need pycaffe installed) + ``` + bash ./tools/diff.sh resnet + ``` + * Make sure your caffemodel stored in '*./models.caffe/resnet*' + * The results will be stored in '*./results/resnet.paddle|caffe*' diff --git a/caffe2fluid/examples/imagenet/compare.py b/caffe2fluid/examples/imagenet/compare.py new file mode 100644 index 0000000..c995e6d --- /dev/null +++ b/caffe2fluid/examples/imagenet/compare.py @@ -0,0 +1,102 @@ +#!/usr/bin/python + +# +#a tool to compare tensors in two files or two directories +# + +import sys +import os + + +def walk_dir(rootdir): + for subdir, dirs, files in os.walk(rootdir): + for file in files: + yield file + + +def calc_diff(f1, f2): + import numpy as np + + d1 = np.load(f1) + d2 = np.load(f2) + + #print d1.shape + #print d2.shape + #print d1[0, 0, 0:10, 0:10] + #print d2[0, 0, 0:10, 0:10] + + d1 = d1.flatten() + d2 = d2.flatten() + + d1_num = reduce(lambda x, y: x * y, d1.shape) + d2_num = reduce(lambda x, y: x * y, d2.shape) + if d1_num != d2_num: + print d1.shape + print d2.shape + assert (d1_num == d2_num), "their shape is not consistent" + + try: + mask = np.abs(d1) >= np.abs(d2) + mask = mask.astype('int32') + + df = np.abs(d1 - d2) + df = df / (1.0e-10 + np.abs(d1) * mask + np.abs(d2) * (1 - mask)) + max_df = np.max(df) + sq_df = np.mean(df * df) + return max_df, sq_df + except Exception as e: + return 1.0, 1.0 + + +def compare(path1, path2, no_exception): + def diff(f1, f2): + max_df, sq_df = calc_diff(f1, f2) + print('[max_df:%.4e, sq_df:%.4e] when compare %s <=> %s' % + (max_df, sq_df, os.path.basename(f1), os.path.basename(f2))) + if no_exception is False: + assert (max_df < 1e-5), \ + 'max_df is too large with value[%.6e]' % (max_df) + assert (sq_df < 1e-10), \ + 'sq_df is too large with value[%.6e]' % (sq_df) + + if os.path.exists(path1) is False: + print('not found %s' % (path1)) + return 1 + elif os.path.exists(path2) is False: + print('not found %s' % (path2)) + return 1 + + if path1.find('.npy') > 0 and path2.find('.npy') > 0: + diff(path1, path2) + return + + for f in walk_dir(path2): + if f.find('.npy') < 0: + continue + + f1 = os.path.join(path1, f) + f2 = os.path.join(path2, f) + diff(f1, f2) + + print('all checking succeed to pass') + return 0 + + +if __name__ == "__main__": + if len(sys.argv) == 1: + path1 = 'lenet.tf/results' + path2 = 'lenet.paddle/results' + elif len(sys.argv) >= 3: + path1 = sys.argv[1] + path2 = sys.argv[2] + if len(sys.argv) == 4: + no_exception = True + else: + no_exception = False + else: + print('usage:') + print(' %s [path1] [path2]' % (sys.argv[0])) + exit(1) + + #print('compare inner result in %s %s' % (path1, path2)) + exit(compare(path1, path2, no_exception)) diff --git a/caffe2fluid/examples/imagenet/data/65.jpeg b/caffe2fluid/examples/imagenet/data/65.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..fd3a93f59385d6ff632483646e6caee300b56d09 GIT binary patch literal 109527 zcmb4qcT`hP@Na-XDAJ2TLQMb#M0$tNoAf3qNC`y|=?Fw1G!Zc*gx;h`2SF)H69okl zKtY-)MbHF85u{2FzkGl1ocI2IbMMJHIXgRd@7>v%ozKjke>-0SaF`pL83Sl&XaFA6 z3vj*xfY{s#3J(gq6BI0>C@Tk0(>JpO0s;R^=W_sq|9Jl&(bCe<{zpJMI$An0#AB=A$;b;0(NDlB+Lbr=Z87n|yv7Yew=X9C1dAKmD=AM< z#R$*<{`VpO_bGt?!~m$TO>I=6j^{rD(9_b>0jZ;*HUc>5M7iXFdi304ww^o+`XLFp zat3HKuU+5P__m_gPrmb6zy&HNEhil(01h}Kmbg| zz;u;=zxLh@`FAp04$Yv%Fyaa7WA)&laF3U58J`CiEkm}qQ$*0;alZ;% zShDdaa2OumzA+>Hy>2q%a#=Zz`=~~wyUq7{F3asqT_GZVbq9Idi-Uf=@WMqz{6{Ss zGUafUvp$$%%?EKq^c4-Ap!|?PdXeH1l&y)7oIpCgu$Ro;sos?bOlCnp#Kd^V?720C 
z+bj`C>?41+CSv7)-T*nz=>XuIc=OS}QhaN3btBfEN$0f?XDcLC;9vbX<2ecxmhf|& z=aa`w0QV{LELTwFsSIgd>sN|X5#8zqL+Pj^1&BT|^ZC!?JwE^i2;9wG$aM;n32U=h zO3vx%M#X|NGhvPuy^g>Qzz|MxgN%-%_zhT#^GP&1mAP+f5WCygELN*@&9am=LHZ>061vfo}q9>VVM%ntE1`jt2#SFU-+p3F}ZIw*xCzD$$27 zsb5W{M&0T<%?&5inUT^a*%GeYyrtRRHRmOW!OsKZrcA&YF@)E0TC>!*N-a{Cr+zyj z1WyZ+6;A9*GB*x3_|F484y|6qOo%iIn2o1-n6KQ=bb4jtn8h2$gC;d0ax!;20g`j` z!0JVd7{x%;mMM@*MR_V2*^U^%ulqH-G%3Oo!t^^sK6SyW}RXNSS6MO z(}TjEo6j4gC_JkaKI4;)Z~~A&B$q=ckk{2Cdlm$ip?yAMBJKevf6_)sP#KJ1fS>|P z0&|}rbk24qNdyUCvuzrPt>vE+b-hsYRQ?L6cL`^6&l7Rb7l;aVC$ z%(Q2&!8SY*DX_e;E}rd?lEmd$F&`i+0=C9?8rX-y1xnCF_O%z5oTR8vxWsCpVOB6b zySTt&PU3kuJr!Cv5zi&uB(-Qn^Q0QALPb?F#~KGzT;QSuw5|alIODE&PWN72OCiLFvV9jAsnURJ) z#dekCAiy5q+s{Lo22+jdGyAy$s=0xbEbD0g+p?No&}@eSm{vpaw1NmLTpTBrX_+3%G_SEDlCGmP#>^ z4a&^7X49s(U3T8}QN;DaCizs6wEBz?sqzYfqmn{pdd@Y}xNk;LCDJ?Y7_YX8sCM?6 z^E1dLxlGCS<%21dByv9aauo(dc@i-jKpjZq$ZV`-%!5t2A9HCs-QM26OzN%MI>c4c zB-XU;HY-y~&Z-^#5f`wjQUa@EEte#adW+#_(bTGDRcjr6r#tM3Yx$eb24w zkw)>?o47O;jnoGXB)-1oXFkt}%k7|s5Cya4ebE%`q&L5#urPA*u?uZi3mak&E zB+$pGHJUbIk>g}nu2v?HMI$RF;;V*PHlB0V%vp&v1fE3-JKTlzT@^KbIW6dMSDw6a zX1v<&YRu^xkVSaL*%%n38yw&foM$Atu3VT&A#dR%#}>P*wvsiwYhJH5o&l2D{{S*p zh{n7-2P`)P18D(C2MVJ;<3#cl^k*DLxGQ}_r>ac)o{1)ls`iqjrK=a-#5Q1c48#8b zN+V+Ygv)z&0DPW0M9drZi;DO-jz94Q(>taOLsW)_ttV;7mOr5i=~}B0(gYr_sa{b6 zn1jc)aIEY`3a2>-Fy(QbNLoGr0PncZN9`Wdtu<>}CvJAFsb!W?C7QG&c%vSs=`te& z2-^t?INjr(PM`yNnVEpkzTP8lVjCkJH&Rd0by<71^_2G!5%Y$3t|LR z2pIP{R-<;Nmnej5dim3Wzw>(Z^tj?ia)H)RT>k(`*9-a_aKs(EhCJ-92-MiCdNRik z)Lzq$GAHgEQ|b)zL|_{USp<9V!ca_V>O${qBDP5@w~Ar%g6PG|;~KFkm;V5WZp7`$ zV}raGT&H}!)R&ebFiKYk7Yz^%6z8C`(UsCCZKw>Gb)_a#p8x25VuC57C) zOoFai+AmGU*Btu*kw9+0T&ZPDuKZvX{UcqWs6S9Bh8H=Xdwn$RKH&O=^w+#K{^ss@ zwQU;FvrY{wPI&6-QyT6MuCC1-P$^}UZ7YY0F}rDT)P5n28iW4;iKhl^ikw$K&Wi_d z-O@c6yR*6UOU79h7Hc)7sXTGB`#2?;*zmY4M&dbAa4(A^FK0$eHZEHOa?Ku^m$f0O z!>ikhO06TBYp5@wAsE1&0<(KOgxm-$OJE+Z2w|xe4!=!79=&T(>C@=;=b=C*1$Yqx z*$pP_W?0Ka2rHZw$zz7+q$|kX)NsXn7YL_S?8`ZCxSj`vq${-pT4KO31w1H4T>Hx$ z;0`g-3+Tx;5pe5NwKlM5`ZH8SG0QAzI~boB11iT0k8As~a2E^-_fVlxxghOSv0{-* zNNCm5sm~177y|^r!|StKhHIR6-WnqhTAv581j$IKYh(79gX0 zSwE&Tv>n(a6~P}Kdauj_ay%@=dYm^bNnv$R)u}00m250?E=j;KIojyW`gWY|037mq zi~U{Am&AQB8g`Ft%#q1|w{+-aDmJXL$XG5v`#9%3Wd8tj)Xo+B$nyUH_Kj+55YG$~ z!>Q@|luV(T+>S(4(Xi1+D8cmdHV{rTxbSg~f_Z3laxI0fh?8p;XVaBrpmgU!8asOP zt#V{yRI1~4T(>z4Gq?lbbwlJJ79*Sns&X6s_1_78&3z+W?ATC7%INg0XHCKbyoN$2 zV}iLH4cW_W_hC75a^w!Cj9hA&XGN#3MG`)xskWabd1XTkkV!0Y*e=QdaWUGG9D+d{ zdq5fMX!3v;CU72Kh#s$~Y8tGv)q_Ir4KkHkoqpl%As6axWnirQ=HKD_qq+LBjot6?O}5df$+ev#@T;m&dA zE^=P7Ec6=~;tICsXVUeq+iJR+Y5FX>=h_JB%~lf9$)>(Y21zBr3&z~8ap#VD*^|mz zr4u9IH)QHD(xY8NPyYb#74xMut|+L4UT`pz6l46>cG_?=kX7M)vI#GsXS)8zT`D#ZO4J_ z_hW!gaBy?U2cl22C5pk3Qfk)SZqTHbAPjV6*=|L4 zS~E=*SZCJg$j(zW+7=+P4xq{ zD^C@FxQfg#W=eQS-b)^;1H?%ikGoK(}Wbh*XU?Y$b@!>0|T6c)GQqa+~=8oX> z{Ya#dqJ1@i-aT205af;AjQHo480GQ{e$f{%i~;(LY}&Ne#jP^VpkTKk0po`$1~&LC z51vQ2JC1YbfcY6n;@;;Bd(fUwyV>7esP~D#r&{dmy`>Bfe!LoSDXT|nK;$h@6X3uze>dbZsu&BMKxnOhS2(B-ifap*M6hwwRBYW>H1YFk=3hNjtO9>#E=*< z1)Butc6VSRcrV*DC&~nMI=yWo_uD0SpwnKIaRQ9!suh7b_8bIUDI19)i9a2B7|T^m z*&RmafAkGX3lnN_jhHFwOr~6@4CN3IaCZ(9cldbeikK0J>NFw=C9y8bpMNket?E2m zfno^3!(ea*21($M0L>IosK0Rqxn6pbNiBMn7PBiEAoUm5wofRideoy&C!jdf;O(y}(8Bv5Rt zVrW-nWDpri&RAhvEw>~NeDTYE<1ZbPH;eA-{lW>Lc*<`Pfh@9CJ9$rN3Ksx@;DP=V z=dNM-oyb?>M^ss5dlr~_g#ng1BOyq^Bh+3%0B##bPqY)0)8j&78X|;i4L?dq=9z4~ zvTl+Oxs%o?(#F~koJwD8JmLKKUjFP#F zVQBZMUL`EYaRGdOazMsLbB8BJXbd1)lrtlWnnmiA^E7c4c+BNbAhQVrsP^tT$T%z) z8;(`Qnh+895cyshR)M(@`?!?I+ld&%a)CFNkCHhV{{SFJ>q0P5Mkz`*B%*r|MIw2!Q+MS<%-S{fgvu;d@EkhiJNYTVG$@bP7G!jEBd1T0N 
zfFX8f8TS$HAdL0PhT6>L$y$SH_p~hsTpg<(kKS5!mx5LM*|gWNmLiEkm8P;?-B<&Eg)Q(ci zcidNJPVFAXt*IKFnGAKNj46F9Ns33aw1yx0VYk;%ef)ufc_i{>krj7RxE#KLmRee* z_H^hp{VmPw(B&mPVw zjeD~pw?3mHvsg*mLtT9*WIm!26vgVR?HKp$U=9v?k2K2NZZHgk?<|_Oi#DLO=<1SH z)nSL<1iEWW4a&~`)+kVhQsjmq!vYGBtVTtKA_oFsIigP4?W%fP*wg7%?wW==$igel zD%3zrpSVX{$n#^)WEeR1k8TDe3#hM{pWV54xV5y6J?vh()#x$=wPHF4bmuA&5m65y zum_JDfgVRyYEgFrwVH~m>a29i)>2JJy0rBoDWr~?>CIR->UTh%MsgQ!Km!L4(k^=( zmNTHbJ;9^b_5D(mmo%=Bm=5NX+s?P7MC3EEFBXDJ*#ee1Z>1zDqFbP1P6j zU{ZHia?+Sm*LzUum6B0jJ8FGcOktFrny&u#Ox`@G83YY%ene`gF zM!O_Q_g5l&F&BZVB1e)(^yI{W2zD4?0VH5A0O-a`3o;HzN?{t5o{bAq){{=JrWGwK zq;i(48iL|PBy-1vC>Ug89SoK>v$<-Z?;eq;H{XBeDg`A0(7ov)1#!mI-nbv-=RE}S z5Qx7my@Nc9r2QAE+1gc&DgL1SOru8Bf;OjAJ^7KJD}gEv#X0ve7)3Y(9eF?Wz8jmz zocTV3U(C+Wqv#r?JwEMP`ZsXsQ2ziUD`pkVVM(|0^qSL;AE5es$^004Zs!Kx`=IP z9oMNv6_H!DJ6}Q|SZLzYA-VUtW)f|7Bs6W7AxPbpKJ4QTJO(F`I*PtRCV;s9m)=tp z^xY@7`)Z{HT(}Q4Nn>EmxWNhsLaMt?3CJe{IXz!0A5bf)*^jH~Ei`6-w7gM&tzxW& z7(jXDT!A4ZV}NpZW9Orb#1vYj-zd8EE3ne^H~i2R-8qXqy1 z7&3v3{;isOl-WyX~1_68$IVbQ*_!&KS$|Iu!k_^+{ZT)#LZP>scK4hCyLpjRckqOVx)7Uy34~7%v9lk$ijoqo^!=WraEeuJc{Hb8BC&DZy$C`e$;0^x@wsU0I^6*MBW zjc@7JY}7$4m!d{y2xXO}ie1|S0H9_(tKgjDX*kH;(8k{4u_n~U7c6L6)Ry#`aa7bM zuOL;86tci}t135wMlg1%Bgj4q^>W^z7^|qpoLX#9C0M_gzoZ*x0>;F67*!eCGRK}U ze>mulBl*d*8>uJq`EuK3NMl8l3eftBq_ARk4{|W)k&J@F$AJh|3#cUbw5T9~qIoMv zV=FL$GnQ8FmzPs2vBR1d~*O-_p~XHMWYO?3Yy$J_#{~BXD7m zatF^D3VKuI?u@_5u|&qvn|9G-r(*q(CL(>&l3l<+*j%V1!RP+~amGpN%aGJoQSKi* z$6Pm`r zE4HfL(V}+=zM?XxfFgq#IKkY1Qx+qhFc39B;`b-A^Fdh8W&X-J8+pxy4C-(T$c()5 z%Z%e03~`Kf+=Xf&6#|TyXRlf+*qi0zOt#H6<4wNJm?q-2G`2hK7I@G_jh8<4z~ zYH1NiEqxi$V@aTn5@ucJAR)*M=g*Px{u}|*@zsczf3{6dN0!X7MFm|w^M+)M<~J&N zEI>keCph?C4?QikAc%F6$yy55o*AsnvWlkBQe^@$gD8xG`S%t8lZF6}KnM=6++d%1 z582wb(9Lqh>0qpKqyiv45>%%2LGE4)soF+Y001$E;iy%G(4R*{!Ae@5w3nhX6j>d2 z#-WG}{ZWTtPae;HdFROBN{z`nnX=o{>DP)ju_dWt1i+;FVEe%!9(Dn{h`Ur-s?$Hq z=UcUyGdoQ2go)Va4er{&uY^^`LHvQ&J08|LfOg$V)g3OqIp)=MF5ao8-(`vuY{3LV z+vTtx2!W6lz*Q%HR396N=`aiWk8r&a#2rDdQ>{F;Y-*Z&KBF3c>7t`6Mq_f!<)RJ_ z=`uk4F~9%;)tKCyEeaC7tM=BhaIM6(eA$Y>qRTK36hcA$NYTz%WlN!v4)7Vd00Gn= z75?7kQ!NT@&LQ7lLHm=xX;RVtpm*N2V@9p1J-9_Vf0tRR4(-e(g4Iedo6^)M3C+;?OeZ5Zjn+m~ddk21}uTb_xA!l?!cLLg*ZN$=h^ zvbn)rZWYOs(T#`!+~+z6)Gw|2-+1c!1f7dNWoS@SYf$q{ z#_0JZHwP2>4o?s@_kpqF(Nam5{TI0OJCJELZ)m-{4UIy%cb27?sTt;sGdyrrk;80F z#jwqjyyqV5*>kHX=T{phHU!sFy?ecNyEi+tO1)3Hdn$&PBnrBfc_-a@eWaS@Q~HGw z(6VK+g;#0LLci|fx)V+@Ly$ky?Jt!(i%4m$_U4hO>eoAtDsj(ZH>o9CN+iOhg(VGz zWqj?&IKaRJ4U3%t;&x}6_!{k|H62&E^!l2D=z4!@PpHB$={$v**Jvbfav~1ia@fle zkUaHPW~+|4@qxQ0bL;)x^<3SrEl;O9HF{!qj%1N%63E*@2Dh<;79POOv=T^N!;_yT zcd7ziQy3Kp)3raWnv?2uwY>wg>gfUh0QBu&QFWO%knQTi9|WQzWU)hmDi!vtb(_miaSu#dk0u-qx%C4hTCPEzx zo9Wup(y};sc?r<~z0Jp47nU7J`6u!*eW}`ukiFIRuX#dI^=7 zZWhF+>W!qa1h*a=k7mn78I@soDF|L2aJed;Mt*)d9tS_RFFS}D{4m7d?T0)!blebJKH;4k~Y2r zU=VS^7?-<+AMPZqbri0V8qKyeSsqxMY*3Oj>WuC?NY4k4cluWwU$~8{u`#M!(!@y8 zYZ2U%8zKmq?+F#BI13XpXKbYIACdjWI!w->>Lw68-iMlhdo}NgIFy znAdA&Om6rCkVY_1P;7&{ngdi{9F%R^Sn26D?91xMU>oPv66Ya=n}IQuLJm^|w;3aG z>LJbON)2q7Gf=aBG_$979;HeH7xg2i2(K%yaI!w;a1<$DYag5rMm07I5k_j+m7NE& z^=kC4X}X7ecTv)9s|joAk+m(X$8p&#vP&+0cLrQ}z%9g6z#hzT<`y~|m#Vr(k)-K~ zO>JwHszBnzvx{pJOYR#)(u}r911C8lhI5hDq(wnOXUNR$xp0Qyc^N+5dU7l6^^Kri zQy$~VFXNs;W6zI0XnHbc)IcRoJJPWB5ulld)|IPzo8lzx%XtKnM&FE_`5hwwXD&h} zuuBzROpPWwbEpOt^g}Qo<5emF+;B<8Gspv;xkmTY=4*K0A}`sl5?L2 ztDdzc2<~gAdeu$!&ZHfv!xHas#;>PIZ%dO-OF3yRQHhHTa;4+iRZBZ# z$UAY4n;2z3S5a~h)j|rkne17fdo@=9B7tLxP(;C5OEa;!W z;?wVXVvLohF@XCPDMJ<#Y$1tK3WneL{U-+i9-Z@Ep-e<~45uZVS0!luRFxlNOsCap zFrkT!2emwabK_~wNykM^h?x}LwULwX6Cyqc}mVIsH?u$p7W@<8xC+@E;be|$}J zcP<-JGh;U=4RLDNUW)1=0xER#U%L%3F+U3gJAK)q 
z%N5CF)oI?SuzHI;)1ici!4U}%@OLoC&p6HxNsG+H!%Pi8*tbMQ8n@K0$kLCTEX=o$}e(bDXDd=CZ9FiQtVS=HAVDC3fn;1 zcW|VDK|BNFJt$f+l}}d#lHJBKrKtpxmv!|4-yqyJmDuf(`mzf)++zc!%C|W(QL`3D zYQza06oyJ{SmDg%vJ8f2m0NPg8)g&&<;x$=;Ux#7EGw{Jlk1v|s+5NGA2Ui%#749H$++iI#>QiP!xTw=4 z(!7Q_2*@F0M%-MmBY;K}F#{REJTIvr;AqSw5Xm~rJTWGsq2)3|5LAmS0QAwfZWva} zWUt0@I&|2mMH8s8v>7uoU%uTM2;vVC1&J_0+mnTDp^pQ@9i#)Ei(x&+1QSpg^*gsB z6W67wY0S1+eKQgjmT=0(Pw_hf7a73-?`A#w=g6Tk?Il~fI8+%A55Ay_Zv{Bs4aDaSG7K? zQbu%-Pn`>_a?Hy9q?3A%JvJT#aKx#??d4Y+AE|_P0F6WBicwOQTGVoA85)Mmjt})2m@(u0V~fR8$ivG!Wt%3SCFwQy~o;}%iVfx6%)8M zZ3|yY<waXn8s+WI;5mq67e)bvT?)_a>O zkzLdEIO9Pk)9OyqO2`|_s;C%k%aPAmU&6{iGT*9C;+1U|>p$@w+MUe$chnBdp2mRo zql`zbRGPh_NeZb1ks`;Fza!L;j(o4_>lpHUoNZ1uGzGztb)!9zqJ2`+JD!j8T^c(} zQKV%lEDWxVocpq*9D=M0k_cdPl6v-2;sUk|@-ty&Y77js*R|Z*h1YLWQxP(}Mtc*b zca=dq!a%#W1lj;6hvc7vQy6RCae>A@ksC`jYSnc6*L58yLaY!UR@Kx{q$>d4tK&G_ zSOo*{PbaMkN3#dI5Q*;T)ggH_iQ?8`iN+#^c-}ci%!Otw3KBra>B-J`>N!vk+bfNj zEj)q^UsAWJeb|u228VLzN3H+ zk5GCYy>dUe)~xp*bW37L8hBu}VWmKJY}@wu}sWpih-#PuA+H7I1O z8dy5B!2bZ$k~<3iNai;0yfE9LsN9Hdoy_b7 zbP{cl6B};GR{2sm&U|8Tg|AR30NF9dpA^>UnI_lTEiMIP91US*+(A=?k+Q(97?KWo z@-vRAF_jk?&4{Pfp5Tj8g|&I(i+o0Oj#4WxIT6Brr3+&$0m)!M==kc_WW{<>UZs|J z3dshW1oWYUK8c>mAV(l(Cujr=61nFf4CEYRtq#WGA`fu;dbX2Vv&|flTM@yqOFDXw zDCD?Efeae};c>_V9Qajg+?{%QczM(8|M9C{7SSoSP z>LlRDgN{!p@zIQ!^>8>V+i6Xy-o2P2K8jZns1Dnd?=mH)e3mn6!0{2br~ zJT5u-9R_en5UqNU&m;nX)OCq1X_|V$>R1xnXpDYJmd_}ooE0aZ^ysq%lWGRNz$)6U zdMj3RHzNRL*soF|)e%Dpf9ywGwvm`9-um*D)E(v3m8DavAwjPOhFoJ1a57maK(<%g;EC!NY9Fgv1Ta5K#T>XH!f+R);Wi#z2iHIC+o?QCibbp z{+>qv0CqM_5P%!GCGBnsQ&6#~!)jS9%wq~nlD(NxSgOW^a0wXNe4aSZQFg7g zbzAo;l6Ji&RvmehS*^H_7HlJSn z`i6&JM$$D5fR=03jvEQJ0&htpRoY6A)G^1$9N+XR7IS$FPUfTOuWU^m`c3^NhNGrS zQ|+gzaGAe=naT@N#a5ObsI)f1Qx%P# z7-u75`P_NP$B*BN0b~$06yvi%$ev^qN^DM`naaZmJ3wNY_J#!K$GePUka3dO^hq*o zq%{a6aK|LeVkw{-*t5*7fB|Ej{yY{L$QVCAg=@R2m6gW7Dg~)VzKL5-khqd*CoJVy zWU43|Gs-bMkH;apA25%n)T%O$;dRS<1{Jv-z^{4{jK(&%c;Ba|&XFnfU~4!+RSN|;PFOKw033sy`8_`|6D9Rf zdzQ;lj_0hViNn0nyNUf-957Q9W8WSKau;?>hGBuv46(*L-$ox5n|^{?)Be*={+O0s zv565GeYim;(BKC78RUWRI%LVnYC*>nMpj_dEK2c*wQ7xqmDamTgkhD`g2QuQ0?eQ& z+mJFz+tFv#e1wjp3);XKr5%ed_Ma~l6Q{95;O+&0kBpq)oD7_RGdImgaH=2O2C|H} zqo>xMv&s=gbh3vmToCHNoFfy!IgI!?$#c7fSCFGBz0cUs-INh@jbpjAr-|9t#2`E| z%FM&uqXTkpJAeB}EKUr3tY0x}+`b&&Ea+-B(`jlNEme#8hu=;lSYmWm`EekMM| z$jQ5eL03>Hg1w8cAN1vx9GHR-d=7jOpZAFvBY-*$l*Cl_+L?#%r$Xxbg2TClY;6<{ zxjcZcOuc_$Tjf8lvHlt<$1(c}UKJ4u|RZ@A2$-o{)3F{s#s!gbt zZy%4>GPmB!iRbOhG!`WFE54voH;DoF0SP1`fr3sz+l~R_q0jDW99`>kSntOo2y2No z1u{JAZ(nr9q5( zjB?o_fdzphBxA=@%s^f-(TO%7x_#ZqQmq)x^8TB{JdsR-KX4m@ju#8SUQRL3fh=l3 zp&3p0BaFzmW~&4>VM`H(k*I176kw^&5-6wFF;W1?Vxz(MQl#<+RygQbfif}C8idt9 zaW#=Cu?1#DO88zOROF#zfN&H5G5F-;Jt=DuNF>yW=+=7Ch!EIV<0S7@Za>1?xHtni zAden7JtTGnPo_&l`m<`fm#opGo`3+x>bcyE8yggsQ;g*2qS!d>PDkbYgxY;JR))M* zzS?5Zsf^BKkA68a7E`sk$irtp=fNsEA^_HP3{lYG6g3N&w`)= z7?5~41ohYVjz}j_tTmqWt09UCk18XH)Q!)9!DF0dc^seZ$mzfbLPH^=xW?OCkyKRF zH3wIHt2I)HirY>Dqps!{4%pj_0x~@G0Igi8PDRf}{nqZeKalEegvH@DrMPB_Pb`}_vPMB$JPMbg=YSW~O8 zvY)t+6*9>rK=Mx2$hjoJb3pJl?6z_ z1xPQDar2n7+0cxvLsLveq+fX*VwAwr&mO@%rc_n^Ml`?}PzwN<;eq`?^~;L3JA~yI zP>N7M(Ifq=x?HLVj7DQ+vWMX9_LK}%f<8w+4o)PXOhes9RqMwTFv)0Bhf$VQK`fvd zB>w<)ImjH5kX6@wjFHHUsATrzWU^-rHx&#)=hFar!S9d&i~5stk32ok29%m@ zDwg!J>e2ekVI_*5N%vQchF(Cq{d-PEdGK*@aCJ6(!B+#)w4H9eF{Dy=Bt;-a-)~T2 zZ&;IYc732=VSh^P!=I7K=futQz;Z8e+TVXkqj@Lw5+Wj%rLyw8d|{R5e1^d%01RP& zJatt@K~NSAxJ~}$kGJEFe&Dxmsd&;BMztJqPZ$6wc9g0xW^LH>?)+n|36t58ZycVd zJk&plucX>N$$@zi+-|_6&8Z{ZsxSR^M-5?VTe=?yDj)*EIc3gI5=(-+++K5#0x*Hn|PEe&;w+I*wl` z1_zJ%l*G$lRHgT)(?8;;>OJ{;hJUYJ@$}ad69$Xg)(kZ0xNzno3H2;V`bjf07UO76 
zSEBj+lH9P6qwU}MGNZ}Hrj=|-dpETI0F4gO)F^0={88wd){6(CVY8}eF={p(XKaSa zc_Ygzs-3%tEac&K5JnF(AZ{%pDmR5*QYyGRNG`2I)qj_N3Kku+nLhXz?a+`&Fjd$c@8# zL=xN(9R8E=KpjDY2}R@VO(C;+%X-LTiZ_z9@z^7_L2Y?vbRL$cCfI>Ade7 z1$8XNh8+9O8B{ZEKLh+dD(7Kxjf@>ht7^59KS3t9QV67s$m-!FBQAX;t`vpgsqd`h7Hr=hSZdp#rcq~ATeqN`soHW>XcUOnNqt}%dkz#DLP5y8h= zKTbfI!_kU<&eV-8??Xt5j$ufm4iSoOBQndl<#$GL$OH8Ldd|K%7cMg>BE(U4IV#4s zr?F#Ex?))ci;^G(#OkF>GbZ(u0Y=l2oN0{|88qdVU0+nLi34wz&0s7E3*pY(mq|ys z=PE#E8O|`ls8Ngofla1}uWE8quU`J641!wd%CNnEW0EAYtalBFpf9F&wm}V+V!??Z zNxFh1nvoZ%tdy=pc4yrmuOr6tP8S3j0wEiRxcqJ;a57I?>E)%6s6dWG8BbnNE}(=!=Y;qt0SBl~#pI?QuF+H~a9_1#jW_N&+W zciwPC8wIN^B&!6}Jw^eFEN|`vhLODSka_DkJbDt?-AdKVdo32JWi^@TS%j|NeO6H3 z-OL_7PT~N{fA%qnQs}_Y6FGX<-FuD1&KWIpFL>=dx%t# zR~Lt4e!lcuWHZ#l~tJ+B0Do&vBV1+jyZft(yF&HbeG<+tFdQ z>FMj++K#8E#_@n*vm8hY`?HWiE1kYF7-t_Rrpb&vlD83NPa4Q)hJ?0hKq98LBuFci zc`?BBn7@I6ka7Bs2|Yo;=uH>y6X+Rr3k@7kEUuvIZ(fC)I)0<9tP?p7Bas*Y zvN#K|`M@oO+qjT&dee&{(GgA$wEEOcv9|SiqJ~A8L_wLd0nCZF!Q^?!IOOrwS*SpY zFHmo`NHJqiNC9^eF4y%71K^iF;KXe zRweTi#`XYkb%6j;h45^aBvC7MJldK>bWkW(92dS5n__~24q-S zqj;eNWc!Fig#}Jd2nWD9=zQCisxnWwqgvcl7FxFCn{0*(@+p!q>Jdp$PRRU{-yn{B zbtQ04rAP*AV6+#K3es5hL{8z>hpzHB3+y~+g&YES!9N`|lgEs_ae`(cG)|E}yO6tSV{RHd?oljAUPvH_3PEg@Lw4P zDhMdC-0AucV0I>*Ex9z9w2BEM7+%5z24@(^3rSePB45X37D+!8>@9y+1tdbjS41Pwl1e3w8`iYg5r$7?aXM=ERZJP}a6a zvN|Xzt(JBnR|h*V0I(L)*q7?3>UmpH)W3>`(lm=&U)H~)P+f^b-5Q@|`iSmiJH!*A ze6&~%hXA+$WPgXx@w}YB#0CEVSxmx>fjhs^JsZC&rn9d1zjjpb{aaW2XfJ)fjTJie z-acZSs2ORDZmPp4XQY_UIQ4vd*$#~l{rt=sM(j^Ttdc6B3ArAp_0?l$c(PCU--4q& zrUy9NoVQmueZZ}9Ry3s4UO^H=cLXyuj-)V6%3_dX+*yGvxl#GZ>$v+egJZaLN;6iO zQ6sXl%@!Cec~IWb?JN%FA%957@CosT3w;?fN3PsgaitEod~PqjOX<(^izA0c8W z-_uMTkLoT)0|s1x!7F=>%jx8OO)O}&8vL@8Pf@WDMzYF|6DmoLH`};v9!K~?5^xVp zFplmiZCDc(saLD^bmqT4tacspNLh?0d6Nv}D{WE^7_q|>n9F z{WUy+&NGbU?im)YCl(@vihWt-CMr?eMuxbUmrb$$l0gKF?b>$}z-~GG7Rjm-Y)84R zY2>!__pHXWjM09e*}&eRm~uxven=ekA%5X!07&fJ)MAPWVEcHk258lVK{GH?%oUZC zgOC8p@sdUdo-bO29npxaTcIsk{{Ww9a-~oh+Vy5i@uY?K7~K)snOVp=*^X2KI6Yv< zf9{%-7PTR*uCQObtYFpQWs#X7vWn5i>{TUX3f^6T-O7@=$!?S9eS~(zJ;A5e=|iW< zYSxh`D+!TlC6#4}gYGi3Hx1!~C?!3kX*eAP%X_FB7$S*>%xz32qpIq9O^IB*Z%(hN zvJ8)N6k^BD7%?Lrp*wgTRTjpi+)&?ASSdTuMAW6LT^cNsBoQj@c5T6zJbr=Wf}8=6 z0VqVD(nU>Z+Sc_6D9H`2OH-jUdtocIq)Q+8Ko|!g`?<&nIL6V!?y4b00-!gkP?FS= zPowGfCO9l?$gq_-@T$r(2j`KycPAY`AT<-^G1xM@6&KK8sY^}OMHv$@?AoMFk+de( z8%Y~UB$X!zIR~tbjjq9##^iqKYp)cxwCfe9$xK>C^?f^PF#hZp%w+R{?ZNi(`09sZ z1;=8HFbLtTr+PJ{k{!gD+D~X;GT!C^n11-rkMGkx)hw#lvl3PA?HkpWDeg;7!DF%~ zt0Ko1PBXmZo!-zmz`?-j6Dj?*xTgrdQN%FJ>plHKX@%{fzoEA)8xM~MIRF;m@^R;` z2uY(I&c2x9mX!=KLq?Nnx>86mM)f&nIRtqG@^F9a&sqjCAkN)Im#e%4RE@PQPKXFC zr0xI_o(n7Gix4t)h2#U1o}^CYF*QM@r8MHklSCferS9~^J46wTdnEM66v}cvrG^R5 z&Zklb}#d7HbCd@E3fio8=h zK_r&tFS>8*u|$yk<*+l!;PHY_Rr+MWC!q?cn`_yQph~J^+VVW48$RrQqImE|I3xVW zS{SOV=2j305}m6lBUft@%%%@kV9X;N^SQCSAJgsqiR&D_k=$LWO$8YeIH75o$PTR| zewH|6ji7GH03RN5MmoyjROm}CGGGzc)F!uD)H*yg0!9(!`<6CH4Urg;K-hqeS0^Cx z(6Q9F7W>OJDK!GsWVJnXrWj80DU>$<04k`w06FDGM}U0wMrzTS_}u4R$@OpPZ*0`n zF<^U@3)jR?OvL84_lx&FWXKJ42sO3tg^ z38~X?mrkFeTC~v5Bzxo9RuUCjJi6^5Fu*50CQRs`xYTe>>N4#Qt(t$;Di`AI%H7eU zt#V_UeR8vDKB3yK3PCG+=i0#ffmIuD19nL048 zV-L9S&Z~lQIVXSz$0MmL8FCV6= z-*SON7G+bou`ACW;s!=K0Z0ZsekHJiDN@P1ET}?nO`%ID1?CI2ecZPPjEr>2eVB`_ zO=qrPo@s8X#|-MgGRpfGYX#+#1wl9=Kp4&m=(-cJ)Lvvlp@PcTw;XQ_jq0qM3I-xD zgV=!kj&L_*i~y5BY`+jO!RR8G%aC5=a4d1jf9y2PLrZB}-zA9M5kGK2V}cJOoMYpT zxdrtLw(c&S>uL30eJw)M5i?0*oIBMNZ3hLHAT~fM0BjBhdXu*frHH+VinBODVFPggM|c?_%+1;Hgi0OaR!;NTuH)p6%0DAD$R1kU23!{yiFyIqkp3#D+2Pm4WMi)<90TM^TE$EGaB_DhOQ@? 
zwxg_RvejxjcBxJP(LrLfH1f+30IMSAJh23V1~8-N$%b2B+dB*K?@_Yp;{AAURIy^M zxBjH?RT5KnnU%KgAmwt-K_na>k~+|ETI|kEHswa8eJv3H&3e3cy zjAd7JjLjMk-GJ&CafNF$owZjlHMCQ9X`aN=SVrF>sgyLO25I&*rN?r!9D@hmohUBxaK#1Zxr!=k+UKbMt@~J!nOtVoks+MD7`0 z`o^~F?6DTB9KW>i%;BXW#w7zF14XP_M3;~i3b5oLP%TxaVS*<(~8PI9c2nv<=n3ypA zW7)FVHFsmj8lyis+DzN#zPN`jBq*%pJ~rV zNo7||iJ{VIRc9sTF|3=T4ypkz=Xqn63xXK%amfRxC_Tw5u_Tu}!HRVX zw6T9X=Z)M6TQUyvDNQ!<&0 z?&VcD+~hdUPI>&ECi>ZuRn?1``&-Q#q!Vfs>npT_(z-{d$=aUYSb|&nXCRZeox6n! zgD9kdVi`S1Xj5mu91yH3(nP}!jH-8>0g^s(^SEPi!0L&EQz>S>%}sz>y?H9yMH!FN zYMYZJ1|HJbBR<^x;BNT>%B#lAP^FB(p}4C}Qc0_p^?$0t1F4Zo8^{~-G2~$2AI4M= zVv$;s$yiM0TM@||r~D?th^QO@Ljp;_9PyvP@z+wr#s2b(KXFG!(#d9}n5nF(?1?>d zVKL>3{(w&Wm%$rH7(ICln1eK`3fK9KD{He}7ADf7wd#qTEc$4N2O~IMc-lxDf0xf% zk%2W8n})79u8$flwhoz1fe}V_mn)8UF!P^wGw?7vvaLyvsiBi8RC%X@1%UfZ>9q?5 z2i<@UF_VmAJF*8un2vD8mNil0e^KKN$WmuB(JG# zI6mf071435Sem&=$EQDB+QS}?@eR9RAH5&Ey z!xebYJh#kjiwNMT%Hwf4Jaf(oA3b(F&3l*ND_zZX8LdcOSZ_^cbz@#i@H)vb+=*+MMM#k8l`Ms6 zZrjuKJCejc;h{$g{^9{dLR4d(a!yCidbk7)n~h+4)RyJi5ZA1ey=^wC&WMN*jq))8f-U`R+ zGAxng5>dwk7&zz2&&E2=;N8@!xFfhpI?BgtDSp-sUc~J2vES*|IM|zuk_k|Ag#Z>A z<2^Y9fDFh_)d`DB)+;Nb+OrvI`7bl?Ns|~Z#l3->gO40|^V0+aVrUE?&!JOk{WYOk zYRzMQC2v<`r0*q(2Ml)ex7)`9f!1T=sc7V$Gv?BSH|)h8mrmd)vb@ZII-FruN&O(; zU=j1vew+=6hkBb?U1jy{P{HL^s|mBSR-6+%}AEBRS3wwZ9qY6yzhLk8xd?r-kKPMuf2}<)H~8 z##Sa$jQ*pYs-ywQz!>@X%!mP_J0T?MF)tC)<}}J20!L-PG2Qjx~pf&ROLgG9EB60X%p= z9P!hw17;U<)Rt?(YfMtKw2PoB8H^(X;5Z(_NzQ&hzfSHCQ8*BLi$pq{DPn6@gmA{b ztH!`hmn3^bAj+c#-JIuw4;?_U9nRt|;1XBWZl$D_cCjf|XxI-|+!&nmln5|D05=5u z^ntAEQHrl}8`?Wrpet&cciuenJ0Du8ZOX@$XUG9qX9REnT=F_|j>Q;-m^UHRn#PZ0 zh~a_?l)y4Cr&P0IJ=<9BaqiwQN{#_jgY(o1SE$$alZ_Hd;Soz_wR;CSS=MD&x^=w^>2GS-?!Ty=*V_6AZP#a#OL~p9s~jLZNOr2=%_bKi zJ;&Nk&~kd79~&!acQ#@U=ZcNbH}^O3Q>9(etRJFXrQRP<>#|8|maF`p)qNr$$dUvK zLP*mQEG@TkhTNxc1ogA@*fITCjs65Hfl!Hk0pPaI*&40QDwMT~!CEzn1EJ>=MI&9rXz)YKX;yY(4;C8n(!{{T&tu?r>`24LGs9CcO4tC;KpLEOcu zb$CAABfoa63W27B(MtljVlyHmDaj{yxDYTwIpFoo2r_^GErgve)M)0gA!((7WA!3f z%u%QgRh(e{q3+5IXN(RwsRFiUZQR&*B{*!PRBKtjL31w30!T{+Im6^Az$HfQ+=0$U zM2HnGg2hNI_Xle`Nnp- zUpo)jw=NJUvlCVC-r}w!y;tc(?#TN{eO?%aWM|q7$|5*G*e#S(i42~nBLQ7jtD^t)zr`c%{6R zy(d26s-bYoGCkiXKaQbte^jUGIs}TIh5a(!G@SaLodUa}kNw@mWVhs#=Rcma97TOf zWy`8rZ_?w=tjBd~1kMlhBs{(!`ISSd0L{-QBidjSw%#8wo zxHvxm9D(usar<;b->Gy{>Q6?rHl~tTks$YOA8LdtWB&4ye$L#P?j{W;RS2|ksj z74%#zNe)9OY-5rKo^j4kO~r|!9?ctsuAQih@v_Mlp$suOf-@EXUB2Cp+(tQWK6CpH zm2fhfGGK=4Zz9+A;jwNgXN_c8og_Jq5;2xc_+ALd&nKR_p1n$fAu+YNlGT{>?K|&b z4d^j)?I7KZjhNbTkPb=Y^~R3ZxK_QupXyUTjpVZ%kL~+K^ljkR% zetN$>cP6WE3L1@Cm0*I!LsGnLE5QT^sv?XI0wCaydHj5QbdQt;h0w(-s@67a(Y2<< z1G5XO45)FmXZoC`6}%r$fNfOhH1)3Dk}{TUNi0#g6`bQ_a)4AfurHi| z3BkxcNatcY>S}BtEtp+RZ&lP@;i|GpU`DXRFkGU7H|ACd00WHkljlAK0qr0o1MN6C1O4K8>UkIqm{<8gY}eOyny^(@nrPBv>I7tgr;rb7G|UMgdve&v z#xgqVXR7J~Li&xBH2Y2IT%v5)cVA9FU-~v!9ou3gZa8Faa0eOvx*7vh(^cCah&xP-%;3or24gaWv!@5P)Di;RdZc{oXmMD2*BF8 zA&)*h0gUx5n0V|)86diSRMYW>tx3#4tX2?un?j!~ zPB;M%$C9MyqD_m46b(Q$%~d3f!#$?0Mpam1aW^uYZ^?2=3J&ZQILIH{spHLNr!rpS z2Wy_3&l6G;NU}UJwY)AvUL zw{bZGBWU9YNycyyTkj~<>Mw^^^@x>aqxK6MLSs_tEUZVgEJTIm9(mvso(brH+`keo z;EFQCX5>F_0Ft!sKg4r}jFMGA#!drg0~j7pk=4kqVf)VdrqS70*_x~(A6T|aNH=2*xsKz;c8+pR4?JL%J?cr@cQz>q zR!Z?f8H}bPgs_rjCm8??5yNAT$3H!H%O`WWYT&b3C7IG{$s#iqQ@C{}1=ft92=MB9uA_))+EAa3Wu!OOBGeqd8wM0&02b#Eox z%}s7NE@5!HTmXHWK*&TGkmH3J^NzFpv3D#>Fd|@S?WZ`oU3Hr}d)tn*Hz$RjM4i>0 zodjWut~Q0+?HL30ZX`0+#C5)!4QE=8oUp@6xAUzg270b#wIV5Atp&0twSAT zr&CQfwN$ArGss>kW>WG&<+6a~auq<3ew^Sa2a2h^84wLrnr7CbmY$-P>*?{t!KGmG zZHf@sauF1>zw{)VP@h<5mfq3Z@jH%R@CiSmdwf{j^)5=q_GujMi?%_ zbCy&%B}V1IIgvm0w(BGWaQA%=fSSlXi~fMDh03<+?=F$2efGs2p- 
zP+O>h2^%uFG|fC&ip}VIisT~9m!Ym&NQ{ysYZ#QHw$l4@eLchg0}crSM{QtEEI^=P zI=lA%pCtEZV>IuIoY=a2SUV`;FSY7IqHh-?nojLsXb~C#?;wny{b(TMI1_0LmpJ0 zXq9%h5;i#vxSlfIPa_KmdgZjG6SG0dp)cQ`F9Hk?xfhp9Su#!JcQpgu>-c#MX zIOD>uJe#B385Ak{VPjsUQc1&UcV#Zy$&Q=Ipx^>GBy2pX@^ks=n>89DAjUrO#ZFrF z&={oj&g^U$!~EnNXXDRax&=MYJga}{l1*;a6d*L7zE@~=AJhZD{{W72)bf%}^(>>0 z?U|C;#W5=EQz}OCMnPlyeEgsOI-LbIQNo$^MYk-*D0MRxZWj{Q1)Y?i@q>??6OMO& zdYHsdMmQh{$k`Cd6>=e0_o~S$PyyS@;QZskKc1zNC3P>45MgA*?Qt~3w;a$|cs3TZ z0DQXuMs{b(nUXd$(0B|qtsV*rMSOk6MrsZt7if=ZONPrmB>?^t_LIIsOA8R zS!VwHnp#&EQlhaudjiiKQCSK>a}qC*0LjnK87IV9c$oZ$Ug2=r^yf_K5sVp$pwdlUPPNWgVw8mc1kRldl0g8dV zlY^h0e*>);@qxHYlc6Woph;D(NbuHyLWnFp-q%y%z|QT*4T459{rad%Gmf}V_) zr?ICg5xf-PSI$rPvUe+-k(_~!dKtfNr2^4|#{q_xp&o>ESZ!FDjFugm!r0FN0-+h& zbD#cto-_3IFF+m547S$2nQTu_PsC+jc;Jx1mpfJkP{3|Jt{j~P^(9G7hKb{+ZL?cvLe0R4GF5_ zYsCy`pl)~TgTUS*p({P zqN%E~-kG&+c*@Y(Mva74j>Z0=+@Nj%1y&~*By*0Vg;X}?kCi#g;uXoW|h_E zor5HeTw^9Mcqqh$RgqaehcnH2igh+Y@BCEC+OKJG%>j1|Dh3+kQ@3C;$5 zpT>HmS%j6Rh8sm8Iz%K#3^GscQO@UP+(9RIEIfb*OrKN$R4?r}QL|%Dt6n&r8fEmt z#KB4Zc>pm|Kse9%Pd!s7D%3(2=*F8gSe7N@zP>q zG-Wt|C_=1NhQ)X!jjC6xD>Jz={{T^&1CMtBN`_<4Ho{i*E!w%L z)I^Qz!xTXg5k@`O9_7gg9z1^IsWR76jxVVpjjifBJTS>Z3UEihRfAxbCg3l)A7CIT zDo##7Jt`v}<52J1-dOc1Ua@DRZk-ugc&ouM-7<1;Ndp5Ua(Kdnj;a^bR$*2o8g!ni zmfVq@GR$&3Fjr(oKA2Sj+^NE!_i=!E<0GX+sW)NRF|E0w`(UyCtY{%K85r$WlYhZE z+{YyC@>_w`S*|WSlU9*0SCOOB<)>OV0XE1O*c^hyV3ff=bCHwJK=-JnN}Dl!As~gE zN@bMJs0>_hC5RaS@G+c{Fn`ylMI*SZqQo&xnGJjPWs^C8Hwv{K ziEUWFD?%u|Fp_4F7}psM$M`_O$^4Kw9R^}e$W3enp3+^3BCjMzW3qv|76WlAcH|J7 zhX;`U4tmPUtooY6P^h;vPAy4WSJbRQ3U5f(xv2V|XUjCe|t$u{Bi+rK85qNKi@uGtcQG&yKlJo}`rADtlT*d83JS2y0s_Au*Tt zqoDy*WR=>WkU%4my9c7jRA&QYQEWP>qY>0EY4g*C>Xh{?qi57r8Qh7IPS8iNoPm>{ zgc-Q?9k7Bb5ghfcTUqE)Q)a3=G=nR>*WKwbT;l|RjxabG=c=xDJCMzQ4KHvwcj?zc zvQMg2eV&@ifm9=({)|8dAhBReag&^d2MhC98oEI2EUvImA$UGJ#kYwz2asdvJUc7gl z>5%EnlFFsv8!#L(_b3dKoVT|da6$5{2M`Z%I{idnP&uI0r-`k{>rASC@P!VpFd2r4#?ermX{_X#Cnb#q}}5j=W~_UD;p z1=S{f;urlTRU6u>!JCW>`Oi2S2aJ1@Y^~5SgyyB4M42m~mN!`Ab#UM=a@okhP!2{h z&mMZI!Z}k!sum)}O*+0NuS7=}9+nesHeqIYIQTk3h1ZQ7WXr+=_>b9l{3-fu&YQo)W% zEUZT(&PF=p;OZO-LUlaKwslkwJP&cmsv1{{CI z+^<1$YtjnuemIGhCVJA=F7B*HD!|~E#~v~G9Yhk_jM!+{k?MEbPm(IhVf4w1NgaA6 zOQ9|f8JMY3LuBOqK2k@^Tc zjoyuMj2V9qW8+_7ttLo9Xl>$1n_W77$OBV&L;c2Hj#&Jd6f87Hldy$KsVh_azH zlC67|C5AEk|}5Hla-{nA38saERIR zGLj5Qy9zQgaHFcbd-Ve7+|3)p0Q9K)uTT`d z$4tucMr)}x)G3l_31wmd#uEyw&IWOw4;=MZ>f)}A2-Z0&$5m=aBD$z5rDFkQ&j(=n z@_8qYr$eDAR-_hw?kzec$6@y!J}tAw&AkTa^7 z-Ip-{7z6F{4n}-q#(L*!?kFVn0fOxIEka?Oy4IT}!a!14i-WsPb;_$>gs)JjN%E zQ!$W{?jcnBiN^%~bJB5AY8Ah6MxjQHd4!W`_oR3gW;Mfont9rDJ1}PiQ{qQ*MT;7N#v+2$vRsBz z%@l@f4-<@SY_GJOxjEW4j{_$i19vPN?+697!EQZ2M72*{u>c7vRwHIYu|P>GbCHa3 zk&Nf6h5}8Tw&Tg7n#~)@C0OB?(^&ztosy_b;g=haDoGac#IR5~a zNj|5h5cp)Qt06P(U@~LIazRs&eY?77SXkU&EpPp6+P7|fDuhh?mF%ygk(C~vNF$Qp z;l}0p^PY_uqa_NAVy04et0c8-`l!WcuAuB0J9nam0G+Fm{^?(F$>^vj`AIfXBnH)( zXGDa|7R8=aiA~5zVZr~BPVDt0T z`7;x7fD2U^X}WZjDpqq* ztw`R}8Bn9sIohNTfE&+~&sqSpBKZfXL|aFr4jb6m3Ue0E!C*g9;9&d@ZwIU4Rpm`2 z&QI=;NhKQ5F&>=zV=P=bk~IyM03LTA^3Q;Jncy1kb~GecQIyVByywyqTxLmK&d9*$ z8=t_y;14GQ_XDQ-B^MJ8jE!TCTbCc~k3QxvB&m*q+a$hxo`@-4?%LwDMW@75`vm}1-iWmd{ zdB===^Uu#g0Q6;4l^YNnx@^MK73u`314yotsdWVd46a5$sB%W^mB!>Fo{-|RLJ^X` zbQyH5GZ*BVCbMDV1C|jg#tV1@!R5DO^$!5&g4jUO)KilFs8o{uzRffUl>)2~s02I6 zHxQ&T*hvInZ8-`vo;smhPymar;J?fM&RHheYQ5=9Mn_g8?+ucJ2RH-91_m*Y9ZhiV zRAtmPuf97lHo( zQhE#p+;B;n#Wji$iJ`Ab)#=JdT1Zc(KrR(ow(oTtOo!SJgPa0SNX{!!!k^-wdYWa5 zms$(IWXmC%dC~ntD{oLs92O0bP=S&#Gs);6j4t|sqzx^4NlmE1VnO#Fk`0nHZ6?oJ zAi(fRQ-{Knk>GGiu{&PosXwm|Al9uVhMq}ZF{A$ghhcGyoGy4g0L(^DOs;yIR1Ju( zS+6_Vf(X9mNWqISuK3)pOKfex90fQy3Oo#)Vy&I3q)s1WsT=BTBtLTu@Nbes24s)Z 
z+aWmE1OBF9gV~P&}h3RfVMF^HnF%Ca#+pVbT)q5wF+ zCw4r6)Zd7jQtxvzT(52BwGG)Mp*U#K<6Wy7s*u1C$`1tPfd#Rgj~zppKel#7JDaU_ zv0j^fwvSa-Ad45HC@}y|0UqEF&IUaA2R%14Gkl{W)U_QllvU>R?MAUl5L&?psQl+{ zGs1v#{f2%z%;a`NzBVVg4b4Gp*tHg)BcuAt&bGny#ts7lIXFL_GvlZ;pHjmLgK{aP znY8%oikEc7VIs_9lr*fl;fNrj{5C%asJfqWxS3DwF>A{;C?dGj?9Zges|@U`Bgr}( z0Cu4LIRlP7ZAKMt+X^$Y1N^IbPy+~-t@r7kbX+%1b|0O*XmBe z1Q@?ktu2}`*Jsmp^Mpuh!lWynH|}*j;O7}Q{GO)3gLfzit?Cz9<%Vlk?^0MRD`GV= zGoqFN6g-@^SM?8R+mGL;#z8jFH{wliQN3>HG-R<=VTXK$pfVpBMM8v}A9hde;NbMR zZo~saacxSMHH3S8&#CG2Jb3kBuPPWPat7F@@?3%yxIW|OjyiP4N7P2)a!G{Jw_)R+ zD|&qzH8w}~u&77Q2?h3oKv98!etJ5EaZYQ!o~s57#!oz$3c)2aTd&J3sb1p7R0rh{D8X8CRn=+954HaIVV3k&ssnO zQ5jgx=C>S8TG!uL)Cosb4H(-S3}?h1y8@Jxsl7YTzYxE8n#mY7^=@#MTj&D%TQ6D6}_YEu0ORB##Hc_&rF1 zO^8XaQH+opx^Z1UPKp)&pxGG_s@` zr*dytg?pDEW-{2bIS>6z?h&G3V*q(4{JJ9%W4X-hESgLdYs8Z@^2K^U3b`euRn9;J zf=D9(f`1>5wJspLkY>0*tEWp9MyXi(XxrmSQ!b6j&frc5C;YxTqo4xd2BW`T3F3y@ zr>hI2A52CS9Fhie=N~`o$6CcushcJ}#mx~!u=7A=WRu^)Z%mVc6b2TZu=ZcUw-LayoQEtLC+ zCP@}8zM0&oZv?xJa&U3}dFzM-Mo@aFqMolzol1>;LYy{esx0M06TR}gah^_2GtO~= zo_d%cXzI5#v2JM6w%Sois5D5dXN1KKcgazPAQL7Fwj09ZC!niRj?^x0)M?OJS5%*S z!w^2auFBZJ%P9RLI0S*o+n*gfAnd@I2^41b6YhkHvBB%uK=R75?)!+giM zZEj{6+IrHM;ij?2hGsB^RT#)*P<^>1d$N9b$5q0&^no*zLPvIZEU1QhX<}I$J6V|v zZObZ#7$kW=9d|1{TT#^3#NNENC7tbCaVaw+ZfTp^Mslj07EzF-=jZX@Z~|yxyeLVh zaRM5Z!^<8$Xu>!i2^&c{1QLAy#N!VTC7fz+O*1F!BhL#f%LZU8!@pY}P@gSVra@lZFhdoIA*Y z!7Y_6pk$~A&f(T(%I(ykRK0gIr(TfP((TvQ9qTpELcx`qF(Lp=nMVZjF|_VreY}jG ziZcPY!9duXS(Yfsk4u{4T2)m_k=TtIsBS=FK?8%}o-%Rq)QP^M1_QXlqDYNaI`G`C zVpyYjnn8)O#O6lHW;`}Z;2eXGJv9(2Pa=~mvf8k3>Q?l_aw>9cLF=@w6dbT%s7S*c z@;}|iP6tS@eag~+0iQt8*;wS$^eul(v=anP7>U(*1ZNG)7Req!P)-5nSIa_CKmy6R zv}8|e)|+0_%G)Lr%-gG&=4K$W9iU3%{o;250Rxal5ie4CJdy`eb6Sj6Y&^5QxC18i zEhMVzfC$^l;CTe#;2h@|>chn&axI_l08Kt!Qd=@unk!eLnn@5m;6)ro$=Z?<7xaKI zqbs=ckU&lhbrj?zYGtO^q_a4qQsi1hb4m9G!n%tYQUHv{o&NwNjHx*U=Zr1Zrh-%s z_cG0HvaP7&lUAAn(zuaBp^h?C1!f?LNZ=OW24UkJIKJdcf_8Nyfa+SDacXeTtgz$P zRNf9@dv&!wxpGY>@~?W5@}ezR*+gS1dn8dMkYuG&1JU3V$A=Z}Xqk&-r7g`Nnt2$?m0U zeNGtCS@jEb>E4#W5mlN^uVCeHhZw-lIOlNswy!nSU%0Uxj33alLop4T@@F@XciHbC;MmB<@Dtz*J&yO8fBN6n7!jGtnw{%;YbTnp`g_%qexhXxB zmB|DIXKxw6{Xd^4J$5@$5uUr7%c(6XT3gUYZJ=hXjXO&rEXO<4U`9p($mDzw4^du3 zni1BFoZT`d==+mOiFDahJ4A+4Dh;?Kp)5hhKyN=d&sIkQ#5q2pOB&W)LCkjR>JdSv zu4QQ~K)$Taw45w_kOn*f;~flvk5fK3y97lO*wr-cKD>JDdOGQ{GM2Ftye~Oyl2SF` zHgSxuGsitgks31Wiq$l>DAu(eq_IT$bg_C;#bQ$Pvw`{yn~n(1as~zleB+=Njmg<2 zs$$39*0EN^x+OQ$WrKP9i%Pa+BY@jL$pn1(!Nxiv*67Np1QQ&{X~|*=^d`LoE3{^@r$^CHNs2f3GSM>S8`RAtOWfvKT>9=gwu5awCb``*Z-HCZf z&RKv5jky3E6UXFrPoCbS@+d#L6AM#WYF^Z(vbXG`LgpymP^2-~6^7u4-ZH#nJm;rU zr)Efk4T)o0?7Drcaa2fEw^giXm@}qW4+A?$B$J*`@1CDBfH?@b@-Z}b7bdcGD;6|& zlTGdoYN~=_hA;#T#y1u@-~z;)a(MICE>1$~QGin?xA!w%sZzy_EJGTZq*!V^alC^V z%!GrI#c{#O1M`lP5jCmJexpaM)?;fvnp3+Kklc@QkTHNy8TSBjpOMf}yAhNd+$I>S z#N}P&mDds@`lN{gU&k08PCsw*>p>D(Tk5Iww=OLvlT6YQ)KX_}pqN_c!C($SB$9c? 
zPf^JHcCwhj9muIqJJ+{fv<|kTMZ?J_`$q%@CN;p_$-wc*>BdVOglvorMm7vMjhit| z=}QZb%T}O>4NpT50C3Mxs+#SJTQc4YfhfAmcoL&#gH!im9Ao zYqJNfsMC&lCS;PlOc0}4LAp;WFb4ph1_vH9^Vb$y63vX(q_V=$qz(5eR(CBJ0}e?8 zBaT4xoPI|=aLdQ3FP7z~$YtA5dQU9%(;lhamSe{(0Lyd0+CM!@=CmXLI)Yle_on4Y zYTBtFh0r^N+|B@O2-||VAa3B~e%b0I-sMUFH3hE(HR@cRJv|==IG1dJ+~i}yUTj$eZkf_=k~ecWJ?)Cgm#qQ~`F ztw2cveaPy`_t3jQ>upk2Xx*eFi3jRnN{~=zgU=bq!Rea&oXiQSED`B#H{469G$~+i zh!Q;~WeOMv8DiP+elwi&$73`s#UuDc9#68Gb}L`)&H@=+;7fo=UOa)n`ijz+hAh%Ye_G#If3#pbM z?*xR96z2nxxBv!lGoGDW?+{llPpdp|$!a6E%Dh`u!N72%4URY&K2Mx+k%Q1VYq&mB z&1{H$TC9^BE3uzxZ%fs1Ku|+qfy*4=a6WN@A7huO_onpp?4S~$E=gAXBQ20Y{^h_R(3#wdweASDR<*NC?!6f;Jd0sBDG~rkWp(zC zX~18`2T$_F?khPXeb1nV+qIoaYt^1lOVAU?Ut$1Uur7(Xj41#q$R&O}4j6v%)s1yN zX(pC$UCiHZ#hf#hK_26i&Oij5V<4V*>wzPw%UEG%5)v7r{& z)T4${)!>5TsC8LPhF6UGtOnDbKnQdGA+(NqA%TUlSJX_+nV!6B6G07_4lyGFHYq>j z1Prhtu)Jp{o;lD|P`N*JC&_9xihES-%LIz@zpNb?pL>=iK*8Eb8&4VF=N#E}`(gz= zw*^~kIvR5J(@f zC4@q3gye<*u-%Qp{PWZ?sjj443)wQcS4^i;S+xrFUhMHkLP>3bU`GcHjE2q!2f;pi zapBRpe^3v5khN`bFPpU{l8{oY+v7rHJZ=b70J+EmK5?F=JW@|1H4KV6jUkHs#kBj7 zLrS!c6+;(iAN|aEc5IxVIXrYifGMMg9-_@!^iX>Abo9K|Dy|$tzOE0>)!Z`VXOc!d z{BxRfXA1uJA;2-bt)s>A! zBX(pOwbCMr_KgKSUr}dOLivS}0_3Le0DwMDInPL>5{IUX2BkevCOL}UD-fE1$jHp z-`k+K$^~zhqMq_vxjebCK4Cr8n+H#hPu&4oPda zFBh*2rLzM1?yLYU>c@<3Rw1yXAY-0SQZ9B6Qq{()HWO;%ymB=wcB2dJAyU5f$MqS; zGC|;fbB~Uf8KJ~ZY-&srotwz@FKWxIXfw#n(Z_j~Ng@Og0aeIck`6)fj~xVVEwdfm z72>maE=x30y$cdFQB5RPUEI8K4cvv!PBIP;S{#pF&^>T1knt%#9OnK(W5w zK6exT6a9_{N{L#4=-7t+k+pDFj(5y!{s!CXHaXg($pmnJf8VOIeWx~GVgY7&<2K=f zTUV?qY~6wjIbf?EWHm>!_N0Zg3cfcK zBm!GLaBv2H4O06YBhG0D%4m5oMKF-*c~rm+>c z5?E|t7LgD1!vIJjut7U{P;dg|`RPJ~vnb8?liRl%&1OcbmMcbp0+0uoDNuckq$;ue zlgR0bsU|nnlTCtnl}#%%t%t9%r4nTNVX|7L@ zcCS7G!({QtS3tl~>T-7%$r^pcG6%gqzeLs&Z~Y1w9hkujoPr4Q03@EWKXSt$5(@^U zOFw-)H~kn{i~AXvE;4cF7}^HW`RAZhh=`;y+*36~YpG_1xS*G6xt1#kCuo?v6bM%< zhWC59&p$m&lQ->}6mi=|w(3QP8g#ZGwpHgvY1kI_k1`T@pnf!9IUIRh*bO)}=aL+8XAe7NM)a^wuz0B-7`zmMZ@Mw8}Yb zAPu+>LT)U>k+gLogLWq-V^$2a%F@PE$5L75dt%Co1cA_a+JC|bP)-jGk-#IT;6y6G z?8a6#8x^I#H2P9sXrYam&bwAw04?dpk^8U|5OTwIFfqyKyO1tR8iG$F7fi33dTg^x zcThWxsqELw`sbX5BVYlxsK!d*fyg}}Jci|eZpTo|7p13iO8SMlTI`$P3C=2W;m<7d!u-yDy9HR?`M2-j4}Amc8;t_?n!ag z!Ynn3B&!v9Wz=lENRJp+9?l8co(U?hK;sz6Bgaht08J4SN)5m@JFt4!9<-FLY89bb zMR{Rq=9K03d}ZRI3n&kXU>kS#o-v@{hR))Pq=;?x2AqNX(ElvHD~Wd#NLK zJ;B$H0A!GQh{;VT`0Q#yEZTmkFWst^;E|PPd7**F^g$&YZ2tfND#rkTbK{<@a|*kW zTx4j*8h6`Ttt8c2eLvoI!m^1ZjPiFCe>o%M(P3$!Rf?a0uXhgV0QrbrFRC zdlMC2!pCk|C8=t85(Fvs=DI0hNjp8`1LO}RpBX(9kXaCsbSkE`wG9%}T$$(7u1o7M za{J;cc);0?1~5nD5Po_`BIk0gK_k}VMw9L#tzNPdzCw144tD~6Jo)+S(a)SHqde?5 ziT$t%yS_Wp){S&%?&uZ|q+O&6TjOzDfq{|7lb*a^)HUC!>Yo%;O{@0-x@>6>v?k1! zP^1+Jkgx+GODW@kSp0v-q<6&DIh9&2_)ut6NWtDxI~oPtX8 zpP$cMF^25Qe8Qj@+fMr`x@0yiYdyNPH#Ae&mO&!R8XRtof!Z596Y>4WM<@%_RVAB3 zF|DYr%{s&vE3TVWmSF^uSl-fr*xV*3jOTy`agaK&A5EBFMuY~uFjbBjEL1ivn1fcW z6^GgYAE}wYzv}VSvSA1b1{GpN2w$FER%;r*rKnWZ86z-LiL)Ac0D~Ai@JkMUIXzDr zwepl^2qPO7tvQBIwuYI1>7&WJbRoD5Nl>64A19*5`tAi=JKWN38N{-Z!h7o&j>n;6 z)R->ZlvtDixC9W#_UiO1%!X5{20o>xEn8M>fm#h6-52h2wtFY!f^tiEQ}Ofts<^@2 zq%wqjD_GzD8}>NeXQxv7Z^h!T$YaTz7H9kS3h72s#i2R$zv)CxVxQsoNTP5F+iLcXHPs-~bb2=Z_uC9}McGwnF|<8Mv! 
zLD`g}T8W^mp-rf1x3&Eu-1>sZ)#@4QZ*@|m1mhbQAd(oL&m48Ya65#l^kwef-`de5 zj=rX}YEDu!EDE5HfOE-q@y2`u)f)Xmpac1ptCh6fCd_qJsl_yIGkY^#- z{PT{aaFM%=GZ7@yAg3Oy47Fg?t=I)sXSKK>EA7IDVlvq|W%JL^Ll`7k?iO-(VKCUC zAFZLE|Qv{8-aczCfmF@#!Y)#w+=#lEZ8g-Xq zkh@H*-xfqXOmP&@DFYI!k)9QL6+}z*aT^Q3DLLX(XJvY@d!f z>KK%(QIuy5xofItu93$Oj#Y(3CTBYoNHL5832bLM2OsCww0eOZ70aa55M-0qK+s=K!e{g!BT8IhvC0!;$ z+hm2>NZ)jFyqi`MRAGwaa|6#OBy+&ePJ+eE`CNvmTh)b`V`_GzhtZZX>N5o=k9G$b z@wEB#(R39V9FZ5zigl}r1ac%w-mJMNBmscGI5{Wx=p*rmCNOq;l2|^qR(QyPq-4x{ zIRp=%7yy5tJ#0CcixVXQxh)wU!?R9-0DdwL`E-m3HTrU^v=!HniYd2zi9!A z(OCpU4{V!`7>pr7;NTDV@y9`EpQ;ip>P0k#=aQt#vV|yiB5}B$Fn&Dz@$>Q2@;~3G zcsHn~#+Xt%(?wvVS)F8^1ERc~VM`LFh!|cJgXcYC2tm}=txHA&sZXg{rCOrt^SsLn zmQT2jGb#o=w+A4p4ecHWJf5598mKYiA9zpP#a212SBgn>DJ0k$w%KG-6;!G=NX`aH z{{RRl9y(kY+O7vPDKm*6*&$fVss!`_A<&#QK+ieJ&z$wPVj!Ieb6PB{*DG19VwD|A zI4vnr!5BEt&%oekreXl&9HE-Ei!()9@}kENEc^Eo9|3^}1CzLaY#;5?5`93WM{rF= z{X}T)S>0U9k<2VQY-brOw;+|zA0wa7j)Sd;>&KV#mFcTb9m!WtjZK+j+aoP-BMfrb zQ~>QGEX3gBKN-l(*vQzG4to1ZC)C?V)1tGfNoIXP$!MTSOMTR!vowL2`$y`|Fc&9| zMqiKu+BY(H>MoLPM_pQXblVlH#$$@>!*eYml;q?RHtlGb`Cc+|I2}tPaul(w)x%6W zgca>PTCB0$fG?|2I1!e96t3nuD}@>5usn0qfIyK_3D}AIf^9oW)wFoKlTn_=qk2Y( zC$$)?9zEbHB18dqR*3RPf}C;(Qfnrq@x8AhsipmPzOi#qsNKcfdL&ew&m`*`p|87m z{-k>t%0S$}a56`pq|Zj7OgvB0He?m)S+bGTpzEksv0@PNo20JAeC|OQM&)C{1-Q>3 zh1zR@kMoI^#?Nu&b#5|TT|Ui~wJe^j^ClK)w(iE?D!gqOz|PMYJ!l{>h*+=YSFIyg zsd~Ll2UwCxY^2RAGC03vf&FVGeESr$F5I^Rj-bJSqcmexJGljQoiv%wrEF^2jIa=p zM2#BG6~jri9004iS%71L3W4#|Q8@xy2HD~|o5>}cvdKo)n$~7;WmqD_W)P|w8*-JA zTP27gj?zX32TUDE<54S)$(!lgtC*Ib-)Ut-%o08B(wXu4c*lS_zy}7{&;bwETRNLZrdV-W3U)N~ zTFU&Pt{7mI{aDXbD{zlwZ}%jMhXBvPTVkF+@9X zBN$!841NLUnQ(7XPCvA?F?njrLKd$eL0rb`h(i|Kz$y1_Eyn;H9~@^^G#-S{l$)a~ zmuo{5<@C)q%=M%ms9_fzp99$3008oQbx+cm6F9ESqeH5F{{W}=20O5%N|Eg$(tNMD zvM~0Jf92L(iW{k@5W4p<7Mj(Z*riYIbdwO@G!CU>V0j`l=R5!hu<1y+T();fv;ezLECKR-^!W2`%q*aa zmH{Pq(cEg{52i~9 zMV8Yjo~VC@1Z@fiNFR@mvb~nMrLzt^Pjb*T2h!oEU$!)ga<`~qm9v3kAd*$b>TKb^ z9y+jd)XRT)w9!+LMX1-)wGCESWfMa3SXU7?gt?_ae0xTA+~hXrK&U0`=t_8O6+5Z7evn_ z9tPw1z~J>4uw&z&2sA-fw0ACFyO|i~ySnX@$PP#Z`*1io;~excfmUTA{Z>6r$E{yc zzK^DfmdJ@<+F^xqMhiS}Uny9W6{l(OU{LyF0y4-<)I* z$?HbUMmxExy**i~&qmG3;XtBhHJUAoa!Es+jN>@+2lwd|65E83JC+$XK${jMB5)@6 zuOp5Fesi92`*pA&B4uWyW-A(8c5_uWB;^&PF+>mOTmggdayb70UZJ1I1Zq)Uy~W@8 ze^t3N#WXa8OCd!mn1`pddq@ECM$ky{p0Z=Y+_y9~Vd`1rpp~f6l7v>~d69i@R!#B8 zjPf(j#@rw8*0)L)A_ll7*EcRnr$uK@Skg~NYO57Y0AD8~;fKyTnkx}za68FV{m|WwtBiY4=O4Kr zx9!r939}y>kZx$Yg?d`Pr&CL>R#+iJP?vvrW>8be#z7tiGmP{pLDaLI*^adN?gV=7 z^HH(N74*!C30?^}0I|k-{r>>JRZv^00>bW3_Q-;?vMOGjsg7w213cgRS+?y|BTYi}9 zDTY5taWrI;un+M3^Yg*##|t*3%LVr*62%)rW{DVvD-M0aGy9(mK^}U!LZXa_qqtpx zc4+?Ac`r@1e&uD~?kE5lqbYQn+tp&REsLf{drY*t0`f9{`gJfX1 z_yfnDruGGiTyMymjVZx1yBp>xwKdqo=LGSEIl%-Ddag3n5Rq3?aUPn}R#RZShCq%Y zkSGiedB;Db9Dkop$a|3pNxxBjn@?FP(2H)^(yHH=OIuGpnH5Q=Bw)1c zK_C_bIKty3`09C1wq-HnH*Er`S*liL`X64`8{76k8u>yxg4p;l6dQS8-Xt1yPc$dk>mV1 z@Oo|r2tx|hB(Efbq;DdBPl7Tscp1(Ma52&XM^liCvm!G*5KF#KxU8y{V-YB3ng0N0 zQ~v<8fDV2+>y=D##F~lbxVltv-@iQicr!X}Swv!K00n^~1<3t}&&V0-Ze;-UBH6TW zw6Ri?NvKCs7XF|v!0vX=WB>&#$jCSY1LXAwTaJf_Fw44?JvpY%q8e!dQx#1q zin$eflh>{Mf_Sx9rqp#ASJ8wqCd9Cg3V>J_@(4J|YywXh=wrfG7swn<>uI#EKCyFA zyd;uD`*~vwP*;gxbcnkS;@D+7zCisz^4p zo_H7mv+#evRv1-!nZ9cfMDpj=&*H!7m(u?LsK@DS{k!#N>UVs>rdHOqx_zPB>#3a* zoUa}vnlHGC?95Wdd5kct6iYRC6?-+|lBdVLiEJpuTNvZ&57y-D*uIZm&_0#@PWsUq^36b6KU^>veUwNNrm5b2U0Jv~25Q#H(7?&$abpuqoJhrIBKh#J@=Q zXJR;JJ?b&+zpbB5JI6=w-sJi_-QAP*Yq#{S-mK>zQIvfNMsm?ERgWdi({()AHU(__Z-RIotp?y5zQFOyC`p z{8ard@B6*EH`Co)>O38-tky`h&i$9RgnBIZKl0l5Dbgf~C4EV5N2AJZJaN-#jIha8 zDWsi!Hmdj}Zucq~ga!Wqj1Q)8{Xgvv{eI8-rFTTtG%ZWKH9JGJqN{VVyLz6ux;?#X 
zU8Oy^q=FHD)nB)bY+dxyc?=3FOa;aUsxl!5BHrh&H8_o4;lH9wS*N#JPa@dTqd8Tl>A^T&>|k;u@Nbz$6eB}TH*M=jcdIZO)^ zuGl1Cz7+=~f9~TxI?J4r^m>k|OJlhfnD(bh>)2Rj+bnp5`$FMJU5ow^!8rZ;&QOM_ zQIK^S3o=Ehid6Iq8r?b7rCaG5G!YT-0VMDFg{ za-Gqct)uF#UaevF7u(Yd9M)uTVqK?&#us-xiSfr6=?E{gC|xWqMs!Gf9uwg4;>njX9u`$n@|aB)w`+Jk|>B3j=+Hz9@EEwd||wi(&7b$ z_ZT1@>JaoMh1z&iPIyBBA#zexecYEm06g+Kb{sb$jg1v1FS9K1QkKfvI%b?FZ&114 z>iG9Fk}z|R+XJp!BIUUns*Nkudhy8(eMt<0LdVq}){v+hMn@wg5J&wwfeeF;WdLkQ zX02{LF2W=hE?AZ0+R>;2Cdv0N_88Cq0KckZQpRz~_l+yW))6AHO0}6`*s?OQ46^dX z&Q0f+FX_n+@#8Q`UR44-&Ad%0WGCcJ%LhQ#OOIV9%vFjKa ztx08ZxDrPnoIWsdt)~Ok2X@hug4rkI!8rNpm20>l^3a*;86%Dfgc!BLy&_6V(`PVyUMmeQ?%py134fMpE*4bl}B+IiG~eDH^*i-m7P@JJirg7P$88Xn5HOcB!u*xF^l&o@roE!k7Xg`8U{j=2@LZJi`aeQ@>KebzT zj8+yiB*kFnW4CI6!{{V~J zagP8FxiOQYBr-99O{YazPX?cAl?7G+HrD-F1d<2pBxD2mB$LvzaT1G?#Y3%sErLe2 z*EJa`uG=#Dppw!t_JR&bQJjE0bPhWg03;Ix7W;ZA9@5#XsCx-ml(R^(md0}2V4iSK z7~`qBiWMo3At!PQu}fZ@*Qrv7@GWZ1AWhPjEr`Lw?+1c%KO?N*(fVZ( zL5$7Fs>f69ZZuF}$uvHhc64rit&RyGpV)Ep(}f=7+Q#G(SkxZf2_i~$;Fib$I=85x z1MOTKvi|^sqT4$I_P88ulz{&kzzm7VxxjjJG>~V$DYVRy@#XqE7H@X%70Af$H?tXGe z{{XH$o`rgVDvrz!CZ$a+g|QWO5(^R-_M)+4fJQI@B!B?%j;TNji<2E5OVmV>*EXH3 z-PNYFgkf1dNEOJ*0PVt`{loHl&yj3K$CLB8p2clO{5pP;i5!$Hv6fU>8aYH;i)|r+ z0Zstq?m70J4_TWX=t~9zW*1R3swn!VoW|~=(E!T7)MQn`+<}j@U?-FQBgZ{LC=O-KMaxaLEpfz=EPV{pM+B_Gab34OkNsB`B4Z9vd z0Xql);X%gHfO+emBFZBEWY;gW6KXKWCZJ5SA4x-rhp1OMRb>Hka^Hi;j~!KVYG%lk zderh))gR$&wSJ)eUe$iA=>Gsx{;z!=`hjyvtK0h9c2=VmQ>sM+*JzBgI=9=*sZxx2 zKD00PwPZEDSVXTu<$?6gC`cQfis#ZE#hx#sbu{LZu8-I~!=mfmwWrjS`f4<)uLV6e zMvyzTK@|9Q&#YZpy->yT6w1K;%U39hxX6Woj0)T zcF%bC6(sH7r`@;o>rJuhY+|Hh!b%vCXT-^52oxnN2TIV2WhYt4E{u>cL%h zn3qGK6|lWcywhDE0hiKLn|3bDkLK2D)_YuCBYJG=Nr z`g7b}rKI-tU#dS;^e)qG-qyQMxoc^sPIm8J-PJpR_35;`a?4zr*|{yKp|@9MfnL>w z<=rKWr;~6tQV!>;dy`kxF4~V)o>_&Pa>-&~v3ep&-HRSLWZFw(pK6?Zj-c0bIRM)M z5kvg8)RyKE>edgcL36c~oDb03xnKEmK00m(?+|`W;JvqEYA`LR1Um#}H>)}@n7%@Q z4mJ~mj{~Jd)ZE~<{Z55TS)H|wG-}nOio@2Bp|0dOIKjd25AWx!Uo5w9W6D2B43)ljfzw)DLkBmC1<)HNA30OB|yS8)=@jspO2!;b`feDxQ>R~H|Y z=GW9iLQC{?tCl98OiJ#pGP4OJ$tU#=0bVdM@PBTmufR%>7;ZPxAhoZUCBBuO#5s8* zQS6TcAoKdkcali~45Ev9V(9fqH#WrC6hoSSQj~9!em_2`YL1-BCKJNct!=>T*d=wTY}u5vR+ul~!VM z0Brkz1LGZN`9KgRT#Vkjg}k+45+swTf#YU~dH|%P0l2vHpMZWkJOoheHw@DBy*dpc z?P>2MRgf{o++l$oomDde|1_p8f;A0?u{PaTJp#XIo z+I5ZT(0XdMBavcnOpMM5&Tx1c=Yz*n%$kjj&vQX`?xK)t4Pq@TPkqPIbu!Dl0Du^9 zFb9tx9YcX8<&K2<3NUIgQngNNT5UNM$ekM`DCcnpBr*MmT6mB(GcG_*`--dEs&!kk zwx@Pzh9a*Urc@E&kMGGj1LLUjH#9Qpz0F}+q>@|quS+Saf;kxhUs0G5!si(Fx1N(G z_F!Fp>6+H-$zGj^^hA;*jH!&s6iMR_JmI?TD#Q6gZuRb1C~2oq5Tx?K3yEE3u8{ko z!utZrpZ9UdJyRwgN-7{~G-NetNvu6{_p*tWLQIT+m2CML&wqk6m53BwohCBg)F@iFGZmhVkxt~5zqTz~>#U=C!6!9rQ7&|LQSbXu%`E=}rcLL-F zX)ux*g(^}i4@-DMoCA^p{{V+S@akhN{{Tr)E*P4PT}ok2Hv6W9!m|YQ#AE_6zd1iA z$48haQJ6A|feEYW(_Mn@nM`~3L=ckfbMoH<+&?`Sp@R*>;j?8Ug|4g+7TqGOD}_vF zIX+JS{{Vikfwx9WE-(d%zDpHr%+td(cPr$UV634R13uH|rX<}NFbOe~cB;c&XGKct zv33&CiS9Nw<{Q!?a3J+5O&~X@=3_)!8bdJ9Y$J*aXnUeBcfQO zZ)olyIvy~f1GJuelk?JK$wyL{(2;ZMSC;h{?uChUG8b`naw~@Je;A~VmH@P0h?LO~i403POa+L0BVqNM~gvWy(zGmH`x zeEW}{IXz3|o49$IKc!-}ptK>DtH@zVE1-lTqyyo+?rf?2XFnZHlkF~Ifl?KtLX7qhbueHaRDZgPeb_ zKo%`CvWnzn)}mT7QjS^dS5k4&FkNr#ZHm)NAK0K;fl_0aS_r= zm7@j6(wxSl{{T-CDNto%PV5u8xCD%FKRr+6{Zh@I)X1iOwam*(GWYdGz(HesLM{gL09yuvhF*=_)-A}kV9wS^#}3hoLNcK zh0O74)M-IxS{76&-ce7i>=~1OGD?I00JvmhBjc%@exg2J2IBhEUcR#=w!~L88E22w znkWJ@C=7+9CH>5Iu1MqBxKV@DT}Uy=W*6#d>djFtD1sJgnb6EOr>sa)8OpBWzr;8s0D@;;Q2mV8#Tr(GHi7<)GI|sp0#5_w-gpPN@bQ>#gK;f@T}~7swnU= z@ID#Tu!;%;xnPDFs$H*sTT<1dG9OmC_bHY3v1k2QQM46Q-~)_#$iy10rgqC_gb9wJ zsnNS}>(#kUCrIJ-+i+mJhV8iau{*K&$T{Px1AoePa#yKE?w;|{J1UJ>t|hfZuQj%k 
zsNB+fOPp@n0T?I8oxhHkD{<&e7`#K3NhYFeTI~~iPVS0lT4l0gSFFCC_IVUClFmpR z6( 0: + net.load(data_path=net_weight, exe=exe, place=place) + else: + raise ValueError('not found weight file') + + #3, test this model + test_program = fluid.default_main_program().clone() + + fetch_list_var = [] + fetch_list_name = [] + if debug is False: + fetch_list_var.append(prediction) + else: + for k, v in net.layers.items(): + fetch_list_var.append(v) + fetch_list_name.append(k) + + return { + 'program': test_program, + 'feed_names': feed_names, + 'fetch_vars': fetch_list_var, + 'fetch_names': fetch_list_name, + 'feed_shapes': feed_shapes, + 'net': net + } + + +def get_shape(fluid, program, name): + for var in program.list_vars(): + if var.name == 'data': + return list(var.shape[1:]) + + raise ValueError('not found shape for input layer[%s], ' + 'you can specify by yourself' % (name)) + + +def load_inference_model(dirname, exe): + """ load fluid's inference model + """ + fluid = import_fluid() + model_fn = 'model' + params_fn = 'params' + if os.path.exists(os.path.join(dirname, model_fn)) \ + and os.path.exists(os.path.join(dirname, params_fn)): + program, feed_names, fetch_targets = fluid.io.load_inference_model(\ + dirname, exe, model_fn, params_fn) + else: + raise ValueError('not found model files in direcotry[%s]' % (dirname)) + + #print fluid.global_scope().find_var(feed_names[0]) + input_shape = get_shape(fluid, program, feed_names[0]) + feed_shapes = [input_shape] + + return program, feed_names, fetch_targets, feed_shapes + + +def infer(model_path, imgfile, net_file=None, net_name=None, debug=True): + """ do inference using a model which consist 'xxx.py' and 'xxx.npy' + """ + fluid = import_fluid() + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + try: + ret = load_inference_model(model_path, exe) + program, feed_names, fetch_targets, feed_shapes = ret + debug = False + print('found a inference model for fluid') + except ValueError as e: + print('try to load model using net file and weight file') + net_weight = model_path + ret = load_model(exe, place, net_file, net_name, net_weight, debug) + program = ret['program'] + feed_names = ret['feed_names'] + fetch_targets = ret['fetch_vars'] + fetch_list_name = ret['fetch_names'] + feed_shapes = ret['feed_shapes'] + net = ret['net'] + + input_name = feed_names[0] + input_shape = feed_shapes[0] + + np_images = load_data(imgfile, input_shape) + results = exe.run(program=program, + feed={input_name: np_images}, + fetch_list=fetch_targets) + + if debug is True: + dump_path = 'results.paddle' + dump_names = rename_layer_name(fetch_list_name, net) + dump_results(results, dump_names, dump_path) + print('all result of layers dumped to [%s]' % (dump_path)) + else: + result = results[0] + print('succeed infer with results[class:%d]' % (np.argmax(result))) + + return 0 + + +def caffe_infer(prototxt, caffemodel, datafile): + """ do inference using pycaffe for debug, + all intermediate results will be dumpped to 'results.caffe' + """ + import caffe + + net = caffe.Net(prototxt, caffemodel, caffe.TEST) + input_layer = net.blobs.keys()[0] + print('got name of input layer is:%s' % (input_layer)) + input_shape = list(net.blobs[input_layer].data.shape[1:]) + + if '.npy' in datafile: + np_images = np.load(datafile) + else: + np_images = load_data(datafile, input_shape) + + inputs = {input_layer: np_images} + net.forward_all(**inputs) + + results = [] + names = [] + for k, v in net.blobs.items(): + k = k.replace('/', '_') + names.append(k) + results.append(v.data.copy()) 
+ + dump_path = 'results.caffe' + dump_results(results, names, dump_path) + print('all result of layers dumped to [%s]' % (dump_path)) + return 0 + + +if __name__ == "__main__": + """ maybe more convenient to use 'run.sh' to call this tool + """ + net_file = 'models/resnet50/resnet50.py' + weight_file = 'models/resnet50/resnet50.npy' + datafile = 'data/65.jpeg' + net_name = 'ResNet50' + model_file = 'models/resnet50/fluid' + + ret = None + if len(sys.argv) <= 2: + pass + elif sys.argv[1] == 'caffe': + if len(sys.argv) != 5: + print('usage:') + print('\tpython %s caffe [prototxt] [caffemodel] [datafile]' % + (sys.argv[0])) + sys.exit(1) + prototxt = sys.argv[2] + caffemodel = sys.argv[3] + datafile = sys.argv[4] + ret = caffe_infer(prototxt, caffemodel, datafile) + elif sys.argv[1] == 'infer': + if len(sys.argv) != 4: + print('usage:') + print('\tpython %s infer [fluid_model] [datafile]' % (sys.argv[0])) + sys.exit(1) + model_path = sys.argv[2] + datafile = sys.argv[3] + ret = infer(model_path, datafile) + elif sys.argv[1] == 'dump': + if len(sys.argv) != 6: + print('usage:') + print('\tpython %s dump [net_file] [weight_file] [datafile] [net_name]' \ + % (sys.argv[0])) + print('\teg:python %s dump %s %s %s %s' % (sys.argv[0],\ + net_file, weight_file, datafile, net_name)) + sys.exit(1) + + net_file = sys.argv[2] + weight_file = sys.argv[3] + datafile = sys.argv[4] + net_name = sys.argv[5] + ret = infer(weight_file, datafile, net_file, net_name) + + if ret is None: + print('usage:') + print(' python %s [infer] [fluid_model] [imgfile]' % (sys.argv[0])) + print(' eg:python %s infer %s %s' % (sys.argv[0], model_file, datafile)) + sys.exit(1) + + sys.exit(ret) diff --git a/caffe2fluid/examples/imagenet/tools/cmp.sh b/caffe2fluid/examples/imagenet/tools/cmp.sh new file mode 100755 index 0000000..54c7b48 --- /dev/null +++ b/caffe2fluid/examples/imagenet/tools/cmp.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# +#function: +# a tool used to compare the results produced by paddle and caffe +# + +if [[ $# -lt 2 ]];then + echo "usage:" + echo " bash $0 [model_name] [param_name] [caffe_name]" + exit 1 +fi + +model_name=$1 +param_name=$2 +paddle_file="./results/${model_name}.paddle/${param_name}.npy" +if [[ $# -eq 3 ]];then + caffe_file="./results/${model_name}.caffe/${3}.npy" +else + caffe_file="./results/${model_name}.caffe/${2}.npy" +fi +cmd="python ./compare.py $paddle_file $caffe_file" +echo $cmd +eval $cmd diff --git a/caffe2fluid/examples/imagenet/tools/cmp_layers.sh b/caffe2fluid/examples/imagenet/tools/cmp_layers.sh new file mode 100755 index 0000000..37a106e --- /dev/null +++ b/caffe2fluid/examples/imagenet/tools/cmp_layers.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +#function: +# a tool used to compare all layers' results +# +#set -x +if [[ $# -ne 1 ]];then + echo "usage:" + echo " bash $0 [model_name]" + echo " eg:bash $0 alexnet" + exit 1 +fi + +model_name=$1 +prototxt="models.caffe/$model_name/${model_name}.prototxt" +cat $prototxt | grep name | perl -ne 'if(/^\s*name\s*:\s+\"([^\"]+)/){ print $1."\n";}' >.layer_names + +final_layer=$(cat $prototxt | perl -ne 'if(/^\s*top\s*:\s+\"([^\"]+)/){ print $1."\n";}' | tail -n1) +ret=$(grep "^$final_layer$" .layer_names | wc -l) +if [[ $ret -eq 0 ]];then + echo $final_layer >>.layer_names +fi + +for i in $(cat .layer_names);do + i=${i//\//_} + cf_npy="results/${model_name}.caffe/${i}.npy" + #pd_npy="results/${model_name}.paddle/${i}.npy" + #pd_npy=$(find results/${model_name}.paddle -iname "${i}*.npy" | head -n1) + pd_npy=$(find results/${model_name}.paddle -iname 
"${i}.*npy" | grep deleted -v | head -n1) + + if [[ ! -e $cf_npy ]];then + echo "caffe's result not exist[$cf_npy]" + continue + fi + + if [[ ! -e $pd_npy ]];then + echo "paddle's result not exist[$pd_npy]" + continue + fi + + python compare.py $cf_npy $pd_npy no_exception + if [[ $? -eq 0 ]];then + echo "succeed to compare layer[$i]" + else + echo "failed to compare layer[$i]" + fi + +done diff --git a/caffe2fluid/examples/imagenet/tools/diff.sh b/caffe2fluid/examples/imagenet/tools/diff.sh new file mode 100755 index 0000000..25e5d3b --- /dev/null +++ b/caffe2fluid/examples/imagenet/tools/diff.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# +#function: +# a tool used to check the difference of models' results generated by caffe model and paddle model +# +#howto: +# bash diff.sh resnet50 #when this has been finished, you can get the difference in precision +# +#notes: +# 0, in order to infer using caffe, we need pycaffe installed +# 1, prepare your caffe model in 'models.caffe/', eg: 'model.caffe/resnet101/resnet101.[prototxt|caffemodel]' +# 2, converted paddle model will be in 'models' +# 3, results of layers will be stored in 'results/${model_name}.[paddle|caffe]' +# 4, only the last layer will be checked by default + +model_name="resnet50" +results_root="results/" + +if [[ -n $1 ]];then + if [ $1 = "-h" ];then + echo "usage:" + echo " bash $0 [model_name]" + echo " eg:bash $0 resnet50" + exit 0 + fi + model_name=$1 +fi + +mkdir -p $results_root + +prototxt="models.caffe/$model_name/${model_name}.prototxt" +caffemodel="models.caffe/${model_name}/${model_name}.caffemodel" + +#1, dump layers' results from paddle +paddle_results="$results_root/${model_name}.paddle" +rm -rf $paddle_results +rm -rf "results.paddle" +bash ./tools/run.sh $model_name ./models.caffe/$model_name ./models/$model_name +if [[ $? -ne 0 ]] || [[ ! -e "results.paddle" ]];then + echo "not found paddle's results, maybe failed to convert" + exit 1 +fi +mv results.paddle $paddle_results + +#2, dump layers' results from caffe +caffe_results="$results_root/${model_name}.caffe" +rm -rf $caffe_results +rm -rf "results.caffe" +PYTHON=`which cfpython` +if [[ -z $PYTHON ]];then + PYTHON=`which python` +fi +$PYTHON ./infer.py caffe $prototxt $caffemodel $paddle_results/data.npy +if [[ $? -ne 0 ]] || [[ ! 
-e "results.caffe" ]];then + echo "not found caffe's results, maybe failed to do inference with caffe" + exit 1 +fi +mv results.caffe $caffe_results + +#3, extract layer names +cat $prototxt | grep name | perl -ne 'if(/^\s*name\s*:\s+\"([^\"]+)/){ print $1."\n";}' >.layer_names + +final_layer=$(cat $prototxt | perl -ne 'if(/^\s*top\s*:\s+\"([^\"]+)/){ print $1."\n";}' | tail -n1) +ret=$(grep "^$final_layer$" .layer_names | wc -l) +if [[ $ret -eq 0 ]];then + echo $final_layer >>.layer_names +fi + +#4, compare one by one +#for i in $(cat .layer_names);do +for i in $(cat .layer_names | tail -n1);do + i=${i//\//_} + echo "process $i" + pd_npy=$(find $paddle_results/ -iname "${i}.*npy" | grep deleted -v | head -n1) + #pd_npy="$paddle_results/${i}.npy" + if [[ -f $pd_npy ]];then + $PYTHON compare.py $caffe_results/${i}.npy $pd_npy + else + echo "not found npy file[${i}.*npy] for layer[$i]" + exit 1 + fi +done diff --git a/caffe2fluid/examples/imagenet/tools/run.sh b/caffe2fluid/examples/imagenet/tools/run.sh new file mode 100755 index 0000000..7eb23f4 --- /dev/null +++ b/caffe2fluid/examples/imagenet/tools/run.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +#function: +# a tool used to: +# 1, convert a caffe model +# 2, do inference(only in fluid) using this model +# +#usage: +# cd caffe2fluid/examples/imagenet && bash run.sh resnet50 ./models.caffe/resnet50 ./models/resnet50 +# + +#set -x +if [[ $# -lt 3 ]];then + echo "usage:" + echo " bash $0 [model_name] [cf_model_path] [pd_model_path] [only_convert]" + echo " eg: bash $0 resnet50 ./models.caffe/resnet50 ./models/resnet50" + exit 1 +else + model_name=$1 + cf_model_path=$2 + pd_model_path=$3 + only_convert=$4 +fi + +proto_file=$cf_model_path/${model_name}.prototxt +caffemodel_file=$cf_model_path/${model_name}.caffemodel +weight_file=$pd_model_path/${model_name}.npy +net_file=$pd_model_path/${model_name}.py + +if [[ ! -e $proto_file ]];then + echo "not found prototxt[$proto_file]" + exit 1 +fi + +if [[ ! -e $caffemodel_file ]];then + echo "not found caffemodel[$caffemodel_file]" + exit 1 +fi + +if [[ ! -e $pd_model_path ]];then + mkdir $pd_model_path +fi + +PYTHON=`which cfpython` +if [[ -z $PYTHON ]];then + PYTHON=`which python` +fi +$PYTHON ../../convert.py \ + $proto_file \ + --caffemodel $caffemodel_file \ + --data-output-path $weight_file\ + --code-output-path $net_file + +ret=$? +if [[ $ret -ne 0 ]];then + echo "failed to convert caffe model[$cf_model_path]" + exit $ret +else + echo "succeed to convert caffe model[$cf_model_path] to fluid model[$pd_model_path]" +fi + +if [[ -z $only_convert ]];then + PYTHON=`which pdpython` + if [[ -z $PYTHON ]];then + PYTHON=`which python` + fi + imgfile="data/65.jpeg" + #FIX ME: + # only look the first line in prototxt file for the name of this network, maybe not correct + net_name=`grep "name" $proto_file | head -n1 | perl -ne 'if(/^name\s*:\s*\"([^\"]+)\"/){ print $1."\n";}'` + if [[ -z $net_name ]];then + net_name="MyNet" + fi + cmd="$PYTHON ./infer.py dump $net_file $weight_file $imgfile $net_name" + echo $cmd + eval $cmd + ret=$? 
+fi +exit $ret diff --git a/caffe2fluid/examples/imagenet/tools/test.sh b/caffe2fluid/examples/imagenet/tools/test.sh new file mode 100755 index 0000000..13e5db6 --- /dev/null +++ b/caffe2fluid/examples/imagenet/tools/test.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# +#script to test all models +# + +models="alexnet vgg16 googlenet resnet152 resnet101 resnet50" +for i in $models;do + echo "begin to process $i" + bash ./tools/diff.sh $i 2>&1 + echo "finished to process $i with ret[$?]" +done diff --git a/caffe2fluid/examples/mnist/README.md b/caffe2fluid/examples/mnist/README.md new file mode 100644 index 0000000..cd427d6 --- /dev/null +++ b/caffe2fluid/examples/mnist/README.md @@ -0,0 +1,10 @@ +a demo to show converting caffe model on 'mnist' using caffe2fluid + +--- + +# How to use + +1. prepare python environment +2. download caffe model to "models.caffe/lenet" which contains "lenet.caffemodel" and "lenet.prototxt" +3. run the tool + eg: bash ./run.sh lenet ./models.caffe/lenet ./models/lenet diff --git a/caffe2fluid/examples/mnist/evaluate.py b/caffe2fluid/examples/mnist/evaluate.py new file mode 100644 index 0000000..55b053e --- /dev/null +++ b/caffe2fluid/examples/mnist/evaluate.py @@ -0,0 +1,83 @@ +#!/bin/env python + +#function: +# demo to show how to use converted model using caffe2fluid +# + +import sys +import os +import numpy as np +import paddle.fluid as fluid +import paddle + + +def test_model(exe, test_program, fetch_list, test_reader, feeder): + acc_set = [] + + for data in test_reader(): + acc_np, pred = exe.run(program=test_program, + feed=feeder.feed(data), + fetch_list=fetch_list) + acc_set.append(float(acc_np)) + + acc_val = np.array(acc_set).mean() + return float(acc_val) + + +def evaluate(net_file, model_file): + """ main + """ + #1, build model + net_path = os.path.dirname(net_file) + if net_path not in sys.path: + sys.path.insert(0, net_path) + + from lenet import LeNet as MyNet + + #1, define network topology + images = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + net = MyNet({'data': images}) + prediction = net.layers['prob'] + acc = fluid.layers.accuracy(input=prediction, label=label) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + #2, load weights + if model_file.find('.npy') > 0: + net.load(data_path=model_file, exe=exe, place=place) + else: + net.load(data_path=model_file, exe=exe) + + #3, test this model + test_program = fluid.default_main_program().clone() + test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128) + + feeder = fluid.DataFeeder(feed_list=[images, label], place=place) + fetch_list = [acc, prediction] + + print('go to test model using test set') + acc_val = test_model(exe, test_program, \ + fetch_list, test_reader, feeder) + + print('test accuracy is [%.4f], expected value[0.919]' % (acc_val)) + + +if __name__ == "__main__": + net_file = 'models/lenet/lenet.py' + weight_file = 'models/lenet/lenet.npy' + + argc = len(sys.argv) + if argc == 3: + net_file = sys.argv[1] + weight_file = sys.argv[2] + elif argc > 1: + print('usage:') + print('\tpython %s [net_file] [weight_file]' % (sys.argv[0])) + print('\teg:python %s %s %s %s' % (sys.argv[0], net_file, weight_file)) + sys.exit(1) + + evaluate(net_file, weight_file) diff --git a/caffe2fluid/examples/mnist/run.sh b/caffe2fluid/examples/mnist/run.sh new file mode 100755 index 0000000..eee83ef --- /dev/null +++ 
b/caffe2fluid/examples/mnist/run.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +#function: +# a tool used to: +# 1, convert a caffe model +# 2, do inference using this model +# +#usage: +# bash run.sh lenet ./models.caffe/lenet ./models/lenet +# + +#set -x +if [[ $# -lt 3 ]];then + echo "usage:" + echo " bash $0 [model_name] [cf_model_path] [pd_model_path] [only_convert]" + echo " eg: bash $0 lenet ./models.caffe/lenet ./models/lenet" + exit 1 +else + model_name=$1 + cf_model_path=$2 + pd_model_path=$3 + no_eval=$4 +fi + +proto_file=$cf_model_path/${model_name}.prototxt +caffemodel_file=$cf_model_path/${model_name}.caffemodel +weight_file=$pd_model_path/${model_name}.npy +net_file=$pd_model_path/${model_name}.py + +if [[ ! -e $proto_file ]];then + echo "not found prototxt[$proto_file]" + exit 1 +fi + +if [[ ! -e $caffemodel_file ]];then + echo "not found caffemodel[$caffemodel_file]" + exit 1 +fi + +if [[ ! -e $pd_model_path ]];then + mkdir $pd_model_path +fi + +PYTHON=`which cfpython` +if [[ -z $PYTHON ]];then + PYTHON=`which python` +fi +$PYTHON ../../convert.py \ + $proto_file \ + --caffemodel $caffemodel_file \ + --data-output-path $weight_file\ + --code-output-path $net_file + +ret=$? +if [[ $ret -ne 0 ]];then + echo "failed to convert caffe model[$cf_model_path]" + exit $ret +else + echo "succeed to convert caffe model[$cf_model_path] to fluid model[$pd_model_path]" +fi + +if [[ -z $only_convert ]];then + PYTHON=`which pdpython` + if [[ -z $PYTHON ]];then + PYTHON=`which python` + fi + net_name=`grep "name" $proto_file | head -n1 | perl -ne 'if(/\"([^\"]+)\"/){ print $1."\n";}'` + if [[ $net_name != "LeNet" ]];then + echo "only support LeNet" + exit 1 + fi + $PYTHON ./evaluate.py $net_file $weight_file + ret=$? +fi +exit $ret diff --git a/caffe2fluid/kaffe/__init__.py b/caffe2fluid/kaffe/__init__.py new file mode 100644 index 0000000..c11ce45 --- /dev/null +++ b/caffe2fluid/kaffe/__init__.py @@ -0,0 +1,5 @@ +from .graph import GraphBuilder, NodeMapper +from .errors import KaffeError, print_stderr + +import os +from . import paddle diff --git a/caffe2fluid/kaffe/caffe/__init__.py b/caffe2fluid/kaffe/caffe/__init__.py new file mode 100644 index 0000000..8d53dee --- /dev/null +++ b/caffe2fluid/kaffe/caffe/__init__.py @@ -0,0 +1 @@ +from .resolver import get_caffe_resolver, has_pycaffe diff --git a/caffe2fluid/kaffe/caffe/resolver.py b/caffe2fluid/kaffe/caffe/resolver.py new file mode 100644 index 0000000..6f439d7 --- /dev/null +++ b/caffe2fluid/kaffe/caffe/resolver.py @@ -0,0 +1,60 @@ +import os +import sys + +SHARED_CAFFE_RESOLVER = None + + +def import_caffepb(): + p = os.path.realpath(__file__) + p = os.path.dirname(p) + p = os.path.join(p, '../../proto') + sys.path.insert(0, p) + import caffe_pb2 + return caffe_pb2 + + +class CaffeResolver(object): + def __init__(self): + self.import_caffe() + + def import_caffe(self): + self.caffe = None + try: + # Try to import PyCaffe first + import caffe + self.caffe = caffe + except ImportError: + # Fall back to the protobuf implementation + self.caffepb = import_caffepb() + show_fallback_warning() + if self.caffe: + # Use the protobuf code from the imported distribution. + # This way, Caffe variants with custom layers will work. 
+ self.caffepb = self.caffe.proto.caffe_pb2 + self.NetParameter = self.caffepb.NetParameter + + def has_pycaffe(self): + return self.caffe is not None + + +def get_caffe_resolver(): + global SHARED_CAFFE_RESOLVER + if SHARED_CAFFE_RESOLVER is None: + SHARED_CAFFE_RESOLVER = CaffeResolver() + return SHARED_CAFFE_RESOLVER + + +def has_pycaffe(): + return get_caffe_resolver().has_pycaffe() + + +def show_fallback_warning(): + msg = ''' +------------------------------------------------------------ + WARNING: PyCaffe not found! + Falling back to a pure protocol buffer implementation. + * Conversions will be drastically slower. +------------------------------------------------------------ + +''' + sys.stderr.write(msg) diff --git a/caffe2fluid/kaffe/custom_layers/__init__.py b/caffe2fluid/kaffe/custom_layers/__init__.py new file mode 100644 index 0000000..8505aee --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/__init__.py @@ -0,0 +1,114 @@ +""" +""" + +from .register import get_registered_layers +#custom layer import begins + +import axpy +import flatten +import argmax +import reshape +import roipooling +import priorbox +import permute +import detection_out +import normalize +import select +import crop +import power +import reduction + +#custom layer import ends + +custom_layers = get_registered_layers() + + +def set_args(f, params, node=None): + """ set args for function 'f' using the parameters in node.layer.parameters + + Args: + f (function): a python function object + params (object): a object contains attributes needed by f's arguments + + Returns: + arg_names (list): a list of argument names + kwargs (dict): a dict contains needed arguments + """ + from ..protobuf_to_dict import protobuf_to_dict + + argc = f.__code__.co_argcount + arg_list = f.__code__.co_varnames[0:argc] + + kwargs = {} + for arg_name in arg_list: + if arg_name in params: + kwargs[arg_name] = params[arg_name] + + if node is not None and len(node.metadata): + kwargs.update(node.metadata) + + return arg_list, kwargs + + +def has_layer(kind): + """ test whether this layer exists in custom layer + """ + return kind in custom_layers + + +def compute_output_shape(kind, node): + assert kind in custom_layers, "layer[%s] not exist in custom layers" % ( + kind) + shape_func = custom_layers[kind]['shape'] + + parents = node.parents + inputs = [list(p.output_shape) for p in parents] + arg_names, kwargs = set_args(shape_func, node.params) + + if len(inputs) == 1: + inputs = inputs[0] + + return shape_func(inputs, **kwargs) + + +def make_node(template, kind, node): + """ make a PaddleNode for custom layer which means construct + a piece of code to define a layer implemented in 'custom_layers' + + Args: + @template (PaddleNode): a factory to new a instance of PaddleNode + @kind (str): type of custom layer + @node (graph.Node): a layer in the net + + Returns: + instance of PaddleNode + """ + assert kind in custom_layers, "layer[%s] not exist in custom layers" % ( + kind) + + layer_func = custom_layers[kind]['layer'] + + #construct arguments needed by custom layer function from node's parameters + arg_names, kwargs = set_args(layer_func, node.params, node) + + return template('custom_layer', kind, **kwargs) + + +def make_custom_layer(kind, inputs, name, *args, **kwargs): + """ execute a custom layer which is implemented by users + + Args: + @kind (str): type name of this layer + @inputs (vars): variable list created by fluid + @namme (str): name for this layer + @args (tuple): other positional arguments + @kwargs (dict): other kv 
arguments + + Returns: + output (var): output variable for this layer + """ + assert kind in custom_layers, "layer[%s] not exist in custom layers" % ( + kind) + + layer_func = custom_layers[kind]['layer'] + return layer_func(inputs, name, *args, **kwargs) diff --git a/caffe2fluid/kaffe/custom_layers/argmax.py b/caffe2fluid/kaffe/custom_layers/argmax.py new file mode 100644 index 0000000..d419832 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/argmax.py @@ -0,0 +1,73 @@ +""" a custom layer for 'argmax', maybe we should implement this in standard way. + more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/argmax.html +""" +from .register import register + + +def import_fluid(): + import paddle.fluid as fluid + return fluid + + +def argmax_shape(input_shape, out_max_val=False, top_k=1, axis=-1): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (list of num): a list of number which represents the input shape + @out_max_val (bool): parameter from caffe's ArgMax layer + @top_k (int): parameter from caffe's ArgMax layer + @axis (int): parameter from caffe's ArgMax layer + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + input_shape = list(input_shape) + + if axis < 0: + axis += len(input_shape) + + assert (axis + 1 == len(input_shape) + ), 'only can be applied on the last dimension[axis:%d, %s] now,'\ + 'make sure you have set axis param in xxx.prototxt file' \ + % (axis, str(input_shape)) + + output_shape = input_shape + output_shape[-1] = top_k + if out_max_val is True: + output_shape[-1] *= 2 + + return output_shape + + +def argmax_layer(input, name, out_max_val=False, top_k=1, axis=-1): + """ build a layer of type 'ArgMax' using fluid + + Args: + @input (variable): input fluid variable for this layer + @name (str): name for this layer + @out_max_val (bool): parameter from caffe's ArgMax layer + @top_k (int): parameter from caffe's ArgMax layer + @axis (int): parameter from caffe's ArgMax layer + + Returns: + output (variable): output variable for this layer + """ + + fluid = import_fluid() + + if axis < 0: + axis += len(input.shape) + + if out_max_val is True: + topk_var, index_var = fluid.layers.topk(input=input, k=top_k) + index_var = fluid.layers.cast(index_var, dtype=topk_var.dtype) + output = fluid.layers.concat( + [index_var, topk_var], axis=axis, name=name) + else: + topk_var, index_var = fluid.layers.topk(input=input, k=top_k, name=name) + output = index_var + + return output + + +register(kind='ArgMax', shape=argmax_shape, layer=argmax_layer) diff --git a/caffe2fluid/kaffe/custom_layers/axpy.py b/caffe2fluid/kaffe/custom_layers/axpy.py new file mode 100644 index 0000000..b81d4f2 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/axpy.py @@ -0,0 +1,51 @@ +""" A custom layer for 'axpy' which receives 3 tensors and output 1 tensor. 
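+    inputs are expected in the order (alpha, x, y); alpha is broadcast over x starting at axis 0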
+ the function performed is:(the mupltiplication and add are elementewise) + output = inputs[0] * inputs[1] + inputs[2] +""" + +from .register import register + + +def axpy_shape(input_shapes): + """ calculate the output shape of this layer using input shapes + + Args: + @input_shapes (list of tuples): a list of input shapes + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + assert len(input_shapes) == 3, "not valid input shape for axpy layer" + assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims' + + output_shape = input_shapes[1] + assert (input_shapes[2] == output_shape),\ + "shape not consistent for axpy[%s <--> %s]" \ + % (str(output_shape), str(input_shapes[2])) + + return output_shape + + +def axpy_layer(inputs, name): + """ build a layer of type 'Axpy' using fluid + + Args: + @inputs (list of variables): input fluid variables for this layer + @name (str): name for this layer + + Returns: + output (variable): output variable for this layer + """ + import paddle.fluid as fluid + + assert len(inputs) == 3, "invalid inputs for axpy[%s]" % (name) + alpha = inputs[0] + x = inputs[1] + y = inputs[2] + output = fluid.layers.elementwise_mul(x, alpha, axis=0) + output = fluid.layers.elementwise_add(output, y, name=name) + + return output + + +register(kind='Axpy', shape=axpy_shape, layer=axpy_layer) diff --git a/caffe2fluid/kaffe/custom_layers/crop.py b/caffe2fluid/kaffe/custom_layers/crop.py new file mode 100644 index 0000000..807f65d --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/crop.py @@ -0,0 +1,77 @@ +""" a custom layer for 'crop', maybe we should implement this in standard way. + more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/crop.html +""" +from .register import register + + +def crop_shape(input_shape, shape=None): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (num | list of num): a list of number or num which represents the input shape + @shape (list of integer): the shape of output + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + if isinstance(input_shape, list): + assert len(input_shape) == 2, "the number of crop's inputs must be 2" + return input_shape[1] + elif not shape is None: + assert len(shape) == len( + input_shape.shape), "input_shape is diff with output_shape" + return shape + else: + raise Exception, "crop_shape input error" + return None + + +def crop_layer(input, name, shape=None, axis=2, offset=None): + """ build a layer of type 'Crop' using fluid + + Args: + @input (variables | list of variables): input fluid variable for this layer + @shape (list of integer): the shape of output + @name (str): name for this layer + @axis (integer): parameter from caffe's Crop layer + @offset (Variable|list/tuple of integer|None): parameter from caffe's Crop layer + + Returns: + output (variable): output variable for this layer + """ + input_shape = None + output_shape = None + input_tensor = None + if isinstance(input, list): + assert len(input) == 2, "the number of crop's inputs must be 2" + input_shape = input[0].shape + output_shape = input[1].shape + input_tensor = input[0] + elif not shape is None: + assert len(shape) == len( + input.shape), "input_shape is diff with output_shape" + input_shape = input.shape + output_shape = shape + input_tensor = input + else: + raise Exception, "crop_layer input error" + + assert len(output_shape) == len( + input_shape), "input_shape is diff with 
output_shape" + + if axis < 0: + axis += len(input_shape) + + if offset is not None: + assert (len(input_shape) - axis + ) == len(offset), "invalid offset[%s] in crop layer" % ( + str(offset)) + offset = [0] * axis + offset + import paddle.fluid as fluid + output = fluid.layers.crop( + input_tensor, shape=output_shape, offsets=offset, name=name) + + return output + + +register(kind='Crop', shape=crop_shape, layer=crop_layer) diff --git a/caffe2fluid/kaffe/custom_layers/detection_out.py b/caffe2fluid/kaffe/custom_layers/detection_out.py new file mode 100644 index 0000000..ff79e26 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/detection_out.py @@ -0,0 +1,79 @@ +""" A custom layer for 'detectionout' used in 'SSD' model to produce outputs + Note: Since Paddle's implementation of 'detectionout' applied 'flatten' and 'softmax' ops on the input of 'conf', + while Caffe's implementation do not. +""" + +from .register import register + + +def detectionoutput_shape(input_shape): + """ the output shape of this layer is dynamic and not determined by 'input_shape' + + Args: + @input_shape (list of int): input shape + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + output_shape = [-1, 6] + return output_shape + + +def detectionoutput_layer(inputs, + name, + background_label=0, + share_location=True, + nms_param=None, + keep_top_k=100, + confidence_threshold=0.1): + """ build a layer of type 'detectionout' using fluid + + Args: + @inputs (list of variables): input fluid variables for this layer + @name (str): name for this layer + + Returns: + output (variable): output variable for this layer + """ + import paddle.fluid as fluid + + if nms_param is None: + nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} + + mbox_conf_flatten = inputs[1] + mbox_priorbox = inputs[2] + mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1) + pb = mbox_priorbox_list[0] + pbv = mbox_priorbox_list[1] + pb = fluid.layers.reshape(x=pb, shape=[-1, 4]) + pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4]) + mbox_loc = inputs[0] + mbox_loc = fluid.layers.reshape( + x=mbox_loc, shape=[-1, mbox_conf_flatten.shape[1], 4]) + + default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} + fields = ['eta', 'top_k', 'nms_threshold'] + + for f in default.keys(): + if not nms_param.has_key(f): + nms_param[f] = default[f] + + nmsed_outs = fluid.layers.detection_output( + scores=mbox_conf_flatten, + loc=mbox_loc, + prior_box=pb, + prior_box_var=pbv, + background_label=background_label, + nms_threshold=nms_param["nms_threshold"], + nms_top_k=nms_param["top_k"], + keep_top_k=keep_top_k, + score_threshold=confidence_threshold, + nms_eta=nms_param["eta"]) + + return nmsed_outs + + +register( + kind='DetectionOutput', + shape=detectionoutput_shape, + layer=detectionoutput_layer) diff --git a/caffe2fluid/kaffe/custom_layers/flatten.py b/caffe2fluid/kaffe/custom_layers/flatten.py new file mode 100644 index 0000000..ebb9771 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/flatten.py @@ -0,0 +1,66 @@ +""" a custom layer for 'flatten', maybe we should implement this in standard way. 
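+    e.g. with Caffe's defaults (axis=1, end_axis=-1) an input of shape [N, C, H, W] becomes [N, C*H*W]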
+ more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/flatten.html +""" +from .register import register + + +def flatten_shape(input_shape, axis=1, end_axis=-1): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (list of num): a list of number which represents the input shape + @axis (int): parameter from caffe's Flatten layer + @end_axis (int): parameter from caffe's Flatten layer + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + + start_axis = axis + end_axis = end_axis + input_shape = list(input_shape) + if start_axis < 0: + start_axis += len(input_shape) + + if end_axis < 0: + end_axis += len(input_shape) + 1 + + assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params'\ + % (start_axis, end_axis) + output_shape = input_shape[0:start_axis] + flat_sz = reduce(lambda a, b: a * b, input_shape[start_axis:end_axis]) + output_shape += [flat_sz] + output_shape += input_shape[end_axis:-1] + + return output_shape + + +def flatten_layer(input, name, axis=1, end_axis=-1): + """ build a layer of type 'Flatten' using fluid + + Args: + @input (variable): input fluid variable for this layer + @name (str): name for this layer + @axis (int): parameter from caffe's Flatten layer + @end_axis (int): parameter from caffe's Flatten layer + + Returns: + output (variable): output variable for this layer + """ + import paddle.fluid as fluid + + input_shape = list(input.shape) + + if input_shape[0] == -1: + input_shape[0] = 1 + output_shape = flatten_shape(input_shape, axis=axis, end_axis=end_axis) + output_shape[0] = -1 + else: + output_shape = flatten_shape(input_shape, axis=axis, end_axis=end_axis) + + output = fluid.layers.reshape(input, shape=output_shape, name=name) + + return output + + +register(kind='Flatten', shape=flatten_shape, layer=flatten_layer) diff --git a/caffe2fluid/kaffe/custom_layers/normalize.py b/caffe2fluid/kaffe/custom_layers/normalize.py new file mode 100644 index 0000000..f6e8c00 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/normalize.py @@ -0,0 +1,56 @@ +""" A custom layer for 'normalize' op +""" + +from .register import register + + +def normalize_shape(input_shape, + across_spatial=True, + scale_filler=True, + eps=1e-10): + """ calculate the output shape of this layer using input shapes + + Args: + @input_shape (list of tuples): input shape + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + output_shape = input_shape + return output_shape + + +def normalize_layer(input, + name, + across_spatial=True, + scale_filler=True, + channel_shared=False, + eps=1e-10): + """ build a layer of type 'normalize' using fluid + + Args: + @inputs (list of variables): input fluid variables for this layer + @name (str): name for this layer + + Returns: + output (variable): output variable for this layer + """ + import paddle.fluid as fluid + + param_prefix = name.split('.')[0] + + assert across_spatial == False, "Only support across_spatial == False for Normalize[%s]" % ( + name) + l2_norm = fluid.layers.l2_normalize(input, axis=1) # l2 norm along channel + + shape = [1] if channel_shared else [input.shape[1]] + scale_attr = fluid.ParamAttr(name=param_prefix + '_scale') + scale_param = fluid.layers.create_parameter( + shape=shape, dtype=input.dtype, name=name, attr=scale_attr) + + out = fluid.layers.elementwise_mul( + x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1) + return out + + +register(kind='Normalize', 
shape=normalize_shape, layer=normalize_layer)
diff --git a/caffe2fluid/kaffe/custom_layers/permute.py b/caffe2fluid/kaffe/custom_layers/permute.py
new file mode 100644
index 0000000..f0633fd
--- /dev/null
+++ b/caffe2fluid/kaffe/custom_layers/permute.py
@@ -0,0 +1,40 @@
+""" A custom layer for 'Permute' which is equivalent to transpose in paddle
+"""
+
+from .register import register
+
+
+def permute_shape(input_shape, order):
+    """ calculate the output shape of this layer using input shapes
+
+    Args:
+        @input_shape (list of numbers): input shape
+
+    Returns:
+        @output_shape (list of num): a list of numbers represent the output shape
+    """
+    output_shape = []
+    for ii in order:
+        assert ii < len(input_shape), "invalid order for permute[%s]" % (order)
+        output_shape.append(input_shape[ii])
+    return output_shape
+
+
+def permute_layer(input, name, order):
+    """ build a layer of type 'permute' using fluid
+
+    Args:
+        @input (variable): input fluid variable for this layer
+        @name (str): name for this layer
+        @order (list of int): order to permute the dims
+
+    Returns:
+        output (variable): output variable for this layer
+    """
+    import paddle.fluid as fluid
+    output = fluid.layers.transpose(input, order, name=name)
+
+    return output
+
+
+register(kind='Permute', shape=permute_shape, layer=permute_layer)
diff --git a/caffe2fluid/kaffe/custom_layers/power.py b/caffe2fluid/kaffe/custom_layers/power.py
new file mode 100644
index 0000000..a8b91f4
--- /dev/null
+++ b/caffe2fluid/kaffe/custom_layers/power.py
@@ -0,0 +1,40 @@
+""" a custom layer for 'power', maybe we should implement this in standard way.
+    more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/power.html
+"""
+from .register import register
+
+
+def power_shape(input_shape, shape=None):
+    """ calculate the output shape of this layer using input shape
+
+    Args:
+        @input_shape (list of num): a list of number which represents the input shape
+
+    Returns:
+        @output_shape (list of num): a list of numbers represent the output shape
+    """
+    return input_shape
+
+
+def power_layer(input, name, power=1.0, scale=1.0, shift=0.0):
+    """ build a layer of type 'Power' using fluid
+
+    Args:
+        @input (variable): input fluid variable for this layer
+        @name (str): name for this layer
+        @power (float): parameter from caffe's Power layer
+        @scale (float): parameter from caffe's Power layer
+        @shift (float): parameter from caffe's Power layer
+
+    Returns:
+        output (variable): output variable for this layer
+    """
+    import paddle.fluid as fluid
+    scale_out = fluid.layers.scale(
+        input, scale=scale, bias=shift, bias_after_scale=True)
+    output = fluid.layers.pow(scale_out, factor=power)
+
+    return output
+
+
+register(kind='Power', shape=power_shape, layer=power_layer)
diff --git a/caffe2fluid/kaffe/custom_layers/priorbox.py b/caffe2fluid/kaffe/custom_layers/priorbox.py
new file mode 100644
index 0000000..e3eb640
--- /dev/null
+++ b/caffe2fluid/kaffe/custom_layers/priorbox.py
@@ -0,0 +1,103 @@
+""" A custom layer for 'priorbox' which is used in ssd to generate prior box info
+    Since the order of prior box is different between caffe and paddle,
+    we use 'slice' and 'concat' ops to align them.
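+    (the re-ordering code below is currently disabled, so the output keeps Paddle's native layout, shaped [1, 2, 4 * num_priors])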
+""" + +from .register import register + + +def priorbox_shape(input_shapes, min_size, max_size=None, aspect_ratio=None): + """ calculate the output shape of this layer using input shapes + + Args: + @input_shapes (list of tuples): a list of input shapes + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + assert len(input_shapes) == 2, "invalid inputs for Priorbox[%s]" % (name) + fc_shape = input_shapes[0] + N = 1 + if not max_size == None: + N += 1 + if not aspect_ratio == None: + N += 2 * len(aspect_ratio) + + N_bbx = fc_shape[2] * fc_shape[3] * N + output_shape = [1, 2, 4 * N_bbx] + return output_shape + + +def priorbox_layer(inputs, + name, + min_size, + max_size=None, + aspect_ratio=None, + variance=[0.1, 0.1, 0.2, 0.2], + flip=False, + clip=False, + step=0.0, + offset=0.5): + """ build a layer of type 'Priorbox' using fluid + + Args: + @inputs (list of variables): input fluid variables for this layer + @name (str): name for this layer + + Returns: + output (variable): output variable for this layer + """ + import paddle.fluid as fluid + + assert len(inputs) == 2, "invalid inputs for Priorbox[%s]" % (name) + input = inputs[0] + image = inputs[1] + steps = tuple(step) if type(step) is list or type(step) is tuple else (step, + step) + box, variance_ = fluid.layers.prior_box( + input, + image, + min_size, + max_size, + aspect_ratio, + variance, + flip, + clip, + steps, + offset, + min_max_aspect_ratios_order=True) + """ + #adjust layout when the output is not consistent with caffe's + + feat_shape = list(input.shape) + H = feat_shape[2] + W = feat_shape[3] + box_tmp = fluid.layers.reshape(box, [H, W, -1, 4]) + nb_prior_bbx = int(box_tmp.shape[2]) + tensor_list = fluid.layers.split(box_tmp, nb_prior_bbx, 2) + + #TODO: + # current implementation for this layer is not efficient + # and we should fix this bug in future when Paddle support the same prior-box layout with Caffe + index_list = [0] + index_list = index_list * nb_prior_bbx + index_offset = 0 + if max_size is not None: + index_list[1] = -1 + index_offset = 1 + for ii in xrange(2 * len(aspect_ratio)): + index_list[ii + 1 + index_offset] = ii + 1 + + tensor_list_gathered = [tensor_list[ii] for ii in index_list] + caffe_prior_bbx = fluid.layers.concat(tensor_list_gathered, axis=2) + box = fluid.layers.reshape(caffe_prior_bbx, [1, 1, -1]) + """ + + box = fluid.layers.reshape(box, [1, 1, -1]) + variance_ = fluid.layers.reshape(variance_, [1, 1, -1]) + output = fluid.layers.concat([box, variance_], axis=1) + + return output + + +register(kind='PriorBox', shape=priorbox_shape, layer=priorbox_layer) diff --git a/caffe2fluid/kaffe/custom_layers/reduction.py b/caffe2fluid/kaffe/custom_layers/reduction.py new file mode 100644 index 0000000..ced60d3 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/reduction.py @@ -0,0 +1,67 @@ +""" a custom layer for 'crop', maybe we should implement this in standard way. 
+ more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reduction.html +""" +from .register import register + + +def reduction_shape(input_shape, axis=0): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (list of num): a list of number which represents the input shape + @axis (int): parameter from caffe's reduction layer + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + if axis < 0: + axis += len(input_shape) + 1 + + assert axis <= len(input_shape), 'invalid axis[%d] error' % (axis) + + return input_shape[0:axis] + + +def reduction_layer(input, name, axis=0, operation=1, coeff=1.0): + """ build a layer of type 'Crop' using fluid + + Args: + @input (variable): input fluid variable for this layer + @name (str): name for this layer + @axis (int): parameter from caffe's reduction layer + @operation (int): parameter from caffe's reduction layer + @coeff (float): parameter from caffe's reduction layer + + Returns: + output (variable): output variable for this layer + """ + assert operation >= 1 and operation <= 4, "reduction reduction [%s] error" % ( + operation) + + input_len = len(input.shape) + if axis < 0: + axis += input_len + 1 + + dim = range(input_len) + + import paddle.fluid as fluid + if operation == 1: ## operation = SUM + output = fluid.layers.reduce_sum( + input, dim=dim[axis:], keep_dim=False, name=name) + elif operation == 2: ## operation = ASUM + absout = fluid.layers.abs(input) + output = fluid.layers.reduce_sum( + absout, dim=dim[axis:], keep_dim=False, name=name) + elif operation == 3: ## operation = SUMSQ + powout = fluid.layers.pow(x=input, factor=2.0) + output = fluid.layers.reduce_sum( + powout, dim=dim[axis:], keep_dim=False, name=name) + else: ## operation = MEAN + output = fluid.layers.reduce_mean( + input, dim=dim[axis:], keep_dim=False, name=name) + + mulout = fluid.layers.scale(x=output, scale=coeff) + return mulout + + +register(kind='Reduction', shape=reduction_shape, layer=reduction_layer) diff --git a/caffe2fluid/kaffe/custom_layers/register.py b/caffe2fluid/kaffe/custom_layers/register.py new file mode 100644 index 0000000..ae806cd --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/register.py @@ -0,0 +1,37 @@ +""" this module provides 'register' for registering customized layers +""" + +g_custom_layers = {} + + +def register(kind, shape, layer): + """ register a custom layer or a list of custom layers + + Args: + @kind (str or list): type name of the layer + @shape (function): a function to generate the shape of layer's output + @layer (function): a function to generate the shape of layer's output + + Returns: + None + """ + assert type(shape).__name__ == 'function', 'shape should be a function' + assert type(layer).__name__ == 'function', 'layer should be a function' + + if type(kind) is str: + kind = [kind] + else: + assert type( + kind) is list, 'invalid param "kind" for register, not a list or str' + + for k in kind: + assert type( + k) is str, 'invalid param "kind" for register, not a list of str' + assert k not in g_custom_layers, 'this type[%s] has already been registered' % ( + k) + print('register layer[%s]' % (k)) + g_custom_layers[k] = {'shape': shape, 'layer': layer} + + +def get_registered_layers(): + return g_custom_layers diff --git a/caffe2fluid/kaffe/custom_layers/reshape.py b/caffe2fluid/kaffe/custom_layers/reshape.py new file mode 100644 index 0000000..da82e4d --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/reshape.py @@ -0,0 
+1,133 @@ +""" a custom layer for 'reshape', maybe we should implement this in standard way. + more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reshape.html +""" +from .register import register + + +def import_fluid(): + import paddle.fluid as fluid + return fluid + + +def reshape_shape(input_sp, shape, axis=0, num_axes=-1): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (list of num): a list of number which represents the input shape + @shape (object): parameter from caffe's Reshape layer + @axis (int): parameter from caffe's Reshape layer + @num_axes(int): parameter from caffe's Reshape layer + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + + def count(num_list): + return reduce(lambda a, b: a * b, num_list) + + input_shape = list(input_sp) + input_count = count(input_shape) + + input_num_axes = len(input_shape) + + input_start_axis = axis + start_axis = input_start_axis if input_start_axis >= 0 \ + else input_num_axes + input_start_axis + 1 + + assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis) + assert start_axis <= input_num_axes, "[Reshape]axis %d out of range for %d-D input data"\ + % (input_start_axis, input_num_axes) + + assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all" + + end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes + assert end_axis <= input_num_axes, "end_axis[%d] = axis[%d] + num_axes[%d] is out of range"\ + % (end_axis, start_axis, num_axes) + + num_axes_replaced = end_axis - start_axis + num_axes_retained = input_num_axes - num_axes_replaced + num_new_axes = len(shape['dim']) + output_shape = [] + + for i in range(start_axis): + output_shape.append(input_shape[i]) + + for i in range(num_new_axes): + output_shape.append(shape['dim'][i]) + + for i in range(end_axis, input_num_axes): + output_shape.append(input_shape[i]) + + assert len(output_shape) == num_axes_retained + num_new_axes,\ + "[Reshape]invalid dims of output shape[%s]" % (str(output_shape)) + + inferred_axis = -1 + copy_axes = [] + constant_count = 1 + for i in range(num_new_axes): + top_dim = shape['dim'][i] + if top_dim == 0: + copy_axes.append(i) + copy_axis_index = start_axis + i + output_shape[copy_axis_index] = input_shape[copy_axis_index] + elif top_dim == -1: + assert inferred_axis == -1, "[Reshape]new shape contains multiple -1 dims" + inferred_axis = i + else: + constant_count *= top_dim + + if inferred_axis >= 0: + explicit_count = constant_count + l = input_shape[0:start_axis] + if len(l) > 0: + explicit_count *= count(l) + + l = input_shape[end_axis:] + if len(l) > 0: + explicit_count *= count(l) + + for i in range(len(copy_axes)): + explicit_count *= output_shape[start_axis + copy_axes[i]] + + assert input_count % explicit_count == 0, "[Reshape]botom count[%d] "\ + "must be divisible by product of the specified dimensions[%d] "\ + % (input_count, explicit_count) + output_shape[start_axis + inferred_axis] = input_count / explicit_count + + output_count = count(output_shape) + assert output_count == input_count, "[Reshape]output count[%d] must match input count[%d]" % ( + output_count, input_count) + + return output_shape + + +def reshape_layer(input, name, shape, axis=0, num_axes=-1): + """ build a layer of type 'Flatten' using fluid + + Args: + @input (variable): input fluid variable for this layer + @name (str): name for this layer + @shape (object): parameter from caffe's Reshape layer + @axis (int): parameter 
from caffe's Reshape layer + @num_axes(int): parameter from caffe's Reshape layer + + Returns: + output (variable): output variable for this layer + """ + fluid = import_fluid() + + input_shape = list(input.shape) + + if input_shape[0] == -1: + input_shape[0] = 1 + output_shape = reshape_shape(input_shape, shape, axis, num_axes) + output_shape[0] = -1 + else: + output_shape = reshape_shape(input_shape, shape, axis, num_axes) + + output = fluid.layers.reshape(input, shape=output_shape, name=name) + + return output + + +register(kind='Reshape', shape=reshape_shape, layer=reshape_layer) diff --git a/caffe2fluid/kaffe/custom_layers/roipooling.py b/caffe2fluid/kaffe/custom_layers/roipooling.py new file mode 100644 index 0000000..ccbf24a --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/roipooling.py @@ -0,0 +1,53 @@ +""" a custom layer for 'ROIPooling', maybe we should implement this in standard way. + more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/ROIPooling.html +""" +from .register import register + + +def roipooling_shape(input_shapes, pooled_h, pooled_w, spatial_scale): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (list of num): a list of number which represents the input shape + @out_max_val (bool): parameter from caffe's ROIPooling layer + @top_k (int): parameter from caffe's ROIPooling layer + @axis (int): parameter from caffe's ROIPooling layer + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + assert len(input_shapes) == 2, "not valid input shape for roipooling layer" + base_fea_shape = input_shapes[0] + rois_shape = input_shapes[1] + output_shape = base_fea_shape + output_shape[0] = rois_shape[0] + output_shape[2] = pooled_h + output_shape[3] = pooled_w + return output_shape + + +def roipooling_layer(inputs, name, pooled_h, pooled_w, spatial_scale): + """ build a layer of type 'ROIPooling' using fluid + + Args: + @input (variable): input fluid variable for this layer + @name (str): name for this layer + @out_max_val (bool): parameter from caffe's ROIPooling layer + @top_k (int): parameter from caffe's ROIPooling layer + @axis (int): parameter from caffe's ROIPooling layer + + Returns: + output (variable): output variable for this layer + """ + + import paddle.fluid as fluid + assert len(inputs) == 2, "not valid input shape for roipooling layer" + base_fea = inputs[0] + rois = inputs[1][:, 1:5] + rois_fea = fluid.layers.roi_pool(base_fea, rois, pooled_h, pooled_w, + spatial_scale) + + return rois_fea + + +register(kind='ROIPooling', shape=roipooling_shape, layer=roipooling_layer) diff --git a/caffe2fluid/kaffe/custom_layers/select.py b/caffe2fluid/kaffe/custom_layers/select.py new file mode 100644 index 0000000..708ac64 --- /dev/null +++ b/caffe2fluid/kaffe/custom_layers/select.py @@ -0,0 +1,67 @@ +""" a custom layer for 'select' which is used to replace standard 'Slice' layer + for converting layer with multiple different output tensors +""" +from .register import register + + +def select_shape(input_shape, slice_point, axis=1): + """ calculate the output shape of this layer using input shape + + Args: + @input_shape (list of num): a list of number which represents the input shape + @slice_point (list): parameter from caffe's Slice layer + @axis (int): parameter from caffe's Slice layer + + Returns: + @output_shape (list of num): a list of numbers represent the output shape + """ + + input_shape = list(input_shape) + start = slice_point[0] + if len(slice_point) 
== 2: + end = slice_point[1] + else: + end = input_shape[axis] + + assert end > start, "invalid slice_point with [start:%d, end:%d]"\ + % (start, end) + output_shape = input_shape + output_shape[axis] = end - start + return output_shape + + +def select_layer(input, name, slice_point, axis=1): + """ build a layer of type 'Slice' using fluid + + Args: + @input (variable): input fluid variable for this layer + @name (str): name for this layer + @slice_point (list): parameter from caffe's Slice layer + @axis (int): parameter from caffe's Slice layer + + Returns: + output (variable): output variable for this layer + """ + import paddle.fluid as fluid + input_shape = list(input.shape) + + start = slice_point[0] + if len(slice_point) == 2: + end = slice_point[1] + else: + end = input_shape[axis] + + sections = [] + if start > 0: + sections.append(start) + + pos = len(sections) + sections.append(end - start) + if end != input_shape[axis]: + sections.append(input_shape[axis] - end) + + outputs = fluid.layers.split(input, sections, dim=axis, name=name) + return outputs[pos] + + +register(kind='Select', shape=select_shape, layer=select_layer) diff --git a/caffe2fluid/kaffe/errors.py b/caffe2fluid/kaffe/errors.py new file mode 100644 index 0000000..75eced5 --- /dev/null +++ b/caffe2fluid/kaffe/errors.py @@ -0,0 +1,34 @@ +import sys + +#debug level, can be 'warn', 'verbose' +log_level = 'warn' + + +class KaffeError(Exception): + pass + + +def print_stderr(msg): + sys.stderr.write('%s\n' % msg) + + +def debug(msg): + if log_level == 'verbose': + print_stderr('[DEBUG]' + msg) + + +def notice(msg): + print_stderr('[NOTICE]' + msg) + + +def warn(msg): + print_stderr('[WARNING]' + msg) + + +def set_loglevel(level): + global log_level + + if 'warn' != level and 'verbose' != level: + raise Exception('not supported log level[%s]' % (level)) + + log_level = level diff --git a/caffe2fluid/kaffe/graph.py b/caffe2fluid/kaffe/graph.py new file mode 100644 index 0000000..baea3cc --- /dev/null +++ b/caffe2fluid/kaffe/graph.py @@ -0,0 +1,371 @@ +from google.protobuf import text_format + +from .caffe import get_caffe_resolver +from .errors import KaffeError, print_stderr +from .layers import LayerAdapter, LayerType, NodeKind, NodeDispatch +from .shapes import make_tensor + + +class Node(object): + def __init__(self, name, kind, layer=None): + self.name = name + self.kind = kind + self.layer = LayerAdapter(layer, kind) if layer else None + self.parents = [] + self.children = [] + self.data = None #parameters of this node + self.output_shape = None #output shape of this node + self.metadata = {} + + def add_parent(self, parent_node): + assert parent_node not in self.parents + self.parents.append(parent_node) + if self not in parent_node.children: + parent_node.children.append(self) + + def add_child(self, child_node): + assert child_node not in self.children + self.children.append(child_node) + if self not in child_node.parents: + child_node.parents.append(self) + + def get_only_parent(self): + if len(self.parents) != 1: + raise KaffeError('Node (%s) expected to have 1 parent. Found %s.' 
% + (self, len(self.parents))) + return self.parents[0] + + @property + def parameters(self): + """ get parameters stored in a protobuf object + """ + if self.layer is not None: + return self.layer.parameters + return None + + @property + def params(self): + """ get parameters stored in a dict + """ + from .protobuf_to_dict import protobuf_to_dict + + p = self.parameters + if p is not None: + return protobuf_to_dict(p) + else: + return None + + def __str__(self): + return '[%s] %s' % (self.kind, self.name) + + def __repr__(self): + return '%s (0x%x)' % (self.name, id(self)) + + +class Graph(object): + def __init__(self, nodes=None, name=None, trace={}): + self.nodes = nodes or [] + self.node_lut = {node.name: node for node in self.nodes} + self.output_trace = trace + if name is None or name == '': + self.name = 'MyNet' + else: + self.name = name + + def add_node(self, node): + self.nodes.append(node) + self.node_lut[node.name] = node + + def get_node(self, name): + try: + return self.node_lut[name] + except KeyError: + raise KaffeError('Layer not found: %s' % name) + + def add_name_trace(self, trace, which='caffe'): + self.output_trace[which] = trace + + def get_name_trace(self, which=None): + if which is not None: + return self.output_trace[which] + else: + return self.output_trace + + def get_input_nodes(self): + return [node for node in self.nodes if len(node.parents) == 0] + + def get_output_nodes(self): + return [node for node in self.nodes if len(node.children) == 0] + + def topologically_sorted(self): + sorted_nodes = [] + unsorted_nodes = list(self.nodes) + temp_marked = set() + perm_marked = set() + + def visit(node): + if node in temp_marked: + raise KaffeError('Graph is not a DAG.') + if node in perm_marked: + return + temp_marked.add(node) + for child in node.children: + visit(child) + perm_marked.add(node) + temp_marked.remove(node) + sorted_nodes.insert(0, node) + + while len(unsorted_nodes): + visit(unsorted_nodes.pop()) + return sorted_nodes + + def compute_output_shapes(self): + sorted_nodes = self.topologically_sorted() + for node in sorted_nodes: + node.output_shape = make_tensor( + *NodeKind.compute_output_shape(node)) + + def replaced(self, new_nodes): + return Graph(nodes=new_nodes, name=self.name, trace=self.output_trace) + + def transformed(self, transformers): + graph = self + for transformer in transformers: + graph = transformer(graph) + if graph is None: + raise KaffeError('Transformer failed: {}'.format(transformer)) + assert isinstance(graph, Graph) + + return graph + + def __contains__(self, key): + return key in self.node_lut + + def __str__(self): + hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param', + 'Output') + s = [hdr, '-' * 94] + for node in self.topologically_sorted(): + # If the node has learned parameters, display the first one's shape. + # In case of convolutions, this corresponds to the weights. 
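+            # Nodes without loaded parameters show '--' in the Param column.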
+ if node.data is None: + data_shape = '--' + out_shape = node.output_shape or '--' + s.append('{:<20} {:<30} {:>20} {:>20}'.format( + node.kind, node.name, data_shape, tuple(out_shape))) + else: + for d in node.data: + #data_shape = node.data[0].shape if node.data else '--' + data_shape = d.shape + out_shape = node.output_shape or '--' + s.append('{:<20} {:<30} {:>20} {:>20}'.format( + node.kind, node.name, data_shape, tuple(out_shape))) + return '\n'.join(s) + + +class GraphBuilder(object): + '''Constructs a model graph from a Caffe protocol buffer definition.''' + + def __init__(self, def_path, phase='test'): + ''' + def_path: Path to the model definition (.prototxt) + data_path: Path to the model data (.caffemodel) + phase: Either 'test' or 'train'. Used for filtering phase-specific nodes. + ''' + self.def_path = def_path + self.phase = phase + self.load() + + def load(self): + '''Load the layer definitions from the prototxt.''' + self.params = get_caffe_resolver().NetParameter() + with open(self.def_path, 'rb') as def_file: + text_format.Merge(def_file.read(), self.params) + + def filter_layers(self, layers): + '''Filter out layers based on the current phase.''' + phase_map = {0: 'train', 1: 'test'} + filtered_layer_names = set() + filtered_layers = [] + for layer in layers: + phase = self.phase + if len(layer.include): + phase = phase_map[layer.include[0].phase] + if len(layer.exclude): + phase = phase_map[1 - layer.include[0].phase] + exclude = (phase != self.phase) + # Dropout layers appear in a fair number of Caffe + # test-time networks. These are just ignored. We'll + # filter them out here. + if (not exclude) and (phase == 'test'): + exclude = (layer.type == LayerType.Dropout) + if not exclude: + filtered_layers.append(layer) + # Guard against dupes. + assert layer.name not in filtered_layer_names + filtered_layer_names.add(layer.name) + return filtered_layers + + def make_node(self, layer): + '''Create a graph node for the given layer.''' + kind = NodeKind.map_raw_kind(layer.type) + if kind is None: + raise KaffeError('Unknown layer type encountered: %s' % layer.type) + + # We want to use the layer's top names (the "output" names), rather than the + # name attribute, which is more of readability thing than a functional one. + # Other layers will refer to a node by its "top name". + return Node(layer.name, kind, layer=layer) + + def make_input_nodes(self): + ''' + Create data input nodes. + + This method is for old-style inputs, where the input specification + was not treated as a first-class layer in the prototext. + Newer models use the "Input layer" type. + ''' + nodes = [Node(name, NodeKind.Data) for name in self.params.input] + inputs_num = len(nodes) + if inputs_num > 0: + input_dims_num = len(self.params.input_dim) + if input_dims_num > 0 and input_dims_num != inputs_num * 4: + raise KaffeError('invalid input_dim[%d] param in prototxt' % + (input_dims_num)) + + input_dims = [[]] * inputs_num + for i in range(input_dims_num): + dim = self.params.input_dim[i] + which = int(i / 4) + input_dims[which].append(int(dim)) + + for i in range(inputs_num): + if len(self.params.input_shape) == inputs_num: + input_dim = map(int, self.params.input_shape[i].dim) + input_dims[i] = input_dim + + nodes[i].output_shape = tuple(input_dims[i]) + return nodes + + def build(self): + ''' + Builds the graph from the Caffe layer definitions. 
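+        (steps: parse the prototxt, filter phase-specific layers, create nodes, connect bottoms to tops, then infer every output shape)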
+ ''' + # Get the layers + layers = self.params.layers or self.params.layer + # Filter out phase-excluded layers + layers = self.filter_layers(layers) + # Get any separately-specified input layers + nodes = self.make_input_nodes() + nodes += [self.make_node(layer) for layer in layers] + # Initialize the graph + graph = Graph(nodes=nodes, name=self.params.name) + # Connect the nodes + # + # A note on layers and outputs: + # In Caffe, each layer can produce multiple outputs ("tops") from a set of inputs + # ("bottoms"). The bottoms refer to other layers' tops. The top can rewrite a bottom + # (in case of in-place operations). Note that the layer's name is not used for establishing + # any connectivity. It's only used for data association. By convention, a layer with a + # single top will often use the same name (although this is not required). + # + # The current implementation only supports single-output nodes (note that a node can still + # have multiple children, since multiple child nodes can refer to the single top's name). + node_outputs = {} + output_trace = {} + for layer in layers: + node = graph.get_node(layer.name) + for input_name in layer.bottom: + assert input_name != layer.name + parent_node = node_outputs.get(input_name) + if (parent_node is None) or (parent_node == node): + parent_node = graph.get_node(input_name) + node.add_parent(parent_node) + + if len(layer.top) > 1: + raise KaffeError('Multiple top nodes are not supported.') + + for output_name in layer.top: + if output_name == layer.name: + # Output is named the same as the node. No further action required. + continue + # There are two possibilities here: + # + # Case 1: output_name refers to another node in the graph. + # This is an "in-place operation" that overwrites an existing node. + # This would create a cycle in the graph. We'll undo the in-placing + # by substituting this node wherever the overwritten node is referenced. + # + # Case 2: output_name violates the convention layer.name == output_name. + # Since we are working in the single-output regime, we will can rename it to + # match the layer name. + # + # For both cases, future references to this top re-routes to this node. + node_outputs[output_name] = node + if output_name in output_trace: + output_trace[output_name].append(node.name) + else: + output_trace[output_name] = [output_name, node.name] + + #build a mapping from real-name to changed-name(for caffe's INPLACE inference) + real2chg = {} + deleted = {} + for k, v in output_trace.items(): + real2chg[v[-1]] = k + for n in v: + if n in real2chg: + continue + if n not in deleted: + deleted[n] = '%s.%s' % (k, v[-1]) + + graph.add_name_trace({ + 'real2chg': real2chg, + 'deleted': deleted + }, 'caffe') + graph.compute_output_shapes() + return graph + + +class NodeMapper(NodeDispatch): + def __init__(self, graph): + self.graph = graph + + def map(self): + nodes = self.graph.topologically_sorted() + # Remove input nodes - we'll handle them separately. + input_nodes = self.graph.get_input_nodes() + nodes = [t for t in nodes if t not in input_nodes] + # Decompose DAG into chains. + chains = [] + for node in nodes: + attach_to_chain = None + if len(node.parents) == 1: + parent = node.get_only_parent() + for chain in chains: + if chain[-1] == parent: + # Node is part of an existing chain. + attach_to_chain = chain + break + if attach_to_chain is None: + # Start a new chain for this node. + attach_to_chain = [] + chains.append(attach_to_chain) + attach_to_chain.append(node) + # Map each chain. 
+ mapped_chains = [] + for chain in chains: + mapped_chains.append(self.map_chain(chain)) + return self.commit(mapped_chains) + + def map_chain(self, chain): + return [self.map_node(node) for node in chain] + + def map_node(self, node): + map_func = self.get_handler(node.kind, 'map') + mapped_node = map_func(node) + assert mapped_node is not None + mapped_node.node = node + return mapped_node + + def commit(self, mapped_chains): + raise NotImplementedError('Must be implemented by subclass.') diff --git a/caffe2fluid/kaffe/layers.py b/caffe2fluid/kaffe/layers.py new file mode 100644 index 0000000..0d0aa1a --- /dev/null +++ b/caffe2fluid/kaffe/layers.py @@ -0,0 +1,250 @@ +import re +import numbers +from collections import namedtuple + +import custom_layers +from .shapes import * + +LAYER_DESCRIPTORS = { + + # Caffe Types + 'AbsVal': shape_identity, + 'Accuracy': shape_scalar, + 'ArgMax': shape_not_implemented, + 'BatchNorm': shape_identity, + 'BNLL': shape_not_implemented, + 'Concat': shape_concat, + 'ContrastiveLoss': shape_scalar, + 'Convolution': shape_convolution, + 'Deconvolution': shape_deconvolution, + 'Data': shape_data, + 'Dropout': shape_identity, + 'DummyData': shape_data, + 'Crop': shape_crop, + 'EuclideanLoss': shape_scalar, + 'Eltwise': shape_identity, + 'Exp': shape_identity, + 'Flatten': shape_not_implemented, + 'HDF5Data': shape_data, + 'HDF5Output': shape_identity, + 'HingeLoss': shape_scalar, + 'Im2col': shape_not_implemented, + 'ImageData': shape_data, + 'InfogainLoss': shape_scalar, + 'InnerProduct': shape_inner_product, + 'Input': shape_data, + 'LRN': shape_identity, + 'MemoryData': shape_mem_data, + 'MultinomialLogisticLoss': shape_scalar, + 'MVN': shape_not_implemented, + 'Pooling': shape_pool, + 'Power': shape_power, + 'ReLU': shape_identity, + 'PReLU': shape_identity, + 'Scale': shape_identity, + 'Sigmoid': shape_identity, + 'SigmoidCrossEntropyLoss': shape_scalar, + 'Silence': shape_not_implemented, + 'Softmax': shape_identity, + 'SoftmaxWithLoss': shape_scalar, + 'Split': shape_not_implemented, + 'Slice': shape_not_implemented, + 'TanH': shape_identity, + 'WindowData': shape_not_implemented, + 'Threshold': shape_identity, +} + +# layer types in 'V1LayerParameter' +# (v1layertype name, enum value, mapped to layer type) +v1_layertypes = [ + ('ABSVAL', 35), + ('ACCURACY', 1), + ('ARGMAX', 30), + ('BNLL', 2), + ('CONCAT', 3), + ('CONVOLUTION', 4), + ('DATA', 5), + ('DECONVOLUTION', 39), + ('DROPOUT', 6), + ('ELTWISE', 25), + ('EXP', 38), + ('FLATTEN', 8), + ('IM2COL', 11), + ('INNERPRODUCT', 14), + ('LRN', 15), + ('MEMORYDATA', 29), + ('MULTINOMIALLOGISTICLOSS', 16), + ('MVN', 34), + ('POOLING', 17), + ('POWER', 26), + ('RELU', 18), + ('SIGMOID', 19), + ('SIGMOIDCROSSENTROPYLOSS', 27), + ('SILENCE', 36), + ('SOFTMAX', 20), + ('SPLIT', 22), + ('SLICE', 33), + ('TANH', 23), + ('WINDOWDATA', 24), + ('THRESHOLD', 31), +] + +LAYER_TYPES = LAYER_DESCRIPTORS.keys() +LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES}) + +#map the layer name in V1 to standard name +V1_LAYER_MAP = {'_not_init_': True} + + +def get_v1_layer_map(): + global V1_LAYER_MAP + if '_not_init_' not in V1_LAYER_MAP: + return V1_LAYER_MAP + else: + del V1_LAYER_MAP['_not_init_'] + + name2layer = {} + for n in LAYER_TYPES: + name2layer[n.upper()] = n + + for l in v1_layertypes: + n, v = l + if n in name2layer and v not in V1_LAYER_MAP: + V1_LAYER_MAP[v] = name2layer[n] + else: + raise KaffeError('not found v1 layer type %s' % n) + return V1_LAYER_MAP + + +class NodeKind(LayerType): + 
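+    """ resolves raw Caffe layer types (new-style strings, V1 integer enums and registered custom layers) to canonical kinds, e.g. both 'Convolution' and the V1 enum value 4 map to 'Convolution' """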
@staticmethod + def map_raw_kind(kind): + if custom_layers.has_layer(kind): + return kind + + if kind in LAYER_TYPES: + return kind + + v1_layers = get_v1_layer_map() + if kind in v1_layers: + return v1_layers[kind] + else: + return None + + @staticmethod + def compute_output_shape(node): + if custom_layers.has_layer(node.kind): + return custom_layers.compute_output_shape(node.kind, node) + + try: + val = LAYER_DESCRIPTORS[node.kind](node) + return val + except NotImplementedError: + raise KaffeError( + 'Output shape computation not implemented for type: %s' % + node.kind) + + +class NodeDispatchError(KaffeError): + pass + + +class NodeDispatch(object): + @staticmethod + def get_handler_name(node_kind): + if len(node_kind) <= 6: + # A catch-all for things like ReLU and tanh + return node_kind.lower() + # Convert from CamelCase to under_scored + name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', node_kind) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() + + def get_handler(self, node_kind, prefix): + if custom_layers.has_layer(node_kind): + return getattr(self, 'map_custom') + + name = self.get_handler_name(node_kind) + name = '_'.join((prefix, name)) + try: + return getattr(self, name) + except AttributeError: + raise NodeDispatchError( + 'No handler found for node kind: %s (expected: %s)' % + (node_kind, name)) + + +class LayerAdapter(object): + def __init__(self, layer, kind): + self.layer = layer + self.kind = kind + + @property + def parameters(self): + name = NodeDispatch.get_handler_name(self.kind) + if self.kind.lower() == "normalize": + name = "norm" + elif self.kind.lower() == "deconvolution": + name = "convolution" + + name = '_'.join((name, 'param')) + try: + return getattr(self.layer, name) + except AttributeError: + print(dir(self.layer)) + raise NodeDispatchError( + 'Caffe parameters not found attr[%s] for layer kind[%s]' % + (name, self.kind)) + + @staticmethod + def get_kernel_value(scalar, repeated, idx, default=None): + if scalar: + return scalar + if repeated: + if isinstance(repeated, numbers.Number): + return repeated + if len(repeated) == 1: + # Same value applies to all spatial dimensions + return int(repeated[0]) + assert idx < len(repeated) + # Extract the value for the given spatial dimension + return repeated[idx] + if default is None: + raise ValueError('Unable to determine kernel parameter!') + return default + + @property + def kernel_parameters(self): + assert self.kind in (NodeKind.Convolution, NodeKind.Pooling,\ + NodeKind.Deconvolution) + + params = self.parameters + k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0) + k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1) + s_h = self.get_kernel_value( + params.stride_h, params.stride, 0, default=1) + s_w = self.get_kernel_value( + params.stride_w, params.stride, 1, default=1) + p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0) + p_w = self.get_kernel_value(params.pad_w, params.pad, 1, default=0) + + dila_h = dila_w = 1 + if self.kind in (NodeKind.Convolution, NodeKind.Deconvolution): + dila_len = len(params.dilation) + if dila_len == 2: + dila_h = params.dilation[0] + dila_w = params.dilation[1] + elif dila_len == 1: + dila_h = dila_w = params.dilation[0] + else: + assert dila_len == 0, "invalid length[%s] of dilation in convolution" % ( + dila_len) + + return KernelParameters(k_h, k_w, s_h, s_w, p_h, p_w, dila_h, dila_w) + + +KernelParameters = namedtuple( + 'KernelParameters', + [ + 'kernel_h', 'kernel_w', 'stride_h', 'stride_w', 'pad_h', 'pad_w', + 
'dila_h', 'dila_w' + ], ) diff --git a/caffe2fluid/kaffe/net_template.py b/caffe2fluid/kaffe/net_template.py new file mode 100644 index 0000000..86a6628 --- /dev/null +++ b/caffe2fluid/kaffe/net_template.py @@ -0,0 +1,161 @@ +""" this module is used as a template for generating sub class of Network +""" + + +class MyNet(object): + ### automatically generated by caffe2fluid ### + inputs_info = "INPUTS_INFO" + custom_layers_path = "_CAFFE2FLUID_CUSTOM_LAYERS_" + + def custom_layer_factory(self): + import os + + pk_paths = [] + default = os.path.dirname(os.path.abspath(__file__)) + location = os.environ.get('CAFFE2FLUID_CUSTOM_LAYERS', default) + pk_name = 'custom_layers' + pk_dir = os.path.join(location, pk_name) + pk_paths.append((location, pk_dir)) + + location = MyNet.custom_layers_path + pk_dir = os.path.join(MyNet.custom_layers_path, pk_name) + pk_paths.append((location, pk_dir)) + + for loc, pk_dir in pk_paths: + if os.path.exists(pk_dir): + if loc not in sys.path: + sys.path.insert(0, loc) + break + + try: + from custom_layers import make_custom_layer + return make_custom_layer + except Exception as e: + print('maybe you should set $CAFFE2FLUID_CUSTOM_LAYERS first') + raise e + + @classmethod + def input_shapes(cls): + return cls.inputs_info + + @classmethod + def convert(cls, npy_model, fluid_path, outputs=None): + fluid = import_fluid() + shapes = cls.input_shapes() + input_name = shapes.keys()[0] + feed_data = {} + for name, shape in shapes.items(): + data_layer = fluid.layers.data( + name=name, shape=shape, dtype="float32") + feed_data[name] = data_layer + + net = cls(feed_data) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + net.load(data_path=npy_model, exe=exe, place=place) + output_vars = [] + + model_filename = 'model' + params_filename = 'params' + if outputs is None: + output_vars.append(net.get_output()) + else: + if outputs[0] == 'dump_all': + model_filename = None + params_filename = None + output_vars.append(net.get_output()) + else: + if type(outputs) is list: + for n in outputs: + assert n in net.layers, 'not found layer with this name[%s]' % ( + n) + output_vars.append(net.layers[n]) + + fluid.io.save_inference_model( + fluid_path, [input_name], + output_vars, + exe, + main_program=None, + model_filename=model_filename, + params_filename=params_filename) + return 0 + + +def main(): + """ a tool used to convert caffe model to fluid + """ + + import sys + import os + filename = os.path.splitext(os.path.basename(sys.argv[0]))[0] + if len(sys.argv) < 3: + print('usage:') + print(' python %s %s.npy [save_dir] [layer names seperated by comma]' \ + % (sys.argv[0], filename)) + print(' eg: python %s %s.npy ./fluid' % (sys.argv[0], filename)) + print(' eg: python %s %s.npy ./fluid layer_name1,layer_name2' \ + % (sys.argv[0], filename)) + return 1 + + npy_weight = sys.argv[1] + fluid_model = sys.argv[2] + outputs = None + if len(sys.argv) >= 4: + outputs = sys.argv[3].split(',') + + ret = MyNet.convert(npy_weight, fluid_model, outputs) + if ret == 0: + outputs = 'last output layer' if outputs is None else outputs + print('succeed to convert to fluid format with output layers[%s]' + ' in directory[%s]' % (outputs, fluid_model)) + else: + print('failed to convert model to fluid format') + + return ret + + +def generate_net_code(net_name, inputs_info): + """ generate framework of a custom net code which represent a subclass of Network + + Args: + @net_name (str): class name for this net + @inputs_info (str): a str which 
represents a dict, eg: '{"data": [3, 32, 32]}' + Returns: + net_codes (str): codes for this subclass + """ + import os + import inspect + + net_codes = str(inspect.getsource(MyNet)) + net_codes = net_codes.replace('MyNet(object)', '%s(Network)' % net_name) + net_codes = net_codes.replace('MyNet', net_name) + net_codes = net_codes.replace('"INPUTS_INFO"', inputs_info) + + custom_layer_dir = os.path.dirname(os.path.abspath(__file__)) + net_codes = net_codes.replace('_CAFFE2FLUID_CUSTOM_LAYERS_', + custom_layer_dir) + return net_codes + + +def generate_main_code(net_name): + """ generate a piece of code for 'main' function + + Args: + @net_name (str): class name for this net + + Returns: + main_codes (str): codes for this main function + """ + import inspect + + main_codes = str(inspect.getsource(main)) + main_codes = main_codes.replace('MyNet', net_name) + return main_codes + + +if __name__ == "__main__": + """ just for testing + """ + print generate_net_code('Attribute', "{'data': [3, 277, 277]}") + print generate_main_code('Attribute') diff --git a/caffe2fluid/kaffe/paddle/__init__.py b/caffe2fluid/kaffe/paddle/__init__.py new file mode 100644 index 0000000..685b653 --- /dev/null +++ b/caffe2fluid/kaffe/paddle/__init__.py @@ -0,0 +1,2 @@ +from .transformer import Transformer +from .network import Network diff --git a/caffe2fluid/kaffe/paddle/network.py b/caffe2fluid/kaffe/paddle/network.py new file mode 100644 index 0000000..718bd19 --- /dev/null +++ b/caffe2fluid/kaffe/paddle/network.py @@ -0,0 +1,576 @@ +import sys +import os +import math +import numpy as np + + +def import_fluid(): + import paddle.fluid as fluid + return fluid + + +def layer(op): + '''Decorator for composable network layers.''' + + def layer_decorated(self, *args, **kwargs): + # Automatically set a name if not provided. + name = kwargs.setdefault('name', self.get_unique_name(op.__name__)) + # Figure out the layer inputs. + if len(self.terminals) == 0: + raise RuntimeError('No input variables found for layer %s.' % name) + elif len(self.terminals) == 1: + layer_input = self.terminals[0] + else: + layer_input = list(self.terminals) + + self.layer_reverse_trace[name] = layer_input + # Perform the operation and get the output. + layer_output = op(self, layer_input, *args, **kwargs) + # Add to layer LUT. + self.layers[name] = layer_output + self.var2name[layer_output.name] = (name, layer_output) + + # This output is now the input for the next layer. + self.feed(layer_output) + # Return self for chained calls. + return self + + return layer_decorated + + +class Network(object): + def __init__(self, inputs, trainable=True): + # The input nodes for this network + self.inputs = inputs + # The current list of terminal nodes + self.terminals = [] + # Mapping from layer names to layers + self.layers = dict(inputs) + # If true, the resulting variables are set as trainable + self.trainable = trainable + # Switch variable for dropout + self.paddle_env = None + self.output_names = [] + self.name_trace = None + + self.layer_reverse_trace = {} + self.var2name = {} + self.setup() + + def setup(self): + '''Construct the network. 
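+ Subclasses generated by caffe2fluid override this with the converted, layer-by-layer definition.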
''' + raise NotImplementedError('Must be implemented by the subclass.') + + def locate_ancestor(self, v, which=[0], ancestor_level=1): + """ find a ancestor for a node 'v' which is a fluid variable + """ + ancestor = None + which = which * ancestor_level + name = self.var2name[v.name][0] + + for i in range(ancestor_level): + v = self.layer_reverse_trace[name] + if type(v) is list: + ancestor = self.var2name[v[which[i]].name] + else: + ancestor = self.var2name[v.name] + name = ancestor[0] + return ancestor + + def load(self, data_path, exe=None, place=None, ignore_missing=False): + '''Load network weights. + data_path: The path to the numpy-serialized network weights + ignore_missing: If true, serialized weights for missing layers are ignored. + ''' + fluid = import_fluid() + #load fluid mode directly + if os.path.isdir(data_path): + assert (exe is not None), \ + 'must provide a executor to load fluid model' + fluid.io.load_persistables(executor=exe, dirname=data_path) + return True + + #load model from a npy file + if exe is None or place is None: + if self.paddle_env is None: + place = fluid.CPUPlace() + exe = fluid.Executor(place) + self.paddle_env = {'place': place, 'exe': exe} + exe = exe.run(fluid.default_startup_program()) + else: + place = self.paddle_env['place'] + exe = self.paddle_env['exe'] + + data_dict = np.load(data_path).item() + for op_name in data_dict: + if op_name == 'caffe2fluid_name_trace': + self.name_trace = data_dict[op_name] + continue + + layer = self.layers[op_name] + for param_name, data in data_dict[op_name].iteritems(): + try: + name = '%s_%s' % (op_name, param_name) + v = fluid.global_scope().find_var(name) + w = v.get_tensor() + w.set(data.reshape(w.shape()), place) + except ValueError: + if not ignore_missing: + raise + return True + + def feed(self, *args): + '''Set the input(s) for the next operation by replacing the terminal nodes. + The arguments can be either layer names or the actual layers. + ''' + assert len(args) != 0 + self.terminals = [] + for fed_layer in args: + if isinstance(fed_layer, basestring): + try: + fed_layer = self.layers[fed_layer] + except KeyError: + raise KeyError('Unknown layer name fed: %s' % fed_layer) + self.terminals.append(fed_layer) + return self + + def get_output(self): + '''Returns the current network output.''' + return self.terminals[-1] + + def get_unique_name(self, prefix): + '''Returns an index-suffixed unique name for the given prefix. + This is used for auto-generating layer names based on the type-prefix. + ''' + ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1 + return '%s_%d' % (prefix, ident) + + def get_unique_output_name(self, prefix, layertype): + '''Returns an index-suffixed unique name for the given prefix. + This is used for auto-generating layer names based on the type-prefix. 
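+ e.g. the second output created under prefix 'conv1' with layertype 'conv2d' is named 'conv1.conv2d.output.2'.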
+ ''' + ident = sum(t.startswith(prefix) for t in self.output_names) + 1 + unique_name = '%s.%s.output.%d' % (prefix, layertype, ident) + self.output_names.append(unique_name) + return unique_name + + @layer + def conv(self, + input, + k_h, + k_w, + c_o, + s_h, + s_w, + name, + relu=True, + relu_negative_slope=0.0, + padding=None, + dilation=1, + group=1, + biased=True): + if padding is None: + padding = [0, 0] + + # Get the number of channels in the input + c_i, h_i, w_i = input.shape[1:] + + # Verify that the grouping parameter is valid + assert c_i % group == 0 + assert c_o % group == 0 + + fluid = import_fluid() + prefix = name + '_' + leaky_relu = False + act = 'relu' + if relu is False: + act = None + elif relu_negative_slope != 0.0: + leaky_relu = True + act = None + + output = fluid.layers.conv2d( + name=self.get_unique_output_name(name, 'conv2d'), + input=input, + filter_size=[k_h, k_w], + num_filters=c_o, + stride=[s_h, s_w], + padding=padding, + dilation=dilation, + groups=group, + param_attr=fluid.ParamAttr(name=prefix + "weights"), + bias_attr=fluid.ParamAttr(name=prefix + "biases"), + act=act) + + if leaky_relu: + output = fluid.layers.leaky_relu(output, alpha=relu_negative_slope) + + return output + + @layer + def deconv(self, + input, + k_h, + k_w, + c_o, + s_h, + s_w, + name, + relu=True, + relu_negative_slope=0.0, + padding=None, + dilation=1, + biased=True): + if padding is None: + padding = [0, 0] + + # Get the number of channels in the input + c_i, h_i, w_i = input.shape[1:] + + fluid = import_fluid() + prefix = name + '_' + leaky_relu = False + act = 'relu' + if relu is False: + act = None + elif relu_negative_slope != 0.0: + leaky_relu = True + act = None + + p_h = padding[0] + p_w = padding[1] + h_o = (h_i - 1) * s_h - 2 * p_h + dilation * (k_h - 1) + 1 + w_o = (w_i - 1) * s_w - 2 * p_w + dilation * (k_w - 1) + 1 + output = fluid.layers.conv2d_transpose( + name=self.get_unique_output_name(name, 'conv2d_transpose'), + input=input, + num_filters=c_o, + output_size=[h_o, w_o], + filter_size=[k_h, k_w], + padding=padding, + stride=[s_h, s_w], + dilation=dilation, + param_attr=fluid.ParamAttr(name=prefix + "weights"), + bias_attr=fluid.ParamAttr(name=prefix + "biases"), + act=act) + + if leaky_relu: + output = fluid.layers.leaky_relu(output, alpha=relu_negative_slope) + + return output + + @layer + def relu(self, input, name): + fluid = import_fluid() + output = fluid.layers.relu(input) + return output + + @layer + def prelu(self, input, channel_shared, name): + fluid = import_fluid() + if channel_shared: + mode = 'all' + else: + mode = 'channel' + + prefix = name + '_' + output = fluid.layers.prelu( + input, + mode=mode, + param_attr=fluid.ParamAttr(name=prefix + 'negslope')) + return output + + def pool(self, + pool_type, + input, + k_h, + k_w, + s_h, + s_w, + ceil_mode, + padding, + name, + exclusive=True): + # Get the number of channels in the input + in_hw = input.shape[2:] + k_hw = [k_h, k_w] + s_hw = [s_h, s_w] + + fluid = import_fluid() + output = fluid.layers.pool2d( + name=name, + input=input, + pool_size=k_hw, + pool_stride=s_hw, + pool_padding=padding, + ceil_mode=ceil_mode, + pool_type=pool_type, + exclusive=exclusive) + return output + + @layer + def max_pool(self, + input, + k_h, + k_w, + s_h, + s_w, + ceil_mode, + padding=[0, 0], + name=None): + return self.pool( + 'max', + input, + k_h, + k_w, + s_h, + s_w, + ceil_mode, + padding, + name=self.get_unique_output_name(name, 'max_pool')) + + @layer + def avg_pool(self, + input, + k_h, + k_w, + s_h, + s_w, + 
ceil_mode, + padding=[0, 0], + name=None): + return self.pool( + 'avg', + input, + k_h, + k_w, + s_h, + s_w, + ceil_mode, + padding, + name=self.get_unique_output_name(name, 'avg_pool'), + exclusive=False) + + @layer + def sigmoid(self, input, name): + fluid = import_fluid() + return fluid.layers.sigmoid( + input, name=self.get_unique_output_name(name, 'sigmoid')) + + @layer + def tanh(self, input, name): + fluid = import_fluid() + return fluid.layers.tanh( + input, name=self.get_unique_output_name(name, 'tanh')) + + @layer + def lrn(self, input, radius, alpha, beta, name, bias=1.0): + fluid = import_fluid() + output = fluid.layers.lrn(input=input, + n=radius, + k=bias, + alpha=alpha, + beta=beta, + name=self.get_unique_output_name(name, 'lrn')) + return output + + @layer + def concat(self, inputs, axis, name): + fluid = import_fluid() + output = fluid.layers.concat( + input=inputs, + axis=axis, + name=self.get_unique_output_name(name, 'concat')) + return output + + @layer + def add(self, inputs, name): + fluid = import_fluid() + output = inputs[0] + for i in inputs[1:]: + output = fluid.layers.elementwise_add( + x=output, y=i, name=self.get_unique_output_name(name, 'add')) + return output + + @layer + def max(self, inputs, name): + fluid = import_fluid() + output = inputs[0] + for i in inputs[1:]: + output = fluid.layers.elementwise_max( + x=output, y=i, name=self.get_unique_output_name(name, 'max')) + return output + + @layer + def multiply(self, inputs, name): + fluid = import_fluid() + output = inputs[0] + for i in inputs[1:]: + output = fluid.layers.elementwise_mul( + x=output, y=i, name=self.get_unique_output_name(name, 'mul')) + return output + + @layer + def fc(self, input, num_out, name, relu=True, act=None): + fluid = import_fluid() + + if act is None: + act = 'relu' if relu is True else None + + prefix = name + '_' + output = fluid.layers.fc( + name=self.get_unique_output_name(name, 'fc'), + input=input, + size=num_out, + act=act, + param_attr=fluid.ParamAttr(name=prefix + 'weights'), + bias_attr=fluid.ParamAttr(name=prefix + 'biases')) + return output + + @layer + def softmax(self, input, axis=2, name=None): + fluid = import_fluid() + shape = input.shape + dims = len(shape) + axis = axis + dims if axis < 0 else axis + + need_transpose = False + if axis + 1 != dims: + need_transpose = True + + if need_transpose: + order = range(dims) + order.remove(axis) + order.append(axis) + input = fluid.layers.transpose( + input, + perm=order, + name=self.get_unique_output_name(name, 'transpose')) + + output = fluid.layers.softmax( + input, name=self.get_unique_output_name(name, 'softmax')) + + if need_transpose: + order = range(len(shape)) + order[axis] = dims - 1 + order[-1] = axis + output = fluid.layers.transpose( + output, + perm=order, + name=self.get_unique_output_name(name, 'transpose')) + return output + + @layer + def batch_normalization(self, + input, + name, + scale_offset=True, + eps=1e-5, + relu=False, + relu_negative_slope=0.0): + # NOTE: Currently, only inference is supported + fluid = import_fluid() + prefix = name + '_' + param_attr = None if scale_offset is False else fluid.ParamAttr( + name=prefix + 'scale') + bias_attr = None if scale_offset is False else fluid.ParamAttr( + name=prefix + 'offset') + mean_name = prefix + 'mean' + variance_name = prefix + 'variance' + + leaky_relu = False + act = 'relu' + if relu is False: + act = None + elif relu_negative_slope != 0.0: + leaky_relu = True + act = None + + output = fluid.layers.batch_norm( + 
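+ # the 'scale'/'offset' parameters come from the Caffe Scale layer fused into this BatchNorm node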
name=self.get_unique_output_name(name, 'batch_norm'), + input=input, + is_test=True, + param_attr=param_attr, + bias_attr=bias_attr, + moving_mean_name=mean_name, + moving_variance_name=variance_name, + epsilon=eps, + act=act) + + if leaky_relu: + output = fluid.layers.leaky_relu(output, alpha=relu_negative_slope) + + return output + + @layer + def dropout(self, input, drop_prob, name, is_test=True): + fluid = import_fluid() + if is_test: + output = input + else: + output = fluid.layers.dropout( + input, + dropout_prob=drop_prob, + is_test=is_test, + name=self.get_unique_output_name(name, 'dropout')) + return output + + @layer + def scale(self, input, axis=1, num_axes=1, name=None): + fluid = import_fluid() + + assert num_axes == 1, "layer scale not support this num_axes[%d] now" % ( + num_axes) + + prefix = name + '_' + scale_shape = input.shape[axis:axis + num_axes] + param_attr = fluid.ParamAttr(name=prefix + 'scale') + scale_param = fluid.layers.create_parameter( + shape=scale_shape, + dtype=input.dtype, + name=name, + attr=param_attr, + is_bias=True, + default_initializer=fluid.initializer.Constant(value=1.0)) + + offset_attr = fluid.ParamAttr(name=prefix + 'offset') + offset_param = fluid.layers.create_parameter( + shape=scale_shape, + dtype=input.dtype, + name=name, + attr=offset_attr, + is_bias=True, + default_initializer=fluid.initializer.Constant(value=0.0)) + + output = fluid.layers.elementwise_mul( + input, + scale_param, + axis=axis, + name=self.get_unique_output_name(name, 'scale_mul')) + output = fluid.layers.elementwise_add( + output, + offset_param, + axis=axis, + name=self.get_unique_output_name(name, 'scale_add')) + return output + + def custom_layer_factory(self): + """ get a custom layer maker provided by subclass + """ + raise NotImplementedError( + '[custom_layer_factory] must be implemented by the subclass.') + + @layer + def custom_layer(self, inputs, kind, name, *args, **kwargs): + """ make custom layer + """ + #FIX ME: + # there is a trick for different API between caffe and paddle + if kind == "DetectionOutput": + conf_var = inputs[1] + real_conf_var = self.locate_ancestor(conf_var, ancestor_level=2) + inputs[1] = real_conf_var[1] + + name = self.get_unique_output_name(name, kind) + layer_factory = self.custom_layer_factory() + return layer_factory(kind, inputs, name, *args, **kwargs) diff --git a/caffe2fluid/kaffe/paddle/transformer.py b/caffe2fluid/kaffe/paddle/transformer.py new file mode 100644 index 0000000..b07393f --- /dev/null +++ b/caffe2fluid/kaffe/paddle/transformer.py @@ -0,0 +1,391 @@ +import numpy as np + +from ..errors import KaffeError, print_stderr +from ..graph import GraphBuilder, NodeMapper +from ..layers import NodeKind +from ..transformers import (DataInjector, DataReshaper, NodeRenamer, + SubNodeFuser, ReLUFuser, BatchNormScaleBiasFuser, + BatchNormPreprocessor, ParameterNamer, CropFuser) +from . 
import network + + +class PaddleNode(object): + '''An intermediate representation for Paddle operations.''' + + def __init__(self, op, *args, **kwargs): + # A string corresponding to the Paddle operation + self.op = op + # Positional arguments for the operation + self.args = args + # Keyword arguments for the operation + self.kwargs = list(kwargs.items()) + # The source Caffe node + self.node = None + + def format(self, arg): + '''Returns a string representation for the given value.''' + return "'%s'" % arg if isinstance(arg, basestring) else str(arg) + + def pair(self, key, value): + '''Returns key=formatted(value).''' + return '%s=%s' % (key, self.format(value)) + + def emit(self): + '''Emits the Python source for this node.''' + # Format positional arguments + args = map(self.format, self.args) + # Format any keyword arguments + if self.kwargs: + args += [self.pair(k, v) for k, v in self.kwargs] + # Set the node name + args.append(self.pair('name', self.node.name)) + args = ', '.join(args) + return '%s(%s)' % (self.op, args) + + +class MaybeActivated(object): + def __init__(self, node, default=True): + self.inject_kwargs = {} + if node.metadata.get('relu', False) != default: + self.inject_kwargs['relu'] = not default + + default_slope = 0.0 + slope = node.metadata.get('relu_negative_slope', default_slope) + if slope != default_slope: + self.inject_kwargs['relu_negative_slope'] = slope + + def __call__(self, *args, **kwargs): + kwargs.update(self.inject_kwargs) + return PaddleNode(*args, **kwargs) + + +class PaddleMapper(NodeMapper): + def get_kernel_params(self, node): + kernel_params = node.layer.kernel_parameters + input_shape = node.get_only_parent().output_shape + padding = [kernel_params.pad_h, kernel_params.pad_w] + if padding[0] == 0 and padding[1] == 0: + padding = {} + else: + padding = {'padding': padding} + return (kernel_params, padding) + + def map_convolution(self, node): + (kernel_params, kwargs) = self.get_kernel_params(node) + h = kernel_params.kernel_h + w = kernel_params.kernel_w + c_o = node.output_shape[1] + c_i = node.parents[0].output_shape[1] + group = node.parameters.group + if group != 1: + kwargs['group'] = group + if not node.parameters.bias_term: + kwargs['biased'] = False + + if kernel_params.dila_h != 1 or kernel_params.dila_w != 1: + kwargs['dilation'] = (kernel_params.dila_h, kernel_params.dila_w) + + assert kernel_params.kernel_h == h + assert kernel_params.kernel_w == w + return MaybeActivated(node)( + 'conv', kernel_params.kernel_h, kernel_params.kernel_w, c_o, + kernel_params.stride_h, kernel_params.stride_w, **kwargs) + + def map_deconvolution(self, node): + (kernel_params, kwargs) = self.get_kernel_params(node) + h = kernel_params.kernel_h + w = kernel_params.kernel_w + c_o = node.output_shape[1] + c_i = node.parents[0].output_shape[1] + if not node.parameters.bias_term: + kwargs['biased'] = False + + if kernel_params.dila_h != 1 or kernel_params.dila_w != 1: + kwargs['dilation'] = (kernel_params.dila_h, kernel_params.dila_w) + + assert kernel_params.kernel_h == h + assert kernel_params.kernel_w == w + return MaybeActivated(node)( + 'deconv', kernel_params.kernel_h, kernel_params.kernel_w, c_o, + kernel_params.stride_h, kernel_params.stride_w, **kwargs) + + def map_relu(self, node): + return PaddleNode('relu') + + def map_prelu(self, node): + channel_shared = getattr(node.parameters, 'channel_shared', False) + return PaddleNode('prelu', channel_shared) + + def map_tanh(self, node): + return PaddleNode('tanh') + + def map_pooling(self, node): + 
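+ # Caffe's PoolingParameter.PoolMethod enum: 0 = MAX, 1 = AVE, 2 = STOCHASTIC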
pool_type = node.parameters.pool + if pool_type == 0: + pool_op = 'max_pool' + elif pool_type == 1: + pool_op = 'avg_pool' + else: + # Stochastic pooling, for instance. + raise KaffeError('Unsupported pooling type.') + + ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True) + global_pool = getattr(node.layer.parameters, 'global_pooling', False) + if global_pool: + input_shape = node.get_only_parent().output_shape + return PaddleNode(pool_op, input_shape.height, input_shape.width, 1, + 1, ceil_mode) + else: + (kernel_params, padding) = self.get_kernel_params(node) + return PaddleNode(pool_op, kernel_params.kernel_h, + kernel_params.kernel_w, kernel_params.stride_h, + kernel_params.stride_w, ceil_mode, **padding) + + def map_sigmoid(self, node): + return PaddleNode('sigmoid') + + def map_custom(self, node): + from .. import custom_layers + return custom_layers.make_node(PaddleNode, node.kind, node) + + def map_inner_product(self, node): + #TODO: Axis + assert node.parameters.axis == 1 + #TODO: Unbiased + assert node.parameters.bias_term == True + return MaybeActivated(node)('fc', node.parameters.num_output) + + def map_softmax(self, node): + return PaddleNode('softmax', node.parameters.axis) + + def map_lrn(self, node): + params = node.parameters + # The window size must be an odd value. For a window + # size of (2*n+1), Paddle defines depth_radius = n. + assert params.local_size % 2 == 1 + # Caffe scales by (alpha/(2*n+1)), whereas Paddle + # just scales by alpha (as does Krizhevsky's paper). + # We'll account for that here. + alpha = params.alpha / float(params.local_size) + return PaddleNode('lrn', params.local_size, alpha, params.beta) + + def map_concat(self, node): + return PaddleNode('concat', node.parameters.axis) + + def map_dropout(self, node): + return PaddleNode('dropout', node.parameters.dropout_ratio) + + def map_batch_norm(self, node): + scale_offset = len(node.data) == 4 + + #this default value comes from caffe's param in batch_norm + default_eps = 1e-5 + kwargs = {'scale_offset': scale_offset} + if node.parameters.eps != default_eps: + kwargs['eps'] = node.parameters.eps + + return MaybeActivated( + node, default=False)('batch_normalization', **kwargs) + + def map_eltwise(self, node): + operations = {0: 'multiply', 1: 'add', 2: 'max'} + op_code = node.parameters.operation + try: + return PaddleNode(operations[op_code]) + except KeyError: + raise KaffeError('Unknown elementwise operation: {}'.format( + op_code)) + + def map_scale(self, node): + params = node.parameters + return PaddleNode('scale', axis=params.axis, num_axes=params.num_axes) + + def commit(self, chains): + return chains + + +class PaddleEmitter(object): + def __init__(self, tab=None): + self.tab = tab or ' ' * 4 + self.prefix = '' + self.net_name = '' + + def indent(self): + self.prefix += self.tab + + def outdent(self): + self.prefix = self.prefix[:-len(self.tab)] + + def statement(self, s): + return self.prefix + s + '\n' + + def emit_imports(self): + import inspect + codes = [] + codes.append( + '### generated by caffe2fluid, your net is in class "%s" ###\n' % + (self.net_name)) + network_source = inspect.getsource(network) + codes.append(network_source + '\n') + return self.statement('\n'.join(codes)) + + def emit_setup_def(self): + return self.statement('def setup(self):') + + def get_inputs_info(self, input_nodes): + input_shapes = {} + for n in input_nodes: + name = n.name + output_shape = n.output_shape + shape = [str(s) for s in output_shape[1:]] + input_shapes[name] = ', '.join(shape) + 
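+ # e.g. an input 'data' of shape (N, 3, 224, 224) yields the entry '"data": [3, 224, 224]';
+ # the joined dict string later replaces "INPUTS_INFO" in the generated net class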
input_shapes = ['"%s": [%s]' % (n, l) for n, l in input_shapes.items()] + shape_str = ','.join(input_shapes) + return '{%s}' % (shape_str) + + def emit_main_def(self, name): + if name is None: + return '' + + self.prefix = '' + main_def = self.statement('if __name__ == "__main__":') + self.indent() + main_def += self.statement('exit(main())') + return '\n\n' + main_def + + def emit_parents(self, chain): + assert len(chain) + s = 'self.feed(' + sep = ', \n' + self.prefix + (' ' * len(s)) + s += sep.join( + ["'%s'" % parent.name for parent in chain[0].node.parents]) + return self.statement(s + ')') + + def emit_node(self, node): + return self.statement('self.' + node.emit()) + + def emit(self, name, chains, input_nodes=None): + from ..net_template import generate_net_code + from ..net_template import generate_main_code + + self.net_name = name + inputs_info = self.get_inputs_info(input_nodes) + + s = self.emit_imports() + s += generate_net_code(name, inputs_info) + '\n' + self.indent() + + # define the net using api + s += self.emit_setup_def() + self.indent() + blocks = [] + for chain in chains: + b = '' + b += self.emit_parents(chain) + for node in chain: + b += self.emit_node(node) + blocks.append(b[:-1]) + s = s + '\n\n'.join(blocks) + + # define the main function + s += '\n\n\n' + generate_main_code(name) + s += self.emit_main_def(name) + return s + + +class Transformer(object): + def __init__(self, def_path, data_path, verbose=True, phase='test'): + self.verbose = verbose + self.phase = phase + self.load(def_path, data_path, phase) + self.params = None + self.source = None + + def load(self, def_path, data_path, phase): + # Build the graph + graph = GraphBuilder(def_path, phase).build() + + if data_path is not None: + # Load and associate learned parameters + graph = DataInjector(def_path, data_path)(graph) + + # Transform the graph + transformers = [ + # Fuse split batch normalization layers + BatchNormScaleBiasFuser(), + + # Fuse ReLUs + # TODO: Move non-linearity application to layer wrapper, allowing + # any arbitrary operation to be optionally activated. + ReLUFuser(allowed_parent_types=[ + NodeKind.Convolution, NodeKind.InnerProduct, NodeKind.BatchNorm + ]), + + # Rename nodes + # Slashes are used for scoping in Paddle. Replace slashes + # in node names with underscores. + # (Caffe's GoogLeNet implementation uses slashes) + NodeRenamer(lambda node: node.name.replace('/', '_')), + + # Fuse Crop + # Crop is to return a scalar output Blob for an input Blob of arbitrary size. + # When one of the input Blob is "input" or "DummyData", we can remove this input Blob + # and put the shape into the reduction layer. 
+ CropFuser() + ] + + self.graph = graph.transformed(transformers) + + #for the purpose of recording name mapping because of fused nodes + trace = SubNodeFuser.traced_names() + chg2real = {} + deleted = {} + for k, v in trace.items(): + chg2real[k] = v[-1] #mapping from changed-name to real-name + for n in v: + if n in chg2real: + continue + if n not in deleted: + deleted[n] = '%s.%s' % (k, v[-1]) + + self.graph.add_name_trace({ + 'chg2real': chg2real, + 'deleted': deleted + }, 'paddle') + + # Display the graph + if self.verbose: + print_stderr(self.graph) + + def transform_data(self): + if self.params is None: + transformers = [ + # Reshape the parameters to Paddle's ordering + DataReshaper({ + # (c_o, c_i) -> (c_i, c_o) + NodeKind.InnerProduct: (1, 0) + }), + + # Pre-process batch normalization data + BatchNormPreprocessor(), + + # Convert parameters to dictionaries + ParameterNamer(), + ] + self.graph = self.graph.transformed(transformers) + self.params = { + node.name: node.data + for node in self.graph.nodes if node.data + } + self.params['caffe2fluid_name_trace'] = self.graph.get_name_trace() + + return self.params + + def transform_source(self): + if self.source is None: + mapper = PaddleMapper(self.graph) + chains = mapper.map() + emitter = PaddleEmitter() + input_nodes = self.graph.get_input_nodes() + self.source = emitter.emit(self.graph.name, chains, input_nodes) + return self.source diff --git a/caffe2fluid/kaffe/protobuf_to_dict.py b/caffe2fluid/kaffe/protobuf_to_dict.py new file mode 100644 index 0000000..cdc8d44 --- /dev/null +++ b/caffe2fluid/kaffe/protobuf_to_dict.py @@ -0,0 +1,185 @@ +"""a util for convert protobuf to dict +""" + +from google.protobuf.message import Message +from google.protobuf.descriptor import FieldDescriptor + +__all__ = [ + "protobuf_to_dict", "TYPE_CALLABLE_MAP", "dict_to_protobuf", + "REVERSE_TYPE_CALLABLE_MAP" +] + +EXTENSION_CONTAINER = '___X' + +TYPE_CALLABLE_MAP = { + FieldDescriptor.TYPE_DOUBLE: float, + FieldDescriptor.TYPE_FLOAT: float, + FieldDescriptor.TYPE_INT32: int, + FieldDescriptor.TYPE_INT64: long, + FieldDescriptor.TYPE_UINT32: int, + FieldDescriptor.TYPE_UINT64: long, + FieldDescriptor.TYPE_SINT32: int, + FieldDescriptor.TYPE_SINT64: long, + FieldDescriptor.TYPE_FIXED32: int, + FieldDescriptor.TYPE_FIXED64: long, + FieldDescriptor.TYPE_SFIXED32: int, + FieldDescriptor.TYPE_SFIXED64: long, + FieldDescriptor.TYPE_BOOL: bool, + FieldDescriptor.TYPE_STRING: unicode, + FieldDescriptor.TYPE_BYTES: lambda b: b.encode("base64"), + FieldDescriptor.TYPE_ENUM: int, +} + + +def repeated(type_callable): + return lambda value_list: [type_callable(value) for value in value_list] + + +def enum_label_name(field, value): + return field.enum_type.values_by_number[int(value)].name + + +def protobuf_to_dict(pb, + type_callable_map=TYPE_CALLABLE_MAP, + use_enum_labels=False): + result_dict = {} + extensions = {} + for field, value in pb.ListFields(): + type_callable = _get_field_value_adaptor(pb, field, type_callable_map, + use_enum_labels) + if field.label == FieldDescriptor.LABEL_REPEATED: + type_callable = repeated(type_callable) + + if field.is_extension: + extensions[str(field.number)] = type_callable(value) + continue + + result_dict[field.name] = type_callable(value) + + if extensions: + result_dict[EXTENSION_CONTAINER] = extensions + return result_dict + + +def _get_field_value_adaptor(pb, + field, + type_callable_map=TYPE_CALLABLE_MAP, + use_enum_labels=False): + if field.type == FieldDescriptor.TYPE_MESSAGE: + # recursively encode protobuf 
sub-message + return lambda pb: protobuf_to_dict(pb, + type_callable_map=type_callable_map, + use_enum_labels=use_enum_labels) + + if use_enum_labels and field.type == FieldDescriptor.TYPE_ENUM: + return lambda value: enum_label_name(field, value) + + if field.type in type_callable_map: + return type_callable_map[field.type] + + raise TypeError("Field %s.%s has unrecognised type id %d" % + (pb.__class__.__name__, field.name, field.type)) + + +def get_bytes(value): + return value.decode('base64') + + +REVERSE_TYPE_CALLABLE_MAP = {FieldDescriptor.TYPE_BYTES: get_bytes, } + + +def dict_to_protobuf(pb_klass_or_instance, + values, + type_callable_map=REVERSE_TYPE_CALLABLE_MAP, + strict=True): + """Populates a protobuf model from a dictionary. + + :param pb_klass_or_instance: a protobuf message class, or an protobuf instance + :type pb_klass_or_instance: a type or instance of a subclass of google.protobuf.message.Message + :param dict values: a dictionary of values. Repeated and nested values are + fully supported. + :param dict type_callable_map: a mapping of protobuf types to callables for setting + values on the target instance. + :param bool strict: complain if keys in the map are not fields on the message. + """ + if isinstance(pb_klass_or_instance, Message): + instance = pb_klass_or_instance + else: + instance = pb_klass_or_instance() + return _dict_to_protobuf(instance, values, type_callable_map, strict) + + +def _get_field_mapping(pb, dict_value, strict): + field_mapping = [] + for key, value in dict_value.items(): + if key == EXTENSION_CONTAINER: + continue + if key not in pb.DESCRIPTOR.fields_by_name: + if strict: + raise KeyError("%s does not have a field called %s" % (pb, key)) + continue + field_mapping.append( + (pb.DESCRIPTOR.fields_by_name[key], value, getattr(pb, key, None))) + + for ext_num, ext_val in dict_value.get(EXTENSION_CONTAINER, {}).items(): + try: + ext_num = int(ext_num) + except ValueError: + raise ValueError("Extension keys must be integers.") + if ext_num not in pb._extensions_by_number: + if strict: + raise KeyError( + "%s does not have a extension with number %s. Perhaps you forgot to import it?" 
+ % (pb, key)) + continue + ext_field = pb._extensions_by_number[ext_num] + pb_val = None + pb_val = pb.Extensions[ext_field] + field_mapping.append((ext_field, ext_val, pb_val)) + + return field_mapping + + +def _dict_to_protobuf(pb, value, type_callable_map, strict): + fields = _get_field_mapping(pb, value, strict) + + for field, input_value, pb_value in fields: + if field.label == FieldDescriptor.LABEL_REPEATED: + for item in input_value: + if field.type == FieldDescriptor.TYPE_MESSAGE: + m = pb_value.add() + _dict_to_protobuf(m, item, type_callable_map, strict) + elif field.type == FieldDescriptor.TYPE_ENUM and isinstance( + item, basestring): + pb_value.append(_string_to_enum(field, item)) + else: + pb_value.append(item) + continue + if field.type == FieldDescriptor.TYPE_MESSAGE: + _dict_to_protobuf(pb_value, input_value, type_callable_map, strict) + continue + + if field.type in type_callable_map: + input_value = type_callable_map[field.type](input_value) + + if field.is_extension: + pb.Extensions[field] = input_value + continue + + if field.type == FieldDescriptor.TYPE_ENUM and isinstance(input_value, + basestring): + input_value = _string_to_enum(field, input_value) + + setattr(pb, field.name, input_value) + + return pb + + +def _string_to_enum(field, input_value): + enum_dict = field.enum_type.values_by_name + try: + input_value = enum_dict[input_value].number + except KeyError: + raise KeyError("`%s` is not a valid value for field `%s`" % + (input_value, field.name)) + return input_value diff --git a/caffe2fluid/kaffe/shapes.py b/caffe2fluid/kaffe/shapes.py new file mode 100644 index 0000000..4bbdbde --- /dev/null +++ b/caffe2fluid/kaffe/shapes.py @@ -0,0 +1,160 @@ +import math +from collections import namedtuple + +from .errors import KaffeError + +Tensor4DShape = namedtuple('Tensor4DShape', + ['batch_size', 'channels', 'height', 'width']) + +Tensor3DShape = namedtuple('Tensor3DShape', ['batch_size', 'data1', 'data2']) + +Tensor2DShape = namedtuple('Tensor2DShape', ['batch_size', 'data']) + +ScalarShape = namedtuple('ScalarShape', ['batch_size']) + + +def make_tensor(batch_size, d1=None, d2=None, d3=None): + if d3 is not None: + return Tensor4DShape(batch_size, d1, d2, d3) + elif d1 is not None and d2 is not None: + return Tensor3DShape(batch_size, d1, d2) + elif d1 is not None and d2 is None: + return Tensor2DShape(batch_size, d1) + elif d1 is None and d2 is None and d3 is None: + return ScalarShape(batch_size) + else: + raise NotImplementedError('invalid params for make_tensor %s' \ + % (str((batch_size, d1, d2, d3)))) + + +def get_filter_output_shape(i_h, i_w, params, round_func): + dila_h = getattr(params, 'dila_h', 1) + dila_w = getattr(params, 'dila_w', 1) + + o_h = (i_h + 2 * params.pad_h - + (dila_h * (params.kernel_h - 1) + 1)) / float(params.stride_h) + 1 + o_w = (i_w + 2 * params.pad_w - + (dila_w * (params.kernel_w - 1) + 1)) / float(params.stride_w) + 1 + + return (int(round_func(o_h)), int(round_func(o_w))) + + +def get_strided_kernel_output_shape(node, round_func): + assert node.layer is not None + input_shape = node.get_only_parent().output_shape + o_h, o_w = get_filter_output_shape(input_shape.height, input_shape.width, + node.layer.kernel_parameters, round_func) + params = node.layer.parameters + has_c_o = hasattr(params, 'num_output') + c = params.num_output if has_c_o else input_shape.channels + return make_tensor(input_shape.batch_size, c, o_h, o_w) + + +def shape_not_implemented(node): + raise NotImplementedError + + +def shape_identity(node): + assert 
len(node.parents) > 0 + return node.parents[0].output_shape + + +def shape_scalar(node): + return make_tensor(1, 1, 1, 1) + + +def shape_crop(node): + raise KaffeError('crop function had been defined in customer_layers') + + +def shape_power(node): + raise KaffeError('power function had been defined in customer_layers') + + +def shape_data(node): + if node.output_shape: + # Old-style input specification + shape = node.output_shape + else: + try: + # New-style input specification + shape = map(int, node.parameters.shape[0].dim) + except: + # We most likely have a data layer on our hands. The problem is, + # Caffe infers the dimensions of the data from the source (eg: LMDB). + # We want to avoid reading datasets here. Fail for now. + # This can be temporarily fixed by transforming the data layer to + # Caffe's "input" layer (as is usually used in the "deploy" version). + # TODO: Find a better solution for this. + raise KaffeError( + 'Cannot determine dimensions of data layer.\n' + 'See comments in function shape_data for more info.') + return shape + + +def shape_mem_data(node): + params = node.parameters + return make_tensor(params.batch_size, params.channels, params.height, + params.width) + + +def shape_concat(node): + axis = node.layer.parameters.axis + output_shape = None + for parent in node.parents: + if output_shape is None: + output_shape = list(parent.output_shape) + else: + output_shape[axis] += parent.output_shape[axis] + return tuple(output_shape) + + +def shape_convolution(node): + return get_strided_kernel_output_shape(node, math.floor) + + +def shape_deconvolution(node): + assert node.layer is not None + input_shape = node.get_only_parent().output_shape + h_i = input_shape.height + w_i = input_shape.width + + params = node.layer.kernel_parameters + p_h = params.pad_h + p_w = params.pad_w + + dila_h = params.dila_h + dila_w = params.dila_w + + k_h = params.kernel_h + k_w = params.kernel_w + + s_h = params.stride_h + s_w = params.stride_w + + h_o = (h_i - 1) * s_h - 2 * p_h + dila_h * (k_h - 1) + 1 + w_o = (w_i - 1) * s_w - 2 * p_w + dila_w * (k_w - 1) + 1 + + params = node.layer.parameters + has_c_o = hasattr(params, 'num_output') + c = params.num_output if has_c_o else input_shape.channels + return make_tensor(input_shape.batch_size, c, h_o, w_o) + + +def shape_pool(node): + global_pool = getattr(node.layer.parameters, 'global_pooling', False) + if global_pool: + input_shape = node.get_only_parent().output_shape + return make_tensor(input_shape.batch_size, input_shape.channels, 1, 1) + + ceil_mode = getattr(node.layer.parameters, 'ceil_mode', True) + if ceil_mode is True: + method = math.ceil + else: + method = math.floor + return get_strided_kernel_output_shape(node, method) + + +def shape_inner_product(node): + input_shape = node.get_only_parent().output_shape + return make_tensor(input_shape.batch_size, node.layer.parameters.num_output) diff --git a/caffe2fluid/kaffe/transformers.py b/caffe2fluid/kaffe/transformers.py new file mode 100644 index 0000000..f436ec8 --- /dev/null +++ b/caffe2fluid/kaffe/transformers.py @@ -0,0 +1,414 @@ +''' +A collection of graph transforms. + +A transformer is a callable that accepts a graph and returns a transformed version. +''' +import os +import numpy as np + +from .caffe import get_caffe_resolver, has_pycaffe +from .errors import KaffeError, debug, notice, warn +from .layers import NodeKind + + +class DataInjector(object): + ''' + Associates parameters loaded from a .caffemodel file with their corresponding nodes. 
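+ Uses pycaffe when available, otherwise falls back to parsing the .caffemodel with the protobuf backend.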
+ ''' + + def __init__(self, def_path, data_path): + # The .prototxt file defining the graph + self.def_path = def_path + # The .caffemodel file containing the learned parameters + self.data_path = data_path + # Set to true if the fallback protocol-buffer based backend was used + self.did_use_pb = False + # A list containing (layer name, parameters) tuples + self.params = None + # Load the parameters + self.load() + + def load(self): + if has_pycaffe(): + self.load_using_caffe() + else: + self.load_using_pb() + + def load_using_caffe(self): + caffe = get_caffe_resolver().caffe + net = caffe.Net(self.def_path, self.data_path, caffe.TEST) + data = lambda blob: blob.data + self.params = [(k, map(data, v)) for k, v in net.params.items()] + + def load_using_pb(self): + data = get_caffe_resolver().NetParameter() + data.MergeFromString(open(self.data_path, 'rb').read()) + pair = lambda layer: (layer.name, self.normalize_pb_data(layer)) + layers = data.layers or data.layer + self.params = [pair(layer) for layer in layers if layer.blobs] + self.did_use_pb = True + + def normalize_pb_data(self, layer): + transformed = [] + for blob in layer.blobs: + if len(blob.shape.dim): + dims = blob.shape.dim + c_o, c_i, h, w = map(int, [1] * (4 - len(dims)) + list(dims)) + else: + c_o = blob.num + c_i = blob.channels + h = blob.height + w = blob.width + data = np.array(blob.data, dtype=np.float32).reshape(c_o, c_i, h, w) + transformed.append(data) + return transformed + + def adjust_parameters(self, node, data): + if not self.did_use_pb: + return data + + # When using the protobuf-backend, each parameter initially has four dimensions. + # In certain cases (like FC layers), we want to eliminate the singleton dimensions. + # This implementation takes care of the common cases. However, it does leave the + # potential for future issues. + # The Caffe-backend does not suffer from this problem. + data = list(data) + + squeeze_indices = [1] # Squeeze biases. + if node.kind == NodeKind.InnerProduct: + squeeze_indices.append(0) # Squeeze FC. + + for idx in squeeze_indices: + if idx >= len(data): + continue + + d = data[idx] + assert len( + d.shape + ) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % ( + str(d.shape)) + + shape_old = d.shape + sq_axis = None + if idx == 0: + sq_axis = (0, 1) + elif idx == 1: + sq_axis = (0, 1, 2) + else: + continue + + data[idx] = np.squeeze(d, axis=sq_axis) + shape_new = data[idx].shape + if len(shape_old) != shape_new: + debug('squeeze idx:%d, with kind:%s,name:%s' % \ + (idx, node.kind, node.name)) + return data + + def __call__(self, graph): + for layer_name, data in self.params: + if layer_name in graph: + node = graph.get_node(layer_name) + node.data = self.adjust_parameters(node, data) + else: + notice('Ignoring parameters for non-existent layer: %s' % \ + layer_name) + return graph + + +class DataReshaper(object): + def __init__(self, mapping, replace=True): + # A dictionary mapping NodeKind to the transposed order. + self.mapping = mapping + # The node kinds eligible for reshaping + self.reshaped_node_types = self.mapping.keys() + # If true, the reshaped data will replace the old one. + # Otherwise, it's set to the reshaped_data attribute. 
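+ # e.g. the Paddle transformer passes {NodeKind.InnerProduct: (1, 0)}, transposing FC weights from (c_o, c_i) to (c_i, c_o)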
+ self.replace = replace + + def has_spatial_parent(self, node): + try: + parent = node.get_only_parent() + s = parent.output_shape + if len(s) == 4: + return s.height > 1 or s.width > 1 + else: + return False + except KaffeError: + return False + + def map(self, node_kind): + try: + return self.mapping[node_kind] + except KeyError: + raise KaffeError('Ordering not found for node kind: {}'.format( + node_kind)) + + def __call__(self, graph): + for node in graph.nodes: + if node.data is None: + continue + + if node.kind not in self.reshaped_node_types: + # Check for 2+ dimensional data + #if any(len(tensor.shape) > 1 for tensor in node.data): + # notice('parmaters not reshaped for node: {}'.format(node)) + continue + + transpose_order = self.map(node.kind) + weights = node.data[0] + if node.kind == NodeKind.InnerProduct: + # The FC layer connected to the spatial layer needs to be + # re-wired to match the new spatial ordering. + #in_shape = node.get_only_parent().output_shape + fc_shape = weights.shape + output_channels = fc_shape[0] + weights = weights.reshape((output_channels, -1)) + weights = weights.transpose(transpose_order) + node.reshaped_data = weights + else: + node.reshaped_data = weights.transpose(transpose_order) + + if self.replace: + for node in graph.nodes: + if hasattr(node, 'reshaped_data'): + # Set the weights + node.data[0] = node.reshaped_data + del node.reshaped_data + return graph + + +class CropFuser(object): + ''' + Crop is to return a scalar output Blob for an input Blob of arbitrary size. + When one of the input Blob is "input" or "DummyData", we can remove the input Blob + and put the shape into the reduction layer. + ''' + _traced_names = {} + + @classmethod + def traced_names(cls): + return cls._traced_names + + @classmethod + def trace(cls, fname, tname): + """ recording the names mapping, + the value of 'fname' will be replaced by value of 'tname' + """ + if fname not in cls._traced_names: + cls._traced_names[fname] = [] + cls._traced_names[fname].append(tname) + + def __init__(self, + allowed_parent_types=[NodeKind.Input, NodeKind.DummyData]): + self.allowed_parent_types = allowed_parent_types + + def __call__(self, graph): + nodes = graph.nodes + fused_nodes = [] + for node in nodes: + if len(node.parents) != 2: + # reduction layer must has two parent layers. + continue + parent = node.parents[1] + if not self.is_eligible_pair(parent, node): + continue + # Change the graph structure. + parent.children.remove(node) + node.parents.remove(parent) + # Let the sub-class merge the fused node in any arbitrary way. + if not len(parent.children): + fused_nodes.append(parent) + #fused_nodes.append(parent) + self.merge(parent, node) + # rebuild the graph + transformed_nodes = [node for node in nodes if node not in fused_nodes] + return graph.replaced(transformed_nodes) + + def is_eligible_pair(self, parent, child): + '''Returns true if this parent/child pair is eligible for fusion.''' + return child.kind == NodeKind.Crop + #return (self.allowed_parent_types is not None and \ + # len(parent.children) == 1 and \ + # parent.kind in self.allowed_parent_types and \ + # child.kind == NodeKind.Crop) + + def merge(self, parent, child): + '''Merge the parent node into the child.''' + child.metadata['shape'] = [ + parent.output_shape.batch_size, parent.output_shape.channels, + parent.output_shape.height, parent.output_shape.width + ] + + +class SubNodeFuser(object): + ''' + An abstract helper for merging a single-child with its single-parent. 
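+ Subclasses implement is_eligible_pair() to select the pairs and merge() to combine them.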
+ ''' + _traced_names = {} + + @classmethod + def traced_names(cls): + return cls._traced_names + + @classmethod + def trace(cls, fname, tname): + """ recording the names mapping, + the value of 'fname' will be replaced by value of 'tname' + """ + if fname not in cls._traced_names: + cls._traced_names[fname] = [] + cls._traced_names[fname].append(tname) + + def __call__(self, graph): + nodes = graph.nodes + fused_nodes = [] + for node in nodes: + if len(node.parents) != 1: + # We're only fusing nodes with single parents + continue + parent = node.get_only_parent() + if len(parent.children) != 1: + # We can only fuse a node if its parent's + # value isn't used by any other node. + continue + if not self.is_eligible_pair(parent, node): + continue + # Rewrite the fused node's children to its parent. + for child in node.children: + pos = child.parents.index(node) + child.parents[pos] = parent + parent.add_child(child) + # Disconnect the fused node from the graph. + parent.children.remove(node) + fused_nodes.append(node) + # Let the sub-class merge the fused node in any arbitrary way. + self.merge(parent, node) + transformed_nodes = [node for node in nodes if node not in fused_nodes] + return graph.replaced(transformed_nodes) + + def is_eligible_pair(self, parent, child): + '''Returns true if this parent/child pair is eligible for fusion.''' + raise NotImplementedError('Must be implemented by subclass.') + + def merge(self, parent, child): + '''Merge the child node into the parent.''' + raise NotImplementedError('Must be implemented by subclass') + + +class ReLUFuser(SubNodeFuser): + ''' + Fuses rectified linear units with their parent nodes. + ''' + + def __init__(self, allowed_parent_types=None): + # Fuse ReLUs when the parent node is one of the given types. + # If None, all node types are eligible. + self.allowed_parent_types = allowed_parent_types + + def is_eligible_pair(self, parent, child): + return ((self.allowed_parent_types is None or \ + parent.kind in self.allowed_parent_types) and \ + child.kind == NodeKind.ReLU) + + def merge(self, parent, child): + SubNodeFuser.trace(parent.name, child.name) + parent.metadata['relu'] = True + parent.metadata['relu_negative_slope'] = child.parameters.negative_slope + + +class BatchNormScaleBiasFuser(SubNodeFuser): + ''' + The original batch normalization paper includes two learned + parameters: a scaling factor \gamma and a bias \beta. + Caffe's implementation does not include these two. However, it is commonly + replicated by adding a scaling+bias layer immidiately after the batch norm. + + This fuser merges the scaling+bias layer with the batch norm. + ''' + + def is_eligible_pair(self, parent, child): + return (parent.kind == NodeKind.BatchNorm and \ + child.kind == NodeKind.Scale and \ + child.parameters.axis == 1 and \ + child.parameters.bias_term == True) + + def merge(self, parent, child): + SubNodeFuser.trace(parent.name, child.name) + parent.scale_bias_node = child + + +class BatchNormPreprocessor(object): + ''' + Prescale batch normalization parameters. + Concatenate gamma (scale) and beta (bias) terms if set. 
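+ After this pass, node.data is [mean, variance] or [mean, variance, gamma, beta].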
+ ''' + + def __call__(self, graph): + for node in graph.nodes: + if node.kind != NodeKind.BatchNorm: + continue + assert node.data is not None + assert len(node.data) == 3 + node.data = [np.squeeze(i) for i in node.data] + mean, variance, scale = node.data + # Prescale the stats + scaling_factor = 1.0 / scale if scale != 0 else 0 + mean *= scaling_factor + variance *= scaling_factor + # Replace with the updated values + node.data = [mean, variance] + if hasattr(node, 'scale_bias_node'): + # Include the scale and bias terms + gamma, beta = node.scale_bias_node.data + node.data += [np.squeeze(i) for i in [gamma, beta]] + return graph + + +class NodeRenamer(object): + ''' + Renames nodes in the graph using a given unary function that + accepts a node and returns its new name. + ''' + + def __init__(self, renamer): + self.renamer = renamer + + def __call__(self, graph): + for node in graph.nodes: + node.name = self.renamer(node) + return graph + + +class ParameterNamer(object): + ''' + Convert layer data arrays to a dictionary mapping parameter names to their values. + ''' + + def __call__(self, graph): + for node in graph.nodes: + if node.data is None: + continue + if node.kind in (NodeKind.Convolution, NodeKind.InnerProduct,\ + NodeKind.Deconvolution): + names = ('weights', ) + if node.parameters.bias_term: + names += ('biases', ) + elif node.kind == NodeKind.BatchNorm: + names = ('mean', 'variance') + if len(node.data) == 4: + names += ('scale', 'offset') + elif node.kind == NodeKind.Scale: + names = ('scale', ) + if getattr(node.parameters, 'bias_term', False): + names = ('scale', 'offset') + elif node.kind == NodeKind.PReLU: + names = ('negslope', ) + elif node.kind == "Normalize": + names = ('scale', ) + else: + warn('Unhandled parameters when naming this it[%s]' % + (node.kind)) + continue + assert len(names) == len(node.data) + node.data = dict(zip(names, node.data)) + return graph diff --git a/caffe2fluid/proto/caffe.proto b/caffe2fluid/proto/caffe.proto new file mode 100644 index 0000000..18eb5ca --- /dev/null +++ b/caffe2fluid/proto/caffe.proto @@ -0,0 +1,1411 @@ +syntax = "proto2"; + +package caffe; + +// Specifies the shape (dimensions) of a Blob. +message BlobShape { repeated int64 dim = 1 [ packed = true ]; } + +message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [ packed = true ]; + repeated float diff = 6 [ packed = true ]; + repeated double double_data = 8 [ packed = true ]; + repeated double double_diff = 9 [ packed = true ]; + + // 4D dimensions -- deprecated. Use "shape" instead. + optional int32 num = 1 [ default = 0 ]; + optional int32 channels = 2 [ default = 0 ]; + optional int32 height = 3 [ default = 0 ]; + optional int32 width = 4 [ default = 0 ]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { repeated BlobProto blobs = 1; } + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; + // If true data contains an encoded image that need to be decoded + optional bool encoded = 7 [ default = false ]; +} + +message FillerParameter { + // The filler type. 
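+ // Common values include 'constant', 'uniform', 'gaussian', 'xavier' and 'msra'.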
+ optional string type = 1 [ default = 'constant' ]; + optional float value = 2 [ default = 0 ]; // the value in constant filler + optional float min = 3 [ default = 0 ]; // the min value in uniform filler + optional float max = 4 [ default = 1 ]; // the max value in uniform filler + optional float mean = 5 [ default = 0 ]; // the mean value in Gaussian filler + optional float std = 6 [ default = 1 ]; // the std value in Gaussian filler + // The expected number of non-zero output weights for a given input in + // Gaussian filler -- the default -1 means don't perform sparsification. + optional int32 sparse = 7 [ default = -1 ]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. + enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [ default = FAN_IN ]; +} + +message NetParameter { + optional string name = 1; // consider giving the network a name + // DEPRECATED. See InputParameter. The input blobs to the network. + repeated string input = 3; + // DEPRECATED. See InputParameter. The shape of the input blobs. + repeated BlobShape input_shape = 8; + + // 4D input dimensions -- deprecated. Use "input_shape" instead. + // If specified, for each input blob there should be four + // values specifying the num, channels, height and width of the input blob. + // Thus, there should be a total of (4 * #input) numbers. + repeated int32 input_dim = 4; + + // Whether the network will force every layer to carry out backward operation. + // If set False, then whether to carry out backward is determined + // automatically according to the net structure and learning rates. + optional bool force_backward = 5 [ default = false ]; + // The current "state" of the network, including the phase, level, and stage. + // Some layers may be included/excluded depending on this state and the states + // specified in the layers' include and exclude fields. + optional NetState state = 6; + + // Print debugging information about results while running Net::Forward, + // Net::Backward, and Net::Update. + optional bool debug_info = 7 [ default = false ]; + + // The layers that make up the net. Each of their configurations, including + // connectivity and behavior, is specified as a LayerParameter. + repeated LayerParameter layer = 100; // ID 100 so layers are printed last. + + // DEPRECATED: use 'layer' instead. + repeated V1LayerParameter layers = 2; +} + +// NOTE +// Update the next available ID when you add a new SolverParameter field. +// +// SolverParameter next available ID: 42 (last added: layer_wise_reduce) +message SolverParameter { + ////////////////////////////////////////////////////////////////////////////// + // Specifying the train and test networks + // + // Exactly one train net must be specified using one of the following fields: + // train_net_param, train_net, net_param, net + // One or more test nets may be specified using any of the following fields: + // test_net_param, test_net, net_param, net + // If more than one test net field is specified (e.g., both net and + // test_net are specified), they will be evaluated in the field order given + // above: (1) test_net_param, (2) test_net, (3) net_param/net. + // A test_iter must be specified for each test_net. + // A test_level and/or a test_stage may also be specified for each test_net. 
+ ////////////////////////////////////////////////////////////////////////////// + + // Proto filename for the train net, possibly combined with one or more + // test nets. + optional string net = 24; + // Inline train net param, possibly combined with one or more test nets. + optional NetParameter net_param = 25; + + optional string train_net = 1; // Proto filename for the train net. + repeated string test_net = 2; // Proto filenames for the test nets. + optional NetParameter train_net_param = 21; // Inline train net params. + repeated NetParameter test_net_param = 22; // Inline test net params. + + // The states for the train/test nets. Must be unspecified or + // specified once per net. + // + // By default, train_state will have phase = TRAIN, + // and all test_state's will have phase = TEST. + // Other defaults are set according to the NetState defaults. + optional NetState train_state = 26; + repeated NetState test_state = 27; + + // The number of iterations for each test net. + repeated int32 test_iter = 3; + + // The number of iterations between two testing phases. + optional int32 test_interval = 4 [ default = 0 ]; + optional bool test_compute_loss = 19 [ default = false ]; + // If true, run an initial test pass before the first iteration, + // ensuring memory availability and printing the starting value of the loss. + optional bool test_initialization = 32 [ default = true ]; + optional float base_lr = 5; // The base learning rate + // the number of iterations between displaying info. If display = 0, no info + // will be displayed. + optional int32 display = 6; + // Display the loss averaged over the last average_loss iterations + optional int32 average_loss = 33 [ default = 1 ]; + optional int32 max_iter = 7; // the maximum number of iterations + // accumulate gradients over `iter_size` x `batch_size` instances + optional int32 iter_size = 36 [ default = 1 ]; + + // The learning rate decay policy. The currently implemented learning rate + // policies are as follows: + // - fixed: always return base_lr. + // - step: return base_lr * gamma ^ (floor(iter / step)) + // - exp: return base_lr * gamma ^ iter + // - inv: return base_lr * (1 + gamma * iter) ^ (- power) + // - multistep: similar to step but it allows non uniform steps defined by + // stepvalue + // - poly: the effective learning rate follows a polynomial decay, to be + // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) + // - sigmoid: the effective learning rate follows a sigmod decay + // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + // + // where base_lr, max_iter, gamma, step, stepvalue and power are defined + // in the solver parameter protocol buffer, and iter is the current iteration. + optional string lr_policy = 8; + optional float gamma = 9; // The parameter to compute the learning rate. + optional float power = 10; // The parameter to compute the learning rate. + optional float momentum = 11; // The momentum value. + optional float weight_decay = 12; // The weight decay. + // regularization types supported: L1 and L2 + // controlled by weight_decay + optional string regularization_type = 29 [ default = "L2" ]; + // the stepsize for learning rate policy "step" + optional int32 stepsize = 13; + // the stepsize for learning rate policy "multistep" + repeated int32 stepvalue = 34; + + // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, + // whenever their actual L2 norm is larger. 
+ optional float clip_gradients = 35 [ default = -1 ]; + + optional int32 snapshot = 14 [ default = 0 ]; // The snapshot interval + optional string snapshot_prefix = 15; // The prefix for the snapshot. + // whether to snapshot diff in the results or not. Snapshotting diff will help + // debugging but the final protocol buffer size will be much larger. + optional bool snapshot_diff = 16 [ default = false ]; + enum SnapshotFormat { + HDF5 = 0; + BINARYPROTO = 1; + } + optional SnapshotFormat snapshot_format = 37 [ default = BINARYPROTO ]; + // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. + enum SolverMode { + CPU = 0; + GPU = 1; + } + optional SolverMode solver_mode = 17 [ default = GPU ]; + // the device_id will that be used in GPU mode. Use device_id = 0 in default. + optional int32 device_id = 18 [ default = 0 ]; + // If non-negative, the seed with which the Solver will initialize the Caffe + // random number generator -- useful for reproducible results. Otherwise, + // (and by default) initialize using a seed derived from the system clock. + optional int64 random_seed = 20 [ default = -1 ]; + + // type of the solver + optional string type = 40 [ default = "SGD" ]; + + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam + optional float delta = 31 [ default = 1e-8 ]; + // parameters for the Adam solver + optional float momentum2 = 39 [ default = 0.999 ]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38 [ default = 0.99 ]; + + // If true, print information about the state of the net that may help with + // debugging learning problems. + optional bool debug_info = 23 [ default = false ]; + + // If false, don't save a snapshot after training finishes. + optional bool snapshot_after_train = 28 [ default = true ]; + + // DEPRECATED: old solver enum types, use string instead + enum SolverType { + SGD = 0; + NESTEROV = 1; + ADAGRAD = 2; + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; + } + // DEPRECATED: use type instead of solver_type + optional SolverType solver_type = 30 [ default = SGD ]; + + // Overlap compute and communication for data parallel training + optional bool layer_wise_reduce = 41 [ default = true ]; +} + +// A message that stores the solver snapshots +message SolverState { + optional int32 iter = 1; // The current iteration + optional string learned_net = 2; // The file that stores the learned net. + repeated BlobProto history = 3; // The history for sgd solvers + optional int32 current_step = 4 + [ default = 0 ]; // The current step for learning rate +} + +enum Phase { + TRAIN = 0; + TEST = 1; +} + +message NetState { + optional Phase phase = 1 [ default = TEST ]; + optional int32 level = 2 [ default = 0 ]; + repeated string stage = 3; +} + +message NetStateRule { + // Set phase to require the NetState have a particular phase (TRAIN or TEST) + // to meet this rule. + optional Phase phase = 1; + + // Set the minimum and/or maximum levels in which the layer should be used. + // Leave undefined to meet the rule regardless of level. + optional int32 min_level = 2; + optional int32 max_level = 3; + + // Customizable sets of stages to include or exclude. + // The net must have ALL of the specified stages and NONE of the specified + // "not_stage"s to meet the rule. + // (Use multiple NetStateRules to specify conjunctions of stages.) 
+ repeated string stage = 4; + repeated string not_stage = 5; +} + +// Specifies training parameters (multipliers on global learning constants, +// and the name and other settings used for weight sharing). +message ParamSpec { + // The names of the parameter blobs -- useful for sharing parameters among + // layers, but never required otherwise. To share a parameter between two + // layers, give it a (non-empty) name. + optional string name = 1; + + // Whether to require shared weights to have the same shape, or just the same + // count -- defaults to STRICT if unspecified. + optional DimCheckMode share_mode = 2; + enum DimCheckMode { + // STRICT (default) requires that num, channels, height, width each match. + STRICT = 0; + // PERMISSIVE requires only the count (num*channels*height*width) to match. + PERMISSIVE = 1; + } + + // The multiplier on the global learning rate for this parameter. + optional float lr_mult = 3 [ default = 1.0 ]; + + // The multiplier on the global weight decay for this parameter. + optional float decay_mult = 4 [ default = 1.0 ]; +} + +// NOTE +// Update the next available ID when you add a new LayerParameter field. +// +// LayerParameter next available layer-specific ID: 147 (last added: +// recurrent_param) +message LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the layer type + repeated string bottom = 3; // the name of each bottom blob + repeated string top = 4; // the name of each top blob + + // The train / test phase for computation. + optional Phase phase = 10; + + // The amount of weight to assign each top blob in the objective. + // Each layer assigns a default value, usually of either 0 or 1, + // to each top blob. + repeated float loss_weight = 5; + + // Specifies training parameters (multipliers on global learning constants, + // and the name and other settings used for weight sharing). + repeated ParamSpec param = 6; + + // The blobs containing the numeric parameters of the layer. + repeated BlobProto blobs = 7; + + // Specifies whether to backpropagate to each bottom. If unspecified, + // Caffe will automatically infer whether each input needs backpropagation + // to compute parameter gradients. If set to true for some inputs, + // backpropagation to those inputs is forced; if set false for some inputs, + // backpropagation to those inputs is skipped. + // + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; + + // Rules controlling whether and when a layer is included in the network, + // based on the current NetState. You may specify a non-zero number of rules + // to include OR exclude, but not both. If no include or exclude rules are + // specified, the layer is always included. If the current NetState meets + // ANY (i.e., one or more) of the specified rules, the layer is + // included/excluded. + repeated NetStateRule include = 8; + repeated NetStateRule exclude = 9; + + // Parameters for data pre-processing. + optional TransformationParameter transform_param = 100; + + // Parameters shared by loss layers. + optional LossParameter loss_param = 101; + + // Layer type-specific parameters. + // + // Note: certain layers may have more than one computational engine + // for their implementation. These layers include an Engine type and + // engine parameter for selecting the implementation. + // The default for the engine is set by the ENGINE switch at compile-time. 
+ optional AccuracyParameter accuracy_param = 102; + optional ArgMaxParameter argmax_param = 103; + optional BatchNormParameter batch_norm_param = 139; + optional BiasParameter bias_param = 141; + optional ConcatParameter concat_param = 104; + optional ContrastiveLossParameter contrastive_loss_param = 105; + optional ConvolutionParameter convolution_param = 106; + optional CropParameter crop_param = 144; + optional DataParameter data_param = 107; + optional DropoutParameter dropout_param = 108; + optional DummyDataParameter dummy_data_param = 109; + optional EltwiseParameter eltwise_param = 110; + optional ELUParameter elu_param = 140; + optional EmbedParameter embed_param = 137; + optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; + optional HDF5DataParameter hdf5_data_param = 112; + optional HDF5OutputParameter hdf5_output_param = 113; + optional HingeLossParameter hinge_loss_param = 114; + optional ImageDataParameter image_data_param = 115; + optional InfogainLossParameter infogain_loss_param = 116; + optional InnerProductParameter inner_product_param = 117; + optional InputParameter input_param = 143; + optional LogParameter log_param = 134; + optional LRNParameter lrn_param = 118; + optional MemoryDataParameter memory_data_param = 119; + optional MVNParameter mvn_param = 120; + optional ParameterParameter parameter_param = 145; + optional PoolingParameter pooling_param = 121; + optional PowerParameter power_param = 122; + optional PReLUParameter prelu_param = 131; + optional PythonParameter python_param = 130; + optional RecurrentParameter recurrent_param = 146; + optional ReductionParameter reduction_param = 136; + optional ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; + optional ScaleParameter scale_param = 142; + optional SigmoidParameter sigmoid_param = 124; + optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; + optional SliceParameter slice_param = 126; + optional TanHParameter tanh_param = 127; + optional ThresholdParameter threshold_param = 128; + optional TileParameter tile_param = 138; + optional WindowDataParameter window_data_param = 129; +} + +// Message that stores parameters used to apply transformation +// to the data layer's data +message TransformationParameter { + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 1 [ default = 1 ]; + // Specify if we want to randomly mirror data. + optional bool mirror = 2 [ default = false ]; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 3 [ default = 0 ]; + // mean_file and mean_value cannot be specified at the same time + optional string mean_file = 4; + // if specified can be repeated once (would subtract it from all the channels) + // or can be repeated the same number of times as channels + // (would subtract them from the corresponding channel) + repeated float mean_value = 5; + // Force the decoded image to have 3 color channels. + optional bool force_color = 6 [ default = false ]; + // Force the decoded image to have 1 color channels. + optional bool force_gray = 7 [ default = false ]; +} + +// Message that stores parameters shared by loss layers +message LossParameter { + // If specified, ignore instances with the given label. 
+ optional int32 ignore_label = 1; + // How to normalize the loss for loss layers that aggregate across batches, + // spatial dimensions, or other dimensions. Currently only implemented in + // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. + enum NormalizationMode { + // Divide by the number of examples in the batch times spatial dimensions. + // Outputs that receive the ignore label will NOT be ignored in computing + // the normalization factor. + FULL = 0; + // Divide by the total number of output locations that do not take the + // ignore_label. If ignore_label is not set, this behaves like FULL. + VALID = 1; + // Divide by the batch size. + BATCH_SIZE = 2; + // Do not normalize the loss. + NONE = 3; + } + // For historical reasons, the default normalization for + // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. + optional NormalizationMode normalization = 3 [ default = VALID ]; + // Deprecated. Ignored if normalization is specified. If normalization + // is not specified, then setting this to false will be equivalent to + // normalization = BATCH_SIZE to be consistent with previous behavior. + optional bool normalize = 2; +} + +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. + +message AccuracyParameter { + // When computing accuracy, count as correct by comparing the true label to + // the top k scoring classes. By default, only compare to the top scoring + // class (i.e. argmax). + optional uint32 top_k = 1 [ default = 1 ]; + + // The "label" axis of the prediction blob, whose argmax corresponds to the + // predicted label -- may be negative to index from the end (e.g., -1 for the + // last axis). For example, if axis == 1 and the predictions are + // (N x C x H x W), the label blob is expected to contain N*H*W ground truth + // labels with integer values in {0, 1, ..., C-1}. + optional int32 axis = 2 [ default = 1 ]; + + // If specified, ignore instances with the given label. + optional int32 ignore_label = 3; +} + +message ArgMaxParameter { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [ default = false ]; + optional uint32 top_k = 2 [ default = 1 ]; + // The axis along which to maximise -- may be negative to index from the + // end (e.g., -1 for the last axis). + // By default ArgMaxLayer maximizes over the flattened trailing dimensions + // for each index of the first / num dimension. + optional int32 axis = 3; +} + +message ConcatParameter { + // The axis along which to concatenate -- may be negative to index from the + // end (e.g., -1 for the last axis). Other axes must have the + // same dimension for all the bottom blobs. + // By default, ConcatLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 2 [ default = 1 ]; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 concat_dim = 1 [ default = 1 ]; +} + +message BatchNormParameter { + // If false, normalization is performed over the current mini-batch + // and global statistics are accumulated (but not yet used) by a moving + // average. + // If true, those accumulated mean and variance values are used for the + // normalization. + // By default, it is set to false when the network is in the training + // phase and true when the network is in the testing phase. + optional bool use_global_stats = 1; + // What fraction of the moving average remains each iteration? + // Smaller values make the moving average decay faster, giving more + // weight to the recent values. 
+ // Each iteration updates the moving average @f$S_{t-1}@f$ with the + // current mean @f$ Y_t @f$ by + // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ + // is the moving_average_fraction parameter. + optional float moving_average_fraction = 2 [ default = .999 ]; + // Small value to add to the variance estimate so that we don't divide by + // zero. + optional float eps = 3 [ default = 1e-5 ]; +} + +message BiasParameter { + // The first axis of bottom[0] (the first input Blob) along which to apply + // bottom[1] (the second input Blob). May be negative to index from the end + // (e.g., -1 for the last axis). + // + // For example, if bottom[0] is 4D with shape 100x3x40x60, the output + // top[0] will have the same shape, and bottom[1] may have any of the + // following shapes (for the given value of axis): + // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 + // (axis == 1 == -3) 3; 3x40; 3x40x60 + // (axis == 2 == -2) 40; 40x60 + // (axis == 3 == -1) 60 + // Furthermore, bottom[1] may have the empty shape (regardless of the value of + // "axis") -- a scalar bias. + optional int32 axis = 1 [ default = 1 ]; + + // (num_axes is ignored unless just one bottom is given and the bias is + // a learned parameter of the layer. Otherwise, num_axes is determined by the + // number of axes by the second bottom.) + // The number of axes of the input (bottom[0]) covered by the bias + // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. + // Set num_axes := 0, to add a zero-axis Blob: a scalar. + optional int32 num_axes = 2 [ default = 1 ]; + + // (filler is ignored unless just one bottom is given and the bias is + // a learned parameter of the layer.) + // The initialization for the learned bias parameter. + // Default is the zero (0) initialization, resulting in the BiasLayer + // initially performing the identity operation. + optional FillerParameter filler = 3; +} + +message ContrastiveLossParameter { + // margin for dissimilar pair + optional float margin = 1 [ default = 1.0 ]; + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). This is kept to support / + // reproduce existing models and results + optional bool legacy_version = 2 [ default = false ]; +} + +message ConvolutionParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [ default = true ]; // whether to have bias terms + + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in all spatial dimensions, or once per spatial dimension. + repeated uint32 pad = 3; // The padding size; defaults to 0 + repeated uint32 kernel_size = 4; // The kernel size + repeated uint32 stride = 6; // The stride; defaults to 1 + // Factor used to dilate the kernel, (implicitly) zero-filling the resulting + // holes. (Kernel dilation is sometimes referred to by its use in the + // algorithme à trous from Holschneider et al. 1987.) + repeated uint32 dilation = 18; // The dilation; defaults to 1 + + // For 2D convolution only, the *_h and *_w versions may also be used to + // specify both spatial dimensions. 
+ optional uint32 pad_h = 9 [ default = 0 ]; // The padding height (2D only) + optional uint32 pad_w = 10 [ default = 0 ]; // The padding width (2D only) + optional uint32 kernel_h = 11; // The kernel height (2D only) + optional uint32 kernel_w = 12; // The kernel width (2D only) + optional uint32 stride_h = 13; // The stride height (2D only) + optional uint32 stride_w = 14; // The stride width (2D only) + + optional uint32 group = 5 [ default = 1 ]; // The group size for group conv + + optional FillerParameter weight_filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 15 [ default = DEFAULT ]; + + // The axis to interpret as "channels" when performing convolution. + // Preceding dimensions are treated as independent inputs; + // succeeding dimensions are treated as "spatial". + // With (N, C, H, W) inputs, and axis == 1 (the default), we perform + // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for + // groups g>1) filters across the spatial axes (H, W) of the input. + // With (N, C, D, H, W) inputs, and axis == 1, we perform + // N independent 3D convolutions, sliding (C/g)-channels + // filters across the spatial axes (D, H, W) of the input. + optional int32 axis = 16 [ default = 1 ]; + + // Whether to force use of the general ND convolution, even if a specific + // implementation for blobs of the appropriate number of spatial dimensions + // is available. (Currently, there is only a 2D-specific convolution + // implementation; for input blobs with num_axes != 2, this option is + // ignored and the ND implementation will be used.) + optional bool force_nd_im2col = 17 [ default = false ]; +} + +message CropParameter { + // To crop, elements of the first bottom are selected to fit the dimensions + // of the second, reference bottom. The crop is configured by + // - the crop `axis` to pick the dimensions for cropping + // - the crop `offset` to set the shift for all/each dimension + // to align the cropped bottom with the reference bottom. + // All dimensions up to but excluding `axis` are preserved, while + // the dimensions including and trailing `axis` are cropped. + // If only one `offset` is set, then all dimensions are offset by this amount. + // Otherwise, the number of offsets must equal the number of cropped axes to + // shift the crop in each dimension accordingly. + // Note: standard dimensions are N,C,H,W so the default is a spatial crop, + // and `axis` may be negative to index from the end (e.g., -1 for the last + // axis). + optional int32 axis = 1 [ default = 2 ]; + repeated uint32 offset = 2; +} + +message DataParameter { + enum DB { + LEVELDB = 0; + LMDB = 1; + } + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + // DEPRECATED. Each solver accesses a different subset of the database. + optional uint32 rand_skip = 7 [ default = 0 ]; + optional DB backend = 8 [ default = LEVELDB ]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. 
Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [ default = 1 ]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to + // randomly + // crop an image. + optional uint32 crop_size = 5 [ default = 0 ]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly + // mirror + // data. + optional bool mirror = 6 [ default = false ]; + // Force the encoded image to have 3 color channels + optional bool force_encoded_color = 9 [ default = false ]; + // Prefetch queue (Increase if data feeding bandwidth varies, within the + // limit of device memory for GPU training) + optional uint32 prefetch = 10 [ default = 4 ]; +} + +message DropoutParameter { + optional float dropout_ratio = 1 [ default = 0.5 ]; // dropout ratio +} + +// DummyDataLayer fills any number of arbitrarily shaped blobs with random +// (or constant) data generated by "Fillers" (see "message FillerParameter"). +message DummyDataParameter { + // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or + // N + // shape fields, and 0, 1 or N data_fillers. + // + // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. + // If 1 data_filler is specified, it is applied to all top blobs. If N are + // specified, the ith is applied to the ith top blob. + repeated FillerParameter data_filler = 1; + repeated BlobShape shape = 6; + + // 4D dimensions -- deprecated. Use "shape" instead. + repeated uint32 num = 2; + repeated uint32 channels = 3; + repeated uint32 height = 4; + repeated uint32 width = 5; +} + +message EltwiseParameter { + enum EltwiseOp { + PROD = 0; + SUM = 1; + MAX = 2; + } + optional EltwiseOp operation = 1 [ default = SUM ]; // element-wise operation + repeated float coeff = 2; // blob-wise coefficient for SUM operation + + // Whether to use an asymptotically slower (for >2 inputs) but stabler method + // of computing the gradient for the PROD operation. (No effect for SUM op.) + optional bool stable_prod_grad = 3 [ default = true ]; +} + +// Message that stores parameters used by ELULayer +message ELUParameter { + // Described in: + // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate + // Deep Network Learning by Exponential Linear Units (ELUs). arXiv + optional float alpha = 1 [ default = 1 ]; +} + +// Message that stores parameters used by EmbedLayer +message EmbedParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + // The input is given as integers to be interpreted as one-hot + // vector indices with dimension num_input. Hence num_input should be + // 1 greater than the maximum possible input value. + optional uint32 input_dim = 2; + + optional bool bias_term = 3 [ default = true ]; // Whether to use a bias term + optional FillerParameter weight_filler = 4; // The filler for the weight + optional FillerParameter bias_filler = 5; // The filler for the bias +} + +// Message that stores parameters used by ExpLayer +message ExpParameter { + // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = exp(shift + scale * x). 
+ optional float base = 1 [ default = -1.0 ]; + optional float scale = 2 [ default = 1.0 ]; + optional float shift = 3 [ default = 0.0 ]; +} + +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [ default = 1 ]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [ default = -1 ]; +} + +// Message that stores parameters used by HDF5DataLayer +message HDF5DataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 2; + + // Specify whether to shuffle the data. + // If shuffle == true, the ordering of the HDF5 files is shuffled, + // and the ordering of data within any given HDF5 file is shuffled, + // but data between different files are not interleaved; all of a file's + // data are output (in a random order) before moving onto another file. + optional bool shuffle = 3 [ default = false ]; +} + +message HDF5OutputParameter { optional string file_name = 1; } + +message HingeLossParameter { + enum Norm { + L1 = 1; + L2 = 2; + } + // Specify the Norm to use L1 or L2 + optional Norm norm = 1 [ default = L1 ]; +} + +message ImageDataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4 [ default = 1 ]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 7 [ default = 0 ]; + // Whether or not ImageLayer should shuffle the list of files at every epoch. + optional bool shuffle = 8 [ default = false ]; + // It will also resize images if new_height or new_width are not zero. + optional uint32 new_height = 9 [ default = 0 ]; + optional uint32 new_width = 10 [ default = 0 ]; + // Specify if the images are color or gray + optional bool is_color = 11 [ default = true ]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [ default = 1 ]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to + // randomly + // crop an image. + optional uint32 crop_size = 5 [ default = 0 ]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly + // mirror + // data. + optional bool mirror = 6 [ default = false ]; + optional string root_folder = 12 [ default = "" ]; +} + +message InfogainLossParameter { + // Specify the infogain matrix source. 
+ optional string source = 1; + optional int32 axis = 2 [ default = 1 ]; // axis of prob +} + +message InnerProductParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [ default = true ]; // whether to have bias terms + optional FillerParameter weight_filler = 3; // The filler for the weight + optional FillerParameter bias_filler = 4; // The filler for the bias + + // The first axis to be lumped into a single inner product computation; + // all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 5 [ default = 1 ]; + // Specify whether to transpose the weight matrix or not. + // If transpose == true, any operations will be performed on the transpose + // of the weight matrix. The weight matrix itself is not going to be + // transposed + // but rather the transfer flag of operations will be toggled accordingly. + optional bool transpose = 6 [ default = false ]; +} + +message InputParameter { + // This layer produces N >= 1 top blob(s) to be assigned manually. + // Define N shapes to set a shape for each top. + // Define 1 shape to set the same shape for every top. + // Define no shape to defer to reshaping manually. + repeated BlobShape shape = 1; +} + +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [ default = -1.0 ]; + optional float scale = 2 [ default = 1.0 ]; + optional float shift = 3 [ default = 0.0 ]; +} + +// Message that stores parameters used by LRNLayer +message LRNParameter { + optional uint32 local_size = 1 [ default = 5 ]; + optional float alpha = 2 [ default = 1. ]; + optional float beta = 3 [ default = 0.75 ]; + enum NormRegion { + ACROSS_CHANNELS = 0; + WITHIN_CHANNEL = 1; + } + optional NormRegion norm_region = 4 [ default = ACROSS_CHANNELS ]; + optional float k = 5 [ default = 1. ]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [ default = DEFAULT ]; +} + +message MemoryDataParameter { + optional uint32 batch_size = 1; + optional uint32 channels = 2; + optional uint32 height = 3; + optional uint32 width = 4; +} + +message MVNParameter { + // This parameter can be set to false to normalize mean only + optional bool normalize_variance = 1 [ default = true ]; + + // This parameter can be set to true to perform DNN-like MVN + optional bool across_channels = 2 [ default = false ]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [ default = 1e-9 ]; +} + +message ParameterParameter { optional BlobShape shape = 1; } + +message PoolingParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 1 [ default = MAX ]; // The pooling method + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. 
+ optional uint32 pad = 4 [ default = 0 ]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [ default = 0 ]; // The padding height + optional uint32 pad_w = 10 [ default = 0 ]; // The padding width + optional uint32 kernel_size = 2; // The kernel size (square) + optional uint32 kernel_h = 5; // The kernel height + optional uint32 kernel_w = 6; // The kernel width + optional uint32 stride = 3 [ default = 1 ]; // The stride (equal in Y, X) + optional uint32 stride_h = 7; // The stride height + optional uint32 stride_w = 8; // The stride width + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 11 [ default = DEFAULT ]; + // If global_pooling then it will pool over the size of the bottom by doing + // kernel_h = bottom->height and kernel_w = bottom->width + optional bool global_pooling = 12 [ default = false ]; +} + +message PowerParameter { + // PowerLayer computes outputs y = (shift + scale * x) ^ power. + optional float power = 1 [ default = 1.0 ]; + optional float scale = 2 [ default = 1.0 ]; + optional float shift = 3 [ default = 0.0 ]; +} + +message PythonParameter { + optional string module = 1; + optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [ default = '']; + // DEPRECATED + optional bool share_in_parallel = 4 [ default = false ]; +} + +// Message that stores parameters used by RecurrentLayer +message RecurrentParameter { + // The dimension of the output (and usually hidden state) representation -- + // must be explicitly set to non-zero. + optional uint32 num_output = 1 [ default = 0 ]; + + optional FillerParameter weight_filler = 2; // The filler for the weight + optional FillerParameter bias_filler = 3; // The filler for the bias + + // Whether to enable displaying debug_info in the unrolled recurrent net. + optional bool debug_info = 4 [ default = false ]; + + // Whether to add as additional inputs (bottoms) the initial hidden state + // blobs, and add as additional outputs (tops) the final timestep hidden state + // blobs. The number of additional bottom/top blobs required depends on the + // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. + optional bool expose_hidden = 5 [ default = false ]; +} + +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [ default = SUM ]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. 
+ // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [ default = 0 ]; + + optional float coeff = 3 [ default = 1.0 ]; // coefficient for output +} + +// Message that stores parameters used by ReLULayer +message ReLUParameter { + // Allow non-zero slope for negative inputs to speed up optimization + // Described in: + // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities + // improve neural network acoustic models. In ICML Workshop on Deep Learning + // for Audio, Speech, and Language Processing. + optional float negative_slope = 1 [ default = 0 ]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 2 [ default = DEFAULT ]; +} + +message ReshapeParameter { + // Specify the output dimensions. If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). + // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. 
+ // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [ default = 0 ]; + optional int32 num_axes = 3 [ default = -1 ]; +} + +message ScaleParameter { + // The first axis of bottom[0] (the first input Blob) along which to apply + // bottom[1] (the second input Blob). May be negative to index from the end + // (e.g., -1 for the last axis). + // + // For example, if bottom[0] is 4D with shape 100x3x40x60, the output + // top[0] will have the same shape, and bottom[1] may have any of the + // following shapes (for the given value of axis): + // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 + // (axis == 1 == -3) 3; 3x40; 3x40x60 + // (axis == 2 == -2) 40; 40x60 + // (axis == 3 == -1) 60 + // Furthermore, bottom[1] may have the empty shape (regardless of the value of + // "axis") -- a scalar multiplier. + optional int32 axis = 1 [ default = 1 ]; + + // (num_axes is ignored unless just one bottom is given and the scale is + // a learned parameter of the layer. Otherwise, num_axes is determined by the + // number of axes by the second bottom.) + // The number of axes of the input (bottom[0]) covered by the scale + // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. + // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. + optional int32 num_axes = 2 [ default = 1 ]; + + // (filler is ignored unless just one bottom is given and the scale is + // a learned parameter of the layer.) + // The initialization for the learned scale parameter. + // Default is the unit (1) initialization, resulting in the ScaleLayer + // initially performing the identity operation. + optional FillerParameter filler = 3; + + // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but + // may be more efficient). Initialized with bias_filler (defaults to 0). + optional bool bias_term = 4 [ default = false ]; + optional FillerParameter bias_filler = 5; +} + +message SigmoidParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [ default = DEFAULT ]; +} + +message SliceParameter { + // The axis along which to slice -- may be negative to index from the end + // (e.g., -1 for the last axis). + // By default, SliceLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 3 [ default = 1 ]; + repeated uint32 slice_point = 2; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 slice_dim = 1 [ default = 1 ]; +} + +// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer +message SoftmaxParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [ default = DEFAULT ]; + + // The axis along which to perform the softmax -- may be negative to index + // from the end (e.g., -1 for the last axis). + // Any other axes will be evaluated as independent softmaxes. + optional int32 axis = 2 [ default = 1 ]; +} + +message TanHParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [ default = DEFAULT ]; +} + +// Message that stores parameters used by TileLayer +message TileParameter { + // The index of the axis to tile. 
+ optional int32 axis = 1 [ default = 1 ]; + + // The number of copies (tiles) of the blob to output. + optional int32 tiles = 2; +} + +// Message that stores parameters used by ThresholdLayer +message ThresholdParameter { + optional float threshold = 1 [ default = 0 ]; // Strictly positive values +} + +message WindowDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [ default = 1 ]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [ default = 0 ]; + // Specify if we want to randomly mirror data. + optional bool mirror = 6 [ default = false ]; + // Foreground (object) overlap threshold + optional float fg_threshold = 7 [ default = 0.5 ]; + // Background (non-object) overlap threshold + optional float bg_threshold = 8 [ default = 0.5 ]; + // Fraction of batch that should be foreground objects + optional float fg_fraction = 9 [ default = 0.25 ]; + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 context_pad = 10 [ default = 0 ]; + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string crop_mode = 11 [ default = "warp" ]; + // cache_images: will load all images in memory for faster access + optional bool cache_images = 12 [ default = false ]; + // append root_folder to locate images + optional string root_folder = 13 [ default = "" ]; +} + +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [ default = MAX ]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [ default = DEFAULT ]; +} + +// DEPRECATED: use LayerParameter. 
+message V1LayerParameter { + repeated string bottom = 2; + repeated string top = 3; + optional string name = 4; + repeated NetStateRule include = 32; + repeated NetStateRule exclude = 33; + enum LayerType { + NONE = 0; + ABSVAL = 35; + ACCURACY = 1; + ARGMAX = 30; + BNLL = 2; + CONCAT = 3; + CONTRASTIVE_LOSS = 37; + CONVOLUTION = 4; + DATA = 5; + DECONVOLUTION = 39; + DROPOUT = 6; + DUMMY_DATA = 32; + EUCLIDEAN_LOSS = 7; + ELTWISE = 25; + EXP = 38; + FLATTEN = 8; + HDF5_DATA = 9; + HDF5_OUTPUT = 10; + HINGE_LOSS = 28; + IM2COL = 11; + IMAGE_DATA = 12; + INFOGAIN_LOSS = 13; + INNER_PRODUCT = 14; + LRN = 15; + MEMORY_DATA = 29; + MULTINOMIAL_LOGISTIC_LOSS = 16; + MVN = 34; + POOLING = 17; + POWER = 26; + RELU = 18; + SIGMOID = 19; + SIGMOID_CROSS_ENTROPY_LOSS = 27; + SILENCE = 36; + SOFTMAX = 20; + SOFTMAX_LOSS = 21; + SPLIT = 22; + SLICE = 33; + TANH = 23; + WINDOW_DATA = 24; + THRESHOLD = 31; + } + optional LayerType type = 5; + repeated BlobProto blobs = 6; + repeated string param = 1001; + repeated DimCheckMode blob_share_mode = 1002; + enum DimCheckMode { + STRICT = 0; + PERMISSIVE = 1; + } + repeated float blobs_lr = 7; + repeated float weight_decay = 8; + repeated float loss_weight = 35; + optional AccuracyParameter accuracy_param = 27; + optional ArgMaxParameter argmax_param = 23; + optional ConcatParameter concat_param = 9; + optional ContrastiveLossParameter contrastive_loss_param = 40; + optional ConvolutionParameter convolution_param = 10; + optional DataParameter data_param = 11; + optional DropoutParameter dropout_param = 12; + optional DummyDataParameter dummy_data_param = 26; + optional EltwiseParameter eltwise_param = 24; + optional ExpParameter exp_param = 41; + optional HDF5DataParameter hdf5_data_param = 13; + optional HDF5OutputParameter hdf5_output_param = 14; + optional HingeLossParameter hinge_loss_param = 29; + optional ImageDataParameter image_data_param = 15; + optional InfogainLossParameter infogain_loss_param = 16; + optional InnerProductParameter inner_product_param = 17; + optional LRNParameter lrn_param = 18; + optional MemoryDataParameter memory_data_param = 22; + optional MVNParameter mvn_param = 34; + optional PoolingParameter pooling_param = 19; + optional PowerParameter power_param = 21; + optional ReLUParameter relu_param = 30; + optional SigmoidParameter sigmoid_param = 38; + optional SoftmaxParameter softmax_param = 39; + optional SliceParameter slice_param = 31; + optional TanHParameter tanh_param = 37; + optional ThresholdParameter threshold_param = 25; + optional WindowDataParameter window_data_param = 20; + optional TransformationParameter transform_param = 36; + optional LossParameter loss_param = 42; + optional V0LayerParameter layer = 1; +} + +// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters +// in Caffe. We keep this message type around for legacy support. +message V0LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the string to specify the layer type + + // Parameters to specify layers with inner products. 
+ optional uint32 num_output = 3; // The number of outputs for the layer + optional bool biasterm = 4 [ default = true ]; // whether to have bias terms + optional FillerParameter weight_filler = 5; // The filler for the weight + optional FillerParameter bias_filler = 6; // The filler for the bias + + optional uint32 pad = 7 [ default = 0 ]; // The padding size + optional uint32 kernelsize = 8; // The kernel size + optional uint32 group = 9 [ default = 1 ]; // The group size for group conv + optional uint32 stride = 10 [ default = 1 ]; // The stride + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 11 [ default = MAX ]; // The pooling method + optional float dropout_ratio = 12 [ default = 0.5 ]; // dropout ratio + + optional uint32 local_size = 13 [ default = 5 ]; // for local response norm + optional float alpha = 14 [ default = 1. ]; // for local response norm + optional float beta = 15 [ default = 0.75 ]; // for local response norm + optional float k = 22 [ default = 1. ]; + + // For data layers, specify the data source + optional string source = 16; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 17 [ default = 1 ]; + optional string meanfile = 18; + // For data layers, specify the batch size. + optional uint32 batchsize = 19; + // For data layers, specify if we would like to randomly crop an image. + optional uint32 cropsize = 20 [ default = 0 ]; + // For data layers, specify if we want to randomly mirror data. + optional bool mirror = 21 [ default = false ]; + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 50; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. + repeated float blobs_lr = 51; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 52; + + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 53 [ default = 0 ]; + + // Fields related to detection (det_*) + // foreground (object) overlap threshold + optional float det_fg_threshold = 54 [ default = 0.5 ]; + // background (non-object) overlap threshold + optional float det_bg_threshold = 55 [ default = 0.5 ]; + // Fraction of batch that should be foreground objects + optional float det_fg_fraction = 56 [ default = 0.25 ]; + + // optional bool OBSOLETE_can_clobber = 57 [default = true]; + + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 det_context_pad = 58 [ default = 0 ]; + + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string det_crop_mode = 59 [ default = "warp" ]; + + // For ReshapeLayer, one needs to specify the new dimensions. 
+ optional int32 new_num = 60 [ default = 0 ];
+ optional int32 new_channels = 61 [ default = 0 ];
+ optional int32 new_height = 62 [ default = 0 ];
+ optional int32 new_width = 63 [ default = 0 ];
+
+ // Whether or not ImageLayer should shuffle the list of files at every epoch.
+ // It will also resize images if new_height or new_width are not zero.
+ optional bool shuffle_images = 64 [ default = false ];
+
+ // For ConcatLayer, one needs to specify the dimension for concatenation, and
+ // the other dimensions must be the same for all the bottom blobs.
+ // By default it will concatenate blobs along the channels dimension.
+ optional uint32 concat_dim = 65 [ default = 1 ];
+
+ optional HDF5OutputParameter hdf5_output_param = 1001;
+}
+
+message PReLUParameter {
+ // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
+ // Surpassing Human-Level Performance on ImageNet Classification, 2015.
+
+ // Initial value of a_i. Default is a_i=0.25 for all i.
+ optional FillerParameter filler = 1;
+ // Whether or not slope parameters are shared across channels.
+ optional bool channel_shared = 2 [ default = false ];
+}
diff --git a/caffe2fluid/proto/compile.sh b/caffe2fluid/proto/compile.sh
new file mode 100755
index 0000000..5743d9c
--- /dev/null
+++ b/caffe2fluid/proto/compile.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# function:
+#   generate caffe_pb2.py from caffe.proto using protoc
+#
+
+PROTOC=$(which protoc)
+if [[ -z $PROTOC ]]; then
+    echo "protoc not found; please install it first (see https://github.com/google/protobuf/releases)"
+    exit 1
+fi
+
+WORK_ROOT=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
+PY_NAME="$WORK_ROOT/caffe_pb2.py"
+$PROTOC --proto_path="$WORK_ROOT" --python_out="$WORK_ROOT" "$WORK_ROOT/caffe.proto"
+ret=$?
+
+if [ -e "$PY_NAME" ]; then
+    echo "succeeded in generating [$PY_NAME]"
+    exit 0
+else
+    echo "failed to generate [$PY_NAME]"
+fi
+exit $ret
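
A minimal sketch of how the protoc output can be exercised, assuming `proto/compile.sh` has already produced `proto/caffe_pb2.py` and that some Caffe network definition (here the hypothetical `alexnet.prototxt`) is available locally:

```python
# Smoke test for the generated module (hypothetical file names).
import sys
sys.path.insert(0, 'proto')  # directory containing the generated caffe_pb2.py

from google.protobuf import text_format
import caffe_pb2

# NetParameter is defined in caffe.proto above; parse a prototxt into it.
net = caffe_pb2.NetParameter()
with open('alexnet.prototxt') as f:
    text_format.Merge(f.read(), net)

print('parsed net "%s" with %d layers' % (net.name, len(net.layer)))
```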