From dbc24a3b0efacd700c6ac191b8a94bb1ad4d632e Mon Sep 17 00:00:00 2001
From: qingqing01
Date: Sat, 2 Feb 2019 15:51:46 +0800
Subject: [PATCH] Add PaddleCV/video/dataset (#1743)

---
 fluid/PaddleCV/video/.gitignore                |   1 -
 .../PaddleCV/video/dataset/kinetics/README.md  |   5 +
 .../video/dataset/kinetics/generate_label.py   |  31 ++
 .../video/dataset/kinetics/video2pkl.py        |  84 ++++++
 .../video/dataset/youtube8m/README.md          |   2 +
 .../video/dataset/youtube8m/tf2pkl.py          | 278 ++++++++++++++++++
 .../dataset/youtube8m/yt8m_pca/eigenvals.npy   | Bin 0 -> 8272 bytes
 7 files changed, 400 insertions(+), 1 deletion(-)
 create mode 100644 fluid/PaddleCV/video/dataset/kinetics/README.md
 create mode 100644 fluid/PaddleCV/video/dataset/kinetics/generate_label.py
 create mode 100644 fluid/PaddleCV/video/dataset/kinetics/video2pkl.py
 create mode 100644 fluid/PaddleCV/video/dataset/youtube8m/README.md
 create mode 100644 fluid/PaddleCV/video/dataset/youtube8m/tf2pkl.py
 create mode 100644 fluid/PaddleCV/video/dataset/youtube8m/yt8m_pca/eigenvals.npy

diff --git a/fluid/PaddleCV/video/.gitignore b/fluid/PaddleCV/video/.gitignore
index c06b6205..7052bdda 100644
--- a/fluid/PaddleCV/video/.gitignore
+++ b/fluid/PaddleCV/video/.gitignore
@@ -1,4 +1,3 @@
-dataset
 checkpoints
 output*
 *.pyc
diff --git a/fluid/PaddleCV/video/dataset/kinetics/README.md b/fluid/PaddleCV/video/dataset/kinetics/README.md
new file mode 100644
index 00000000..25eaee37
--- /dev/null
+++ b/fluid/PaddleCV/video/dataset/kinetics/README.md
@@ -0,0 +1,5 @@
+1. Download kinetics-400_train.csv and kinetics-400_val.csv.
+2. ffmpeg is required to decode the mp4 videos.
+3. Convert the mp4 videos to pkl files; each pkl stores [video_id, label, images]:
+    python generate_label.py kinetics-400_train.csv kinetics400_label.txt  # generate the label file
+    python video2pkl.py kinetics-400_train.csv $Source_dir $Target_dir $NUM_THREADS
diff --git a/fluid/PaddleCV/video/dataset/kinetics/generate_label.py b/fluid/PaddleCV/video/dataset/kinetics/generate_label.py
new file mode 100644
index 00000000..4f7c504c
--- /dev/null
+++ b/fluid/PaddleCV/video/dataset/kinetics/generate_label.py
@@ -0,0 +1,31 @@
+import sys
+
+# kinetics-400_train.csv should be downloaded first and passed as sys.argv[1];
+# sys.argv[2] is the output label file, e.g. kinetics400_label.txt:
+# python generate_label.py kinetics-400_train.csv kinetics400_label.txt
+
+num_classes = 400
+
+fname = sys.argv[1]
+outname = sys.argv[2]
+fl = open(fname).readlines()
+fl = fl[1:]  # skip the csv header
+outf = open(outname, 'w')
+
+label_list = []
+for line in fl:
+    label = line.strip().split(',')[0].strip('"')
+    if label in label_list:
+        continue
+    else:
+        label_list.append(label)
+
+assert len(label_list) == num_classes, \
+    "there should be {} labels in the list, but got {}".format(
+        num_classes, len(label_list))
+
+label_list.sort()
+for i in range(num_classes):
+    outf.write('{} {}'.format(label_list[i], i) + '\n')
+
+outf.close()
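video2pkl.py below writes one pkl file per clip with the layout (video_id, label, images), where images is a list of JPEG-encoded frame strings. A minimal sketch for reading one back (Python 2, matching the script's cPickle usage; the filename is hypothetical):

    import cPickle

    with open('Target_dir/some_clip.pkl', 'rb') as f:  # hypothetical path
        vid, label, frames = cPickle.load(f)
    # vid: clip id string, label: integer class index,
    # frames: list of JPEG-encoded frame strings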
diff --git a/fluid/PaddleCV/video/dataset/kinetics/video2pkl.py b/fluid/PaddleCV/video/dataset/kinetics/video2pkl.py
new file mode 100644
index 00000000..881857c4
--- /dev/null
+++ b/fluid/PaddleCV/video/dataset/kinetics/video2pkl.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import glob
+import cPickle
+from multiprocessing import Pool
+
+# Example command line:
+#     python video2pkl.py kinetics-400_train.csv $Source_dir $Target_dir 8
+#
+# kinetics-400_train.csv is the training-set file of the K400 official release;
+# each line contains label,youtube_id,time_start,time_end,split,is_cc.
+
+assert len(sys.argv) == 5
+
+f = open(sys.argv[1])
+source_dir = sys.argv[2]
+target_dir = sys.argv[3]
+num_threads = sys.argv[4]
+all_video_entries = [x.strip().split(',') for x in f.readlines()]
+all_video_entries = all_video_entries[1:]  # skip the csv header
+f.close()
+
+category_label_map = {}
+f = open('kinetics400_label.txt')
+for line in f:
+    ens = line.strip().split(' ')
+    category = " ".join(ens[0:-1])
+    label = int(ens[-1])
+    category_label_map[category] = label
+f.close()
+
+
+def generate_pkl(entry):
+    mode = entry[4]  # train/val split column (unused below)
+    category = entry[0].strip('"')
+    category_dir = category
+    video_name = entry[1] + "_%06d" % int(entry[2]) + \
+        "_%06d" % int(entry[3]) + ".mp4"
+    video_path = os.path.join(source_dir, category_dir, video_name)
+    label = category_label_map[category]
+
+    vid = './' + video_name.split('.')[0]
+    if os.path.exists(video_path):
+        if not os.path.exists(vid):
+            os.makedirs(vid)
+        # decode the clip into numbered jpg frames
+        os.system('ffmpeg -i ' + video_path + ' -q 0 ' + vid + '/%06d.jpg')
+    else:
+        print("File does not exist: {}".format(video_path))
+        return
+
+    images = sorted(glob.glob(vid + '/*.jpg'))
+    ims = []
+    for img in images:
+        f = open(img, 'rb')  # keep the encoded jpg bytes as-is
+        ims.append(f.read())
+        f.close()
+
+    output_pkl = vid + ".pkl"
+    output_pkl = os.path.join(target_dir, output_pkl)
+    f = open(output_pkl, 'wb')
+    cPickle.dump((vid, label, ims), f, -1)
+    f.close()
+
+    os.system('rm -rf %s' % vid)
+
+
+pool = Pool(processes=int(num_threads))
+pool.map(generate_pkl, all_video_entries)
+pool.close()
+pool.join()
diff --git a/fluid/PaddleCV/video/dataset/youtube8m/README.md b/fluid/PaddleCV/video/dataset/youtube8m/README.md
new file mode 100644
index 00000000..e9f2d2c9
--- /dev/null
+++ b/fluid/PaddleCV/video/dataset/youtube8m/README.md
@@ -0,0 +1,2 @@
+1. TensorFlow is required to process the tfrecords.
+2. python tf2pkl.py $Source_dir $Target_dir
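tf2pkl.py below converts each tfrecord of frame-level SequenceExamples into one pkl holding a list of per-video dicts. A minimal sketch for reading one back (Python 2, matching the script's cPickle usage; the filename is hypothetical):

    import cPickle

    with open('Target_dir/train0001.pkl', 'rb') as f:  # hypothetical path
        records = cPickle.load(f)
    rec = records[0]
    # rec['video']: video id, rec['feature'] / rec['audio']: uint8 feature
    # matrices, rec['label']: list of class indices, rec['nframes']: frame count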
+"""Provides readers configured for different datasets.""" +import os, sys +import numpy as np +import tensorflow as tf +from tensorflow import logging +import cPickle + +from tensorflow.python.platform import gfile + +assert (len(sys.argv) == 3) +source_dir = sys.argv[1] +target_dir = sys.argv[2] + + +def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2): + """Dequantize the feature from the byte format to the float format. + + Args: + feat_vector: the input 1-d vector. + max_quantized_value: the maximum of the quantized value. + min_quantized_value: the minimum of the quantized value. + + Returns: + A float vector which has the same shape as feat_vector. + """ + assert max_quantized_value > min_quantized_value + quantized_range = max_quantized_value - min_quantized_value + scalar = quantized_range / 255.0 + bias = (quantized_range / 512.0) + min_quantized_value + return feat_vector * scalar + bias + + +def resize_axis(tensor, axis, new_size, fill_value=0): + """Truncates or pads a tensor to new_size on on a given axis. + + Truncate or extend tensor such that tensor.shape[axis] == new_size. If the + size increases, the padding will be performed at the end, using fill_value. + + Args: + tensor: The tensor to be resized. + axis: An integer representing the dimension to be sliced. + new_size: An integer or 0d tensor representing the new value for + tensor.shape[axis]. + fill_value: Value to use to fill any new entries in the tensor. Will be + cast to the type of tensor. + + Returns: + The resized tensor. + """ + tensor = tf.convert_to_tensor(tensor) + shape = tf.unstack(tf.shape(tensor)) + + pad_shape = shape[:] + pad_shape[axis] = tf.maximum(0, new_size - shape[axis]) + + shape[axis] = tf.minimum(shape[axis], new_size) + shape = tf.stack(shape) + + resized = tf.concat([ + tf.slice(tensor, tf.zeros_like(shape), shape), + tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype)) + ], axis) + + # Update shape. + new_shape = tensor.get_shape().as_list() # A copy is being made. + new_shape[axis] = new_size + resized.set_shape(new_shape) + return resized + + +class BaseReader(object): + """Inherit from this class when implementing new readers.""" + + def prepare_reader(self, unused_filename_queue): + """Create a thread for generating prediction and label tensors.""" + raise NotImplementedError() + + +class YT8MFrameFeatureReader(BaseReader): + """Reads TFRecords of SequenceExamples. + + The TFRecords must contain SequenceExamples with the sparse in64 'labels' + context feature and a fixed length byte-quantized feature vector, obtained + from the features in 'feature_names'. The quantized features will be mapped + back into a range between min_quantized_value and max_quantized_value. + """ + + def __init__(self, + num_classes=3862, + feature_sizes=[1024], + feature_names=["inc3"], + max_frames=300): + """Construct a YT8MFrameFeatureReader. + + Args: + num_classes: a positive integer for the number of classes. + feature_sizes: positive integer(s) for the feature dimensions as a list. + feature_names: the feature name(s) in the tensorflow record as a list. + max_frames: the maximum number of frames to process. 
+    def get_video_matrix(self, features, feature_size, max_frames,
+                         max_quantized_value, min_quantized_value):
+        """Decodes features from an input string into a float matrix.
+
+        Note that the values are left in the quantized [0, 255] range.
+
+        Args:
+          features: raw feature values.
+          feature_size: length of each frame feature vector.
+          max_frames: number of frames (rows) in the output feature_matrix.
+          max_quantized_value: the maximum of the quantized value.
+          min_quantized_value: the minimum of the quantized value.
+
+        Returns:
+          feature_matrix: matrix of all frame-features.
+          num_frames: number of frames in the sequence.
+        """
+        decoded_features = tf.reshape(
+            tf.cast(tf.decode_raw(features, tf.uint8), tf.float32),
+            [-1, feature_size])
+
+        num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
+
+        feature_matrix = decoded_features
+
+        return feature_matrix, num_frames
+
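+    # The matrix returned above keeps the quantized byte values (as float32
+    # in [0, 255]); main() below casts them back to uint8 before pickling.
+    # Dequantize() is available for consumers that want real-valued features,
+    # e.g. (a sketch): float_feat = Dequantize(feature_matrix)  # ~[-2, 2]
+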
+    def prepare_reader(self,
+                       filename_queue,
+                       max_quantized_value=2,
+                       min_quantized_value=-2):
+        """Creates a single reader thread for YouTube8M SequenceExamples.
+
+        Args:
+          filename_queue: A tensorflow queue of filename locations.
+          max_quantized_value: the maximum of the quantized value.
+          min_quantized_value: the minimum of the quantized value.
+
+        Returns:
+          A tuple of video indexes, video features, labels, and padding data.
+        """
+        reader = tf.TFRecordReader()
+        _, serialized_example = reader.read(filename_queue)
+
+        contexts, features = tf.parse_single_sequence_example(
+            serialized_example,
+            context_features={
+                "id": tf.FixedLenFeature([], tf.string),
+                "labels": tf.VarLenFeature(tf.int64)
+            },
+            sequence_features={
+                feature_name: tf.FixedLenSequenceFeature(
+                    [], dtype=tf.string)
+                for feature_name in self.feature_names
+            })
+
+        # read ground truth labels
+        labels = (tf.cast(
+            tf.sparse_to_dense(
+                contexts["labels"].values, (self.num_classes, ),
+                1,
+                validate_indices=False),
+            tf.bool))
+
+        # loads (potentially) different types of features
+        num_features = len(self.feature_names)
+        assert num_features > 0, "No feature selected: feature_names is empty!"
+
+        assert len(self.feature_names) == len(self.feature_sizes), \
+            "length of feature_names (={}) != length of feature_sizes (={})".format(
+                len(self.feature_names), len(self.feature_sizes))
+
+        num_frames = -1  # the number of frames in the video
+        feature_matrices = [None] * num_features  # an array of different features
+
+        for feature_index in range(num_features):
+            feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
+                features[self.feature_names[feature_index]],
+                self.feature_sizes[feature_index], self.max_frames,
+                max_quantized_value, min_quantized_value)
+            if num_frames == -1:
+                num_frames = num_frames_in_this_feature
+            #else:
+            #    tf.assert_equal(num_frames, num_frames_in_this_feature)
+
+            feature_matrices[feature_index] = feature_matrix
+
+        # cap the number of frames at self.max_frames
+        num_frames = tf.minimum(num_frames, self.max_frames)
+
+        # the first feature is video (rgb), the second is audio
+        video_matrix = feature_matrices[0]
+        audio_matrix = feature_matrices[1]
+
+        return contexts["id"], video_matrix, audio_matrix, labels, num_frames
+
+
+def main(files_pattern):
+    data_files = gfile.Glob(files_pattern)
+    filename_queue = tf.train.string_input_producer(
+        data_files, num_epochs=1, shuffle=False)
+
+    reader = YT8MFrameFeatureReader(
+        feature_sizes=[1024, 128], feature_names=["rgb", "audio"])
+    vals = reader.prepare_reader(filename_queue)
+
+    with tf.Session() as sess:
+        sess.run(tf.initialize_local_variables())
+        sess.run(tf.initialize_all_variables())
+        coord = tf.train.Coordinator()
+        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
+
+        vid_num = 0
+        all_data = []
+        try:
+            while not coord.should_stop():
+                vid, features, audios, labels, nframes = sess.run(vals)
+                label_index = np.where(labels)[0].tolist()
+                vid_num += 1
+
+                #print(vid, features.shape, audios.shape, label_index, nframes)
+
+                features_int = features.astype(np.uint8)
+                audios_int = audios.astype(np.uint8)
+
+                value_dict = {}
+                value_dict['video'] = vid
+                value_dict['feature'] = features_int
+                value_dict['audio'] = audios_int
+                value_dict['label'] = label_index
+                value_dict['nframes'] = nframes
+                all_data.append(value_dict)
+
+        except tf.errors.OutOfRangeError:
+            print('Finished extracting.')
+
+        finally:
+            coord.request_stop()
+            coord.join(threads)
+
+    print(vid_num)
+
+    record_name = files_pattern.split('/')[-1].split('.')[0]
+    outputdir = target_dir
+    fn = '%s.pkl' % record_name
+    outp = open(os.path.join(outputdir, fn), 'wb')
+    cPickle.dump(all_data, outp, protocol=cPickle.HIGHEST_PROTOCOL)
+    outp.close()
+
+
+if __name__ == '__main__':
+    record_dir = source_dir
+    record_files = os.listdir(record_dir)
+    for f in record_files:
+        record_path = os.path.join(record_dir, f)
+        main(record_path)
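The last hunk below adds yt8m_pca/eigenvals.npy, presumably the PCA eigenvalues that accompany the YouTube-8M feature pipeline; how they are consumed is outside this patch. A minimal loading sketch (numpy assumed):

    import numpy as np

    eigenvals = np.load('yt8m_pca/eigenvals.npy')
    print(eigenvals.shape, eigenvals.dtype)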
diff --git a/fluid/PaddleCV/video/dataset/youtube8m/yt8m_pca/eigenvals.npy b/fluid/PaddleCV/video/dataset/youtube8m/yt8m_pca/eigenvals.npy
new file mode 100644
index 0000000000000000000000000000000000000000..632506b9ad68f030d64643cc8100868b21c3eb98
GIT binary patch
literal 8272
[8272 bytes of base85-encoded binary data omitted]

literal 0
HcmV?d00001

--
GitLab