From 14b6fd673ba7a984fa6dda82e0d45855b72b053b Mon Sep 17 00:00:00 2001 From: zsdonghao Date: Thu, 15 Mar 2018 11:45:31 +0000 Subject: [PATCH] update to TensorLayer 1.8.1 --- .gitignore | 12 + main.py | 5 +- tensorlayer/__init__.py | 18 +- .../__pycache__/__init__.cpython-34.pyc | Bin 0 -> 1024 bytes .../__pycache__/__init__.cpython-35.pyc | Bin 0 -> 971 bytes .../__pycache__/_logging.cpython-34.pyc | Bin 0 -> 545 bytes .../__pycache__/_logging.cpython-35.pyc | Bin 0 -> 648 bytes .../__pycache__/activation.cpython-34.pyc | Bin 0 -> 3662 bytes .../__pycache__/activation.cpython-35.pyc | Bin 0 -> 3874 bytes tensorlayer/__pycache__/cost.cpython-34.pyc | Bin 0 -> 24827 bytes tensorlayer/__pycache__/cost.cpython-35.pyc | Bin 0 -> 24723 bytes .../__pycache__/distributed.cpython-34.pyc | Bin 0 -> 13236 bytes .../__pycache__/distributed.cpython-35.pyc | Bin 0 -> 13146 bytes tensorlayer/__pycache__/files.cpython-34.pyc | Bin 0 -> 62049 bytes tensorlayer/__pycache__/files.cpython-35.pyc | Bin 0 -> 61843 bytes .../__pycache__/iterate.cpython-34.pyc | Bin 0 -> 9487 bytes .../__pycache__/iterate.cpython-35.pyc | Bin 0 -> 9425 bytes tensorlayer/__pycache__/nlp.cpython-34.pyc | Bin 0 -> 35747 bytes tensorlayer/__pycache__/nlp.cpython-35.pyc | Bin 0 -> 35631 bytes tensorlayer/__pycache__/prepro.cpython-34.pyc | Bin 0 -> 92489 bytes tensorlayer/__pycache__/prepro.cpython-35.pyc | Bin 0 -> 92205 bytes tensorlayer/__pycache__/rein.cpython-34.pyc | Bin 0 -> 5234 bytes tensorlayer/__pycache__/rein.cpython-35.pyc | Bin 0 -> 5170 bytes tensorlayer/__pycache__/utils.cpython-34.pyc | Bin 0 -> 20535 bytes tensorlayer/__pycache__/utils.cpython-35.pyc | Bin 0 -> 20353 bytes .../__pycache__/visualize.cpython-34.pyc | Bin 0 -> 14925 bytes .../__pycache__/visualize.cpython-35.pyc | Bin 0 -> 14830 bytes tensorlayer/_logging.py | 16 + tensorlayer/activation.py | 163 +- tensorlayer/cli/__init__.py | 1 + tensorlayer/cli/__main__.py | 14 + tensorlayer/cli/train.py | 169 + tensorlayer/cost.py | 1013 
+-- tensorlayer/db.py | 337 +- tensorlayer/distributed.py | 327 + tensorlayer/files.py | 1581 ++++- tensorlayer/iterate.py | 280 +- tensorlayer/layers.py | 5546 ----------------- tensorlayer/layers/__init__.py | 26 + .../__pycache__/__init__.cpython-34.pyc | Bin 0 -> 1091 bytes .../__pycache__/__init__.cpython-35.pyc | Bin 0 -> 1038 bytes .../__pycache__/convolution.cpython-34.pyc | Bin 0 -> 60865 bytes .../__pycache__/convolution.cpython-35.pyc | Bin 0 -> 60677 bytes .../layers/__pycache__/core.cpython-34.pyc | Bin 0 -> 43132 bytes .../layers/__pycache__/core.cpython-35.pyc | Bin 0 -> 43000 bytes .../layers/__pycache__/extend.cpython-34.pyc | Bin 0 -> 2987 bytes .../layers/__pycache__/extend.cpython-35.pyc | Bin 0 -> 2936 bytes .../__pycache__/flow_control.cpython-34.pyc | Bin 0 -> 2896 bytes .../__pycache__/flow_control.cpython-35.pyc | Bin 0 -> 2843 bytes .../__pycache__/importer.cpython-34.pyc | Bin 0 -> 6818 bytes .../__pycache__/importer.cpython-35.pyc | Bin 0 -> 6744 bytes .../layers/__pycache__/merge.cpython-34.pyc | Bin 0 -> 4800 bytes .../layers/__pycache__/merge.cpython-35.pyc | Bin 0 -> 4744 bytes .../__pycache__/normalization.cpython-34.pyc | Bin 0 -> 8481 bytes .../__pycache__/normalization.cpython-35.pyc | Bin 0 -> 8431 bytes .../object_detection.cpython-34.pyc | Bin 0 -> 1928 bytes .../object_detection.cpython-35.pyc | Bin 0 -> 1875 bytes .../layers/__pycache__/padding.cpython-34.pyc | Bin 0 -> 1539 bytes .../layers/__pycache__/padding.cpython-35.pyc | Bin 0 -> 1486 bytes .../layers/__pycache__/pooling.cpython-34.pyc | Bin 0 -> 13785 bytes .../layers/__pycache__/pooling.cpython-35.pyc | Bin 0 -> 13726 bytes .../__pycache__/recurrent.cpython-34.pyc | Bin 0 -> 56600 bytes .../__pycache__/recurrent.cpython-35.pyc | Bin 0 -> 56468 bytes .../layers/__pycache__/shape.cpython-34.pyc | Bin 0 -> 3796 bytes .../layers/__pycache__/shape.cpython-35.pyc | Bin 0 -> 3743 bytes .../spatial_transformer.cpython-34.pyc | Bin 0 -> 9158 bytes 
.../spatial_transformer.cpython-35.pyc | Bin 0 -> 9106 bytes .../special_activation.cpython-34.pyc | Bin 0 -> 2134 bytes .../special_activation.cpython-35.pyc | Bin 0 -> 2079 bytes .../layers/__pycache__/stack.cpython-34.pyc | Bin 0 -> 3005 bytes .../layers/__pycache__/stack.cpython-35.pyc | Bin 0 -> 2953 bytes .../super_resolution.cpython-34.pyc | Bin 0 -> 5375 bytes .../super_resolution.cpython-35.pyc | Bin 0 -> 5321 bytes .../time_distribution.cpython-34.pyc | Bin 0 -> 2978 bytes .../time_distribution.cpython-35.pyc | Bin 0 -> 2917 bytes tensorlayer/layers/convolution.py | 1878 ++++++ tensorlayer/layers/core.py | 1395 +++++ tensorlayer/layers/extend.py | 98 + tensorlayer/layers/flow_control.py | 88 + tensorlayer/layers/importer.py | 232 + tensorlayer/layers/merge.py | 147 + tensorlayer/layers/normalization.py | 293 + tensorlayer/layers/object_detection.py | 56 + tensorlayer/layers/padding.py | 47 + tensorlayer/layers/pooling.py | 490 ++ tensorlayer/layers/recurrent.py | 1644 +++++ tensorlayer/layers/shape.py | 130 + tensorlayer/layers/spatial_transformer.py | 288 + tensorlayer/layers/special_activation.py | 67 + tensorlayer/layers/stack.py | 116 + tensorlayer/layers/super_resolution.py | 171 + tensorlayer/layers/time_distribution.py | 85 + tensorlayer/nlp.py | 1183 ++-- tensorlayer/ops.py | 219 - tensorlayer/prepro.py | 2971 +++++++-- tensorlayer/rein.py | 136 +- tensorlayer/third_party/__init__.py | 0 .../third_party/roi_pooling/.gitignore | 3 + tensorlayer/third_party/roi_pooling/README.md | 56 + .../roi_pooling/examples/__init__.py | 0 .../roi_pooling_minimal_example.ipynb | 148 + .../roi_pooling/roi_pooling/Makefile | 18 + .../roi_pooling/roi_pooling/__init__.py | 0 .../roi_pooling/roi_pooling/roi_pooling.cc | 162 + .../roi_pooling/roi_pooling/roi_pooling.cu.cc | 214 + .../roi_pooling/roi_pooling_ops.py | 50 + .../roi_pooling/roi_pooling_test.py | 99 + .../roi_pooling/roi_pooling_animation.gif | Bin 0 -> 578933 bytes .../roi_pooling/roi_pooling_example.py | 
52 + tensorlayer/third_party/roi_pooling/setup.py | 30 + .../third_party/roi_pooling/test_roi_layer.py | 54 + tensorlayer/utils.py | 657 +- tensorlayer/visualize.py | 440 +- 113 files changed, 14628 insertions(+), 8597 deletions(-) create mode 100644 .gitignore create mode 100644 tensorlayer/__pycache__/__init__.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/__init__.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/_logging.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/_logging.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/activation.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/activation.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/cost.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/cost.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/distributed.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/distributed.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/files.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/files.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/iterate.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/iterate.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/nlp.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/nlp.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/prepro.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/prepro.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/rein.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/rein.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/utils.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/utils.cpython-35.pyc create mode 100644 tensorlayer/__pycache__/visualize.cpython-34.pyc create mode 100644 tensorlayer/__pycache__/visualize.cpython-35.pyc create mode 100644 tensorlayer/_logging.py create mode 100644 tensorlayer/cli/__init__.py create mode 100644 tensorlayer/cli/__main__.py create mode 100644 
tensorlayer/cli/train.py create mode 100644 tensorlayer/distributed.py delete mode 100755 tensorlayer/layers.py create mode 100644 tensorlayer/layers/__init__.py create mode 100644 tensorlayer/layers/__pycache__/__init__.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/__init__.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/convolution.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/convolution.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/core.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/core.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/extend.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/extend.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/flow_control.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/flow_control.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/importer.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/importer.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/merge.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/merge.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/normalization.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/normalization.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/object_detection.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/object_detection.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/padding.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/padding.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/pooling.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/pooling.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/recurrent.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/recurrent.cpython-35.pyc create mode 100644 
tensorlayer/layers/__pycache__/shape.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/shape.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/spatial_transformer.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/spatial_transformer.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/special_activation.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/special_activation.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/stack.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/stack.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/super_resolution.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/super_resolution.cpython-35.pyc create mode 100644 tensorlayer/layers/__pycache__/time_distribution.cpython-34.pyc create mode 100644 tensorlayer/layers/__pycache__/time_distribution.cpython-35.pyc create mode 100644 tensorlayer/layers/convolution.py create mode 100644 tensorlayer/layers/core.py create mode 100644 tensorlayer/layers/extend.py create mode 100644 tensorlayer/layers/flow_control.py create mode 100644 tensorlayer/layers/importer.py create mode 100644 tensorlayer/layers/merge.py create mode 100644 tensorlayer/layers/normalization.py create mode 100644 tensorlayer/layers/object_detection.py create mode 100644 tensorlayer/layers/padding.py create mode 100644 tensorlayer/layers/pooling.py create mode 100644 tensorlayer/layers/recurrent.py create mode 100644 tensorlayer/layers/shape.py create mode 100644 tensorlayer/layers/spatial_transformer.py create mode 100644 tensorlayer/layers/special_activation.py create mode 100644 tensorlayer/layers/stack.py create mode 100644 tensorlayer/layers/super_resolution.py create mode 100644 tensorlayer/layers/time_distribution.py delete mode 100644 tensorlayer/ops.py create mode 100644 tensorlayer/third_party/__init__.py create mode 100644 tensorlayer/third_party/roi_pooling/.gitignore create mode 
100644 tensorlayer/third_party/roi_pooling/README.md create mode 100644 tensorlayer/third_party/roi_pooling/examples/__init__.py create mode 100644 tensorlayer/third_party/roi_pooling/examples/roi_pooling_minimal_example.ipynb create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling/Makefile create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling/__init__.py create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cc create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cu.cc create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_ops.py create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_test.py create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling_animation.gif create mode 100644 tensorlayer/third_party/roi_pooling/roi_pooling_example.py create mode 100644 tensorlayer/third_party/roi_pooling/setup.py create mode 100644 tensorlayer/third_party/roi_pooling/test_roi_layer.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6da6370 --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +*.gz +*.npz +*.pyc +*~ +.DS_Store +.idea +.spyproject/ +build/ +dist +docs/_build +tensorlayer.egg-info +tensorlayer/__pacache__ diff --git a/main.py b/main.py index fe5dc45..ecc132c 100755 --- a/main.py +++ b/main.py @@ -1,5 +1,4 @@ -import os, sys, pprint, time -import scipy.misc +import os, pprint, time import numpy as np import tensorflow as tf import tensorlayer as tl @@ -111,7 +110,7 @@ def main(_): ## load image data batch_idxs = min(len(data_files), FLAGS.train_size) // FLAGS.batch_size - for idx in xrange(0, batch_idxs): + for idx in range(0, batch_idxs): batch_files = data_files[idx*FLAGS.batch_size:(idx+1)*FLAGS.batch_size] ## get real images # more image augmentation functions in http://tensorlayer.readthedocs.io/en/latest/modules/prepro.html diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py index 4c7f57c..799353f 
100644 --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -1,8 +1,5 @@ -""" -Deep learning and Reinforcement learning library for Researchers and Engineers -""" -# from __future__ import absolute_import - +"""Deep learning and Reinforcement learning library for Researchers and Engineers""" +from __future__ import absolute_import try: install_instr = "Please make sure you install a recent enough version of TensorFlow." @@ -11,21 +8,22 @@ raise ImportError("__init__.py : Could not import TensorFlow." + install_instr) from . import activation -act = activation from . import cost from . import files -# from . import init from . import iterate from . import layers -from . import ops from . import utils from . import visualize -from . import prepro # was preprocesse +from . import prepro from . import nlp from . import rein +from . import distributed +# alias +act = activation +vis = visualize -__version__ = "1.4.5" +__version__ = "1.8.1" global_flag = {} global_dict = {} diff --git a/tensorlayer/__pycache__/__init__.cpython-34.pyc b/tensorlayer/__pycache__/__init__.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1a6c731f57b000e3aa43214aab794d8d4054783 GIT binary patch literal 1024 zcmZ{iOK;RL5XZ-xeJ1;AOX<~uQxWW{ClEpisVx$hs^THQ7b}~1cTK(a%5hrl9^uOX zCq7TEoOR+U1H^MTa2Hs&=3d|)uwKn^;22n3a}qcKmekx2+y~aLc@TI2Y*6zs@DSLr=274g zuu;w9z++(Jns);40NVi~aGSdzaI!tfcY)mki7nX=id|^JtHZtR*@+NqszhqF)C-#G zoW2xN&yCH*N@({tq~y$|c0)0QLki5umckYxcXW;(Ue+|`PGCrkWNOk4>p^s*R)dDmMpT;}!5biPOx<%hcg?NX^U?4sB~Ll_=2ZNGELS1iEdN zZg5>6EuB=}>0QdAOqJY*_}YrKHTXNyYK^98u~TT1To%sC8J3%GTSt>elcU3fs>6A1 zGR~`*^Lgp8WX>(#SUlU`Wn(RvSH&)rIL8vJ_Ul$oW9&sDmp{y;7>_XX^MPxkV2HhoiI8>Y+D z@4)_f2dI_r&F8vWqP*o$JFW@1`b5c>CD|Hjc(-6t!Pmr6YKhg950 YX_IjyBB*3Q_8Vig|GZI~v>W)6U;QHt#Q*>R literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/__init__.cpython-35.pyc b/tensorlayer/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ddcf5bd54fea2ceff5eb52977564e41528129777 GIT binary patch literal 971 zcmZXS&2AGh5XZ;aeC{Sq+S2xF!Knxml@me;A(bL=sVYD!_+mM`PHNS$BYU?fIl|M> zN7^f=z5*x4-U=$QtNqXXc09Ic?8DJ0rRv2j{h|XhfukP;z7zOr2z)8JQ1n1|VbufH z0}{9&xeu)0@F4O4*r4GwatbVMIE$PC%Nia=9s(OSJc>L5Hfne$@(!?_hR2b|z{U+v zB2R!#8s3e(3v3sN#BJ_@#L4!d*aP+m6t?6bDt4g_pN{vp=kKIkQ!Rxv$}Fic1^p%C<7ISZPRZ6Z_YwODG=>@Lg zC0&VYNqyxc-PnpM<4d7671YT*tR{`EmRIx!r&88XyP%(?@z%Z9_S^J)`;>EKRLS{t zy`it@sjYNDjV)=j%EQ5Tcm={vQjwSHMwB=mQaiW4MB7;?Ej>CtRZ1sHiEdYm4Xzua zRjG7v1~%Nw literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/_logging.cpython-34.pyc b/tensorlayer/__pycache__/_logging.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30d46c3e08eeb21be093c0ce2bebc4edebf7aef0 GIT binary patch literal 545 zcmbVJyH3L}6ur))X-kn9SeRfzA_L?H5CVaUkWfJ?EJX;pv6H4Gc4enjMcrWJ1Na5L zgP9-j%EZD4Fmas%VqwAZIXba?&b`-pZ!I=oRv&xM0Pu?VSw5;=gx*79xC!I{Jpc|c zB*2_XIKW(B9^8RLm=CN5_jn190$}w@d)CdpXW1Re+w?5P`)PUx5*muZ@b)OeXlBp$d)Q5uh` zR6l{LqxYj9oolXiG@z16kwynH9v57cIyzNy$YT_vX*rN$b0XEX9?_UbB^O$%oKCs= zQ(t|R-WpAneX*MD*0v6$`u@i_%0ssaCR7QWDRh+0RDmTs%A6WjHIXd73tAoebN@o0 r>MY)uBFXSwjymm=s-Md9E2m1dVWz%aNH)&-j{O1J)@(T|Zp-@w)jM;b literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/_logging.cpython-35.pyc b/tensorlayer/__pycache__/_logging.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25290675eb51556b12149d3ffbfa9bb46d77a33f GIT binary patch literal 648 zcmbVJO-sW-5S>lZCT$bigLv*mFb55I7V%USNDnv#r{*OXl%rcHYh#vKd9~e)^tD2JiuE$3cFGp`Q>0ehhMelmT|m91spXGe`j> zz|6Hpk1C9=8}N*+Od zFfyH5mby|oUr6=XyEq$O_P4eRsWs1JtS7jDJ!IU)IKt2y2sKV(fNMN4c%@;0kFrth z7(cJF?550&cT-L)EV$F6HXc`*egoyB_G>56TI?m~mAch)K9Px-WV}q)tRz>fS9a$M z<)NlSgUJOjoBzX^q6Liqz*UL%Vn;PFS4{$PwS<6J3!#hgGm!E)TIsDbsZ1-A^$=G@ z8fc{QUgpL-84XTNG_IB8#W62ME>-GZT_YpOpi%{|YogO%8lw8|qRoX^3GJfR%$h-K fm0X%&zo^7rE)VD@=^jGpbXkvutji)6xuN$Br@n~V literal 0 HcmV?d00001 diff --git 
a/tensorlayer/__pycache__/activation.cpython-34.pyc b/tensorlayer/__pycache__/activation.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..253f0e0eb89a30e0a372433aefad15b3b7f5c9d6 GIT binary patch literal 3662 zcmc&%&u`l{6sGL_s+%-v(I1Bu7>i<9JUDWkv|Sf8*@|`@2DCvJx7d)2OiQ%QmJ$V$ zitQrXr8(@j^Y$O~zRUiPopjx)m;C`d?LEq|n>H!1Zp9RnA|*bOdhh!_zWh1AQ2F)d zi{07+A%Bopu32b*0mb|XRRlkeNJ4f!5|v0)Ceaj$rb#qIqFEBnk?t(nh3BX6?Ewsf z0bo)=tLQY2KPZ!3Fqy}}(E^$ul*lg30KYR>A9!RJW`o}%*4UjT9IWODpC-F7vqzfM zrI)Z9vYpR~h$mLqVLqi0AwW|j&= zTdiV=R?Dxrmk$|ikXx>e>#mG1*B#Ie`iLh+Y1hTsM3QE9WW2*|rjsL^)WWt6tV-=L<&@#B2ICFAAXP8-2Kp3yr>p*o>W>ZVuij%SQc1UdSA|&&?ltv? zT0P+*G}58%sbrPd*xgw#t&Izy+8y4Dj2{VTd!&()6f#f)U)H#!j$--+PEX>fSG_ z6oJTO^*ur+KNCuk2763q99?p>54@Vfy;Mti$#(JuH?}=2hF7O_1;Uw6J72mR8|X!N zGy$MKjomU-SD_U6IN$x{XS{yaYij@d|_gG5nf<1CWzLgdu@jEr+01s|L+TWo^leHf(BrZQQ(Zt!QTO z_$D;RfNizlN#h2-g>AYXye}}yWZGkIV6-V!%8LQ-hQcogz)H!FDA$VH(AL%#c*@dE zYNhY=!}xbWO!$4&l4W>Rqm9+o8d7>wvbY^FiX?*3@Wgk*y*#MVRe!Boa~#A25&sa14_^U0z#ltXJu+p0(*lz24_--&mFa$|9dfS^WprPeeUs zpctl(2o8F~wiZz!!dRkh&=H^sej0VQT0u}a#<8wi(n~;{d={ih#93S|kND|;a4a!V zn>V1ilaXAx?1fIj;R)XH7;%hTg~o`H9%wZA=>y_nzeJvtk)H4fNebNj7Kr-IchG58 zOKg24<4ruY$KqCCk2X1=-@VxY)-}28tbD~R@bmWMEWS57iwkcVa2L}9vF9t@t+TeN zH$H8w`m5JJyVgKg{L51rPX%PeQCa>UBzYTz)djMG+X+kvgw0DylFu5^jg^QX&?vme zMif;vfMRB#B4?MplVbV_ygm}s3va}AEy)6h>Bq2rKMO9`YsWB+0B4%9< zaOWVgL+WxgLwgJe5@*s15Dh9>o6A};K^CJCD+XeM)hM9x*knw~A#HP-82|(du$!71 z-o@m_C2Idcv@zm%tDv{^%a|SdwrF zQq}K)gwRo+Zf(-F5y*@AM+>Zv7r1`>0ufJb?_Jh8M#xXX?fVnbx1S4#Fxf1^eI$3s zlL3X(9}TLGPm@T5Jdny5I8z$nM%ND&UmlID(P031OO22!*=+EY>*JH+1XVxObldSY z;?(eoxb*vdzX&`W%6+FYqh!I&wAf;n$vF_JffQt0yKC9hh^E0e8_bUp}xCqdxSZgny4I#(JKs*O;oYq|SPE=0$ZDt*~g7MORp~#-eo= zU1g&+)>~l`T@4mpW8c(Rw81d)DP!MwtcSsv@j6W!-5~R)m<#5wk!`d|vpub`9%jSK zn{v2eVtZB7tMK<$yL^CFo~tF(ahcEt?#gR_NeKVjY#HNhQi9IT#qA} z+Sty@6)lpyY$z$dngv>_qFgnT*o@1qJf2Dwpe_TG4Q(Q(f#l2OxXH4V3&ZWuKU)Xp9mXc`eBixk)?ivh<==Z0K4gHnje7vb{;5*&z-|e1M&2osR2 
z2_Ag8)Hyf1uYOrql2=cH1dci2LzM};@O3Ei%HP~(^C9q-;3}I)%?DYLM%59+T+ms) z*c?Ngd6DNA%~Jy10XKGe%=EoyFygL%JIe`ELN#CE>6bS`73aZ;P=zFyTx^4OE3}tp zt*qH$dEJd2%&Otd6@3lIS*|)?x`&wRO}tuSNpDb-An-m)g^ydKzx-Zf00@wBi=a}2>Ue*eld&e&VMI=QNa}GcAAa6?mUyHS3z&bJMe#6}5kCR- zh_l&{S5|RqbruyNB!CN^X6dQa86Rm8#n34}%rsBs2t+>t`OlH@e}-QdIDj&oA&j`# z?>hwb`z=(XtQaU+)v>Aly+!rz?W$Vf^+%{K0Nd~5P3JE4VwQleCC5lmfc6~@cyxcxC1+y zKg!Zd&SM)(!#_!KodFe(WTAz^!%m=?{C=1f#>OEB=A<$${;7Zrz%fGkd>e6Vzsc{7 zt<4YG?TH-t#)=dui+mXs?XSfojoZ0^Vwg4|I2g|cI*x)kOcdD$!x)<2=h5(}9|VexgT?zQRo&(UXE7fr4ca$H% zeT%sIU>;X1cNzFg=xKc7XL{5Y1Jmw&*xB`W@BH$1hkWq^BP@jlqQpgA{y%8)BPgpo z$p&pFFd-OQ*3z_GGw~?N;s_^A%6sXNUO|UIF;rnUH@$bg)8GG>`B9qs^K9#R8PcWj z_yrca0FO7m2aoO?7%nZWi4L^8B6F3SR4E?M3X5FFkTl1M7)ek^Lat}rrGti#1;~~Z z%Ek~NuEap9Ry9G9kddecQr^|6Q1-bggiktH}UO(u;Dl`@u_U_v5ZG!$l+xE496l(rGo5D#*iaEZ}X!=zBlJf zHUE5p{lx-zE?)o%upPh5I+p~6=TYF*iSmnM%o$7$tK+`#Gl8NEiLgH()LcZSC=O+y zGGh>*I&hsbIN&FF~%QR literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/cost.cpython-34.pyc b/tensorlayer/__pycache__/cost.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19d44731ad56664105603e3089351cbf05ba24aa GIT binary patch literal 24827 zcmeHPTWlQHd7jx_UPYO*BwDiNI%7MIxDvU%$gvaEk|SGIqtqg7QL<9H&hBt$xEyJB zhMJj`L`FiB*l--6sez_N3-qOZYJ;E;1(Kpb-`cmJ1=6P?0~9EdqJ>Z(NYR%f4{g8i zKXY9!FN$OXt+mn)XV0Ab<@^5sKmVDZjqM%%!RNla^rIV^_ERnQc@+0wz&B{6-bAs-~i&x6d0KatghW<91^hnVwwC2a{gAC@8>=anHS=n0ni9;$>A z@$?8wXL6MOq-NDL%h0T+S)cdH+7jyXjAqS9;IphD15rZ+&Pw2?S!V{K&Zx|DtUq%@ z{gF9BKEvj)Xf)?2+r^^ME0>Q+0kCPTQz;u0zZcw zuPZl?7V1kQ}g}{jkhi8KVZL?X{dR zXf$f0lHnVsZ+2{Crt+UoKl0S;hHKr>I2pG?!dQ(n#C;BjHclB~yN2l-)(R7Ra=orPZ^~G&tGcHFM>~8N1V^i? 
zTQA!2`YM)!-j#x;jWf&W+u|%ZX*6D14a2qIIkyjZ<&5Z8NZqmUF{4Orz^M*SoeM zeI0uJyU5@~zI1M`HhVl8Y@5q=JHTI=aZ!aSs{_%Y*G?}D zFJod87Mc}4!H0hz!B=?6#*La7x67oX58u#q{stN0Dm26!VIoYCLx^$^B!-!DScUV4 zrX5b69omB#1^Rkd*U48`z+rEG8JETCfca0DRj-H`p(d5FiMZ~I5ERcTpbpp91a})N zZO;r(%r^{xg8K2o^WYs4$X3u#Ad8>niDinw(lJBf)PT4yOw&-yKhD!$3!FCC^`s#M zOF4?H0?0&dlj+2>%V+cbZpCw>5qC;+ZQ(iu>T^)$TILr!$kpn6Z5G=FD>2Tb_Yn1*VpO z+S>ZFufKSLB%mgG3zG?-pT8Vc%v(-y7vrrhR0m|bN26?2J!n!&;na^tW7G}0ou~|e z#=1!7cr+CEWZ%io8GHfXK&JGeefqg{at|PQFTOIle;*a#EX5R6llyDBj>(G2{jN-I zN@FF>|1E-_e_5hda#sjm5ZZL@UCd}C!GB%5Q_-$}Q&xzIIVkbBi0KX&iz`cUzmVd7 zm@7zSRm=o+Yu!tsJ@MUdv+`L>D^m1Vb++`2x%zKWG*)m6UAF5cqx7C6?wA%rWg1|o zLB_oe2)F|631Bb?Fc>ob20(RDq}1(&RC4mP(lDk)!7wIYpRbBc1z|N40nO07FZ7r{ zJ7t_WOPF~6tf4sAaB6m~M~1Ag*d|14`<tv%4Xwb3tk!{CZAED;)<6C1 z)K!W(XSRbr<)D%%X40}(%x*iZ?g)2@xZAh_`PI0YleLNC0?c|>bM!= z-6h@nPvdfz41AY>w6X89d(DNmWjT=bsBA4%hZW^NgU8Vb6pp$mDA9n6IDvx0{fQc# zDaQV@i0?m!3qUl7jq%_LHb&(RL`+JXIcb0>2-ArcP&UvG?9(UoLH$_inEahB&C1^s zr4ywwy(t0%VlhoB7QCM#CssCA@Q2rXkh3IsvYrF60l6Q{VWyaAlpt_N$rB0M5R``m zS_L;CAH>nhn)~x2a1BU7Kijz=Kr{hUx{Ulz@~N^50V6-|tjlyhxQ0vYTRi z61X20SG9tYuZs+HA=iYyt>wzDmmVyQoDX2^-h)Ww6F6~RZl!6kgDL5su zni`2TH1|*%+DR?@#7RSb4LG2HtB6@U2qzheDWRl{iICRKnq%l{LiHx(%jbwPJlaR z@SQ9J$U&k+>UVO9vp^(-!Xuagw1mP#v;BZ}5EOpsHiV)f{K4&!JHwJC3@gLL1pLGe z%=+C!XeiD4Sm7}LPA>lKUtw<~pv>$WUs8Qkg=>dzU7A8XO?N!$OPv)!q*5{ASBG0=q>MrEDZfKB5IFL#ly&o;Y?bHuz>0 zN`(AbssM9S#+*nY*n{vwDG`gbxw@0cp@j9=F4D*5pSQ?tZIiFz0}XA`bk{Q}JbM&f zn(wH@T%Wvbn4+Mn#<`*R-pnw>sP39`3s!zLInCy^mhW|4tBTIxQaXjsZK&4IOiD}Z z$_jTp?vpCV0_C`dvyyZZ>T|>P*HzEN88tD;P+vV`)|j+!AUe{-Yeq8U2G32@HiU97 z&%#w~wykAgP8LO}tAaXn`O(a=0Wsf6%z`DnarSE2uS}5EtQoR$X zEhDoT)}%XNJ-$KTQ5SehToD)|%IlrB#J~2hFi5l&vrWXDq2~1O0v}swZ3>F?T1vr17S22 zZ%~Pl9SwJ2(R9|@>wO?A9U%pWp9PNiFYxjrFQ3C@X+&IR`r#I(=Q6AT!9Lt*hn~`( z(w_!qO$cJ8K(zeS)P$?lf1uB3EbYO_bIrw4bDuO~a5t!p> z5WnG*rNRXlvxJbCVLGH(jXM#kN2Hz*vPVRu9@TD-LZq&kZ}rEk^YmK@u=x(ltl@#? 
zjms`?C(nDAtDD7Ync|rTQQw`1{$jOFu>f*3#PcmBY_$+}0x8N-4?>*ofs=c7q1l~W z-BZd#(AtF{y(MTZcwxa!NTwsUN4(1rBwg1FQ%%lYD`KK7k;DtCAm9Y~P4S;O`a>TS~0G>XcyELNwx8VO5m2v5n#zqnK7*UP;7740- zeqevz4Tk2^A$$eX-p7r?G)P+jpF5C%L{@4ksDVfY%^hYxk*eZA1$62WZ2gJA#e8Vj z(;0%SDw4B66BT=)nKC1cX#O`SXW{1&gu>pBA9UpV12K* z@lRXAVh6Sy7(+tSX)6~?*2i(Nr&Q;THWl)f+FBb{)cTdds_zom1NRnFdClLJOy zHvWUyil7X>5;bGGMnqna30^K!?*|nip`?Fyk3v- zCK{rx1x~-w2-ZVTZ#&oQjEbs*;(izH$u0P3G;8b9k*)Hruekm^Y8F-4;9dfvz`RBR z8pDltWW4h0FaV6{d-YVLrr@2@&mZGQEK-AJhGH%ODKJp&kc-xj69kH8NF<3QO}B7J z&(P~dZ3jz1`h{CKB<6#6h9nD>kyx9NQV|pdYT-1x;*@k6oXeX3hg9w@t~Kh2Tr&wg5S0mNXWdf#aTt(G}%x<;H|^RoobfDcwN$wc%;6 zm`GhShoa1LXbS#FC{`s`HD%I136;ZBZ$xymlmxFY@&1AZI<$z{ZU&)2zC=garHRvA&WY zGb~2m!8VM5rFvfX*5)U((dm<+TEQ{CqBgRwH`O-jdJ!1a!R0!Ed#G1YBU^rbuo9!sR#cFh7BsYUknCtr;^Nnjropd z#VWsIz?gok0vO81{WOED&3gFciunG zupWn5y3GBU(}ON1+JV(_Ivp6mDZy{>uef03A%|mPV90o1*4nz(DQVx*G@ZXITDz=u zD%!V7I7u_0eIOD&Ek*Drc{BEjN8n?a8u8iBG!pjP5b1|Z15x}Ms`(8`FvO570Ur<3 zK<@vEJRaHnSUIrbjKPR@6$g<=sRQV!wmKsH9!}=K4)sm^*R^k!v=w;j?&XPM*vFJf zY_U3S{yTirUUwiK#iwP(BLqJ?u$e1q$+f#a4vA9igaPjcEOgidr&z0;LmEy4(Rt#O zXGe{Otmq(?W#c(znCZC{h>AE}1EXeY(S&Sgu31wi`<1yHncqkwWkIFk*bvcowoIET=;qtSLI;{l3=NScJIP;5BVYHT-6u>^EX zI56aou1djg@c@-_+dnD(8q!%Xu7NQF+PN?niETd4H38PpFM!S3tenW8dq)zTRwqn7 zo(_x4^RfKl(yK>hZ!N%|G1x0zi)YaBd*dmU60z^b3qGf<+Cxn7;AH8 zTo^`#LxT9wZ(J0b8yX~S#Yunu^2UkYq%9wZO)$=qdaiHk4P#N7gl6YvK(2^diez;3 zXB!vs6`ZqhU)2``3#+9_ulGA_6%D`CHap9fdHOMA5U`OOSc&=b@wJ!Q&`#RHY22u& zO_swC(UZpc1Na7C!$pxPa1wy@;Wh1!u6-c34`Q0IC-_tyfCrGitCs+>AHY;UkeKS# z&p`Y$wmGwE}*FkalSYahn-(&3>?X5JU{Os zm*eQ?n@n_>my5gzckyILRBAhiIVyBZ(MtRErz<1+#uHnOPTQ&OiH4M2I*5*BI0kg$ zA>`T$&*9TgNJ@+*HpjqHkUB)84&JQ5nl@4Q^E4UzW;+_qmS3PEnB^qP#F)NM-&fwRAJF$hb~_M%_d`2*oAH&X+jYuCeWrL;*tc^E=jB7ZolZcP|%s(S*({Oy zap(i1T!EXaF39o;U|OFH+>dMhOL%ZDN3(_Z&1|Mwe;n~sey8V7AFd?@Dy{{0rd$iH zM08iRs{p1X*On8mmAwV*6DWS=wJ+C#p;N9E{Z(=80MEC`9{0vf!nI0*Z{pQI9k&;p za)6K#t|AITdVy~Jw-BOYUEy6nW*rb_RUIr$^Batb_ek|2nLL+zU+lxBTMR+ltL{+@ 
z_6{=_hW-lggrV!aYw%+7vdl}97l>S}gpi={pUu#1_>Uv}9IGY#cVKJ&J2ILIlKqMB zhsfz8C&KUTMt4=p>Gu-lRJ=?@IhD6~y{D8@XsQLKlNWpCnC=ntN9dLd=B;V&AM{Qs zWiWz4K2uG$)-LfTrM{Wu=A|;bT;PVUDR0e5(!_ofd^OHA-^e7D*}?1if(g^*tvaJV ziw4LItQZef*lkqUt=KSrnb9_U*l`~mAI7C+QZCf=|I3W{;Izl>IqfD94EUpMJcmWX ziNC%6xHOJv`6vaFqp?mDN?6EZg)%0&wuYT->TP*!yl=)6Pli}dgHcIY z?+l#X17$sAsE>%D>JLz7OWyDIkfZ=HMHA=tB=x5$NgYkVatq-7?D2>ehI_bu#j4q`1gb|k^-wf`<{2z|@6!D+ z*hXcxHa8#J$@k=5eNoiOqCNFG45EDTO}&oS({$T8Eo}w;azEr&>9}?8yBsNTxueY! zTexE-*gc=!VbqxN!O+}QHews4jnm@&l;)Ju_Q|EWzsS>^>;dkQ<$CrMA3V+B9i`ka zddP9R-AK$2f!r5g)w``0R^P=|;BM`b4>@kjZM^L{?jJXE+zwlL{(gYjCywKuMxz%% z7Ke!R&+t;@IDeI44lkJn@jb_`9>ZI&Yy^gm$)a&xPV|ml9&+R>*25*0mmmyxL@A@OW z@LZTr-+Z4P>C;}IwsUDs^{Ir4Um1I1e12?pd~R&^e*r=!?56+# literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/cost.cpython-35.pyc b/tensorlayer/__pycache__/cost.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8237488e033e2d767e85cdf3b56a0f4031c6f5ff GIT binary patch literal 24723 zcmeHPTZ|l6TCVEuxq6&&Y)|aiAzO(z>uGzY=i+PvCU(||W388X2p%V1CWP*qu9}{* zyQ?Nu)wXA_#RgkRR$|G+BH)1sv@ftoJRmIy327zZ4S}>gp)F}8w2Ooy1X{!k+6UnK z{!`bUo{QH`B#7JYGu2h+e)+!t|IdHwiM@MAjiox4lwRxFQc1J$zpPmm%^J|GLCqS{tYOU>(X3I;+M`)xnzdK69@DIG&Dy6~k89R` zt@XIJge1CV9boc%$Op;s^B}Y0Ph>TrSx;!zA?AFqq%9%m!&1bP40#6y9bt+0P$iUz zr$<>jlcV&fG^?grhGsp(`rIpPOQ_GYnl&RgkFkcli5lYOtla!O>&%;|Gb-~O>(AU! 
ze`Jn3Utn`sG@5gq?PAer7amQrjf{*oR@qoSLSs*8)=4&;kM?WU6nlV=(C}$?1+UR+ zU0bZqeax<3i1s#pF9_pge(cWl$G2k&-G-?r+u z?|Xjqlzbu3OH^itXEO(~chvTMn+9o*nI#t5wrSE%0;5 z@w#&Hc%e@9zpaL)@(07)*%qW;L zoX~JOYi+xOQi4$fpCB&WaYNg8&31deW-Qva(P)G#HP@{LYo;Hd$5piAbWtPI_F7ID zG#a&0$?y%+H#;^mQ~6J)A9?C^!?kW`oQ~Te_gIZ{#Bi>p`)dRGdXHqI@hZ;P|ww9$BJH4N8+(=#)+Tf6C8ch+pnF>9XRn&GdRSMyMD zwo$LEew;S0V5&7e%Qn!`We+%T)3%Hi-|PHTc}f)*HqN}!w#~pcTFwpIFpaM3yxX-6 z>Fdzz-$e!|@|6p7wb_%=VB1`_+X4Q{jEgEvSsjQ*Uc1$_*EqwYQn(WB0d#c%JjAM~ z`HLHwGB=kVwt~g79Zn(ZC7}{8AduDiQzF}a2s~;~s4-O$2Yz5;4rTAH% zB&G-w9WxYu42aRf8V$9S<2>!Pz-faiPa9IOl%t3zfN;|`ZkTOg5n#x?0b$9Ac?!Rm zZ3A+Jr)rk*LY4v8SHQVVAmXq2t02Te*GocqyejG#fc6O{qBSQzOfkA~u&>^s{zg)iY7$ecd3Prs1P z?g8B1i?7V?-$4a9O)*E+?EadrW3~bS@5=0^SXI*eUnlzcZ%WikE`$&T=}Xt{VoD|yi4^y@xq?(y#Y|AQ*8LRP6W{#?E1$KrB1Qk2 z&X#^TSN|=F#u9F!%XZyll-`rX9n(VSJ_GDD2&=aN0au|05e)``21Djw1F9~I$hp15 zNrIhLy27-m3dZE?^HmX$AbDo;oEe%A1RwL~r;JnQi4)JCHx%0%PR*|M2!!<&+k~8K zzcW>QuxdN4)o==+V1=viFq?uPS$I5UG*?a6wc9%gln;VLP9UNQkZ9TSAP~7Jlw4i4 zA>#WGygCq)ZRve>GV6@BD6)`EV6m0JsW6Es#?Q2IamBdecOe{iA!LaFpA&@|4dPKg zYmyJ(Exc*lD8fZ~N_;k5%YbG9oh>vh89Y^h3Z<9}W;^Ip4l0RaCM|o#?6$+|j(C^I zu8pe@MvY5s>Y~y;jn}AqX3*DPd}WHFwwgiS$0S_;83Z4bgFhxBZR~r~z3xJFvK(kY z)SMQo!;18u#p7rM+CyEmjcC9{oIv5>{zM_pR9ycU;`>h^0E*_oF&|S|* zFbE@v7En6S4(!t>^g;bZ>4f~9EzQc`Q>9ZSNQ@#fAQsc4g2BBMI>>cjQ=h#RrtGiB~!KgZF~dDOxhl<MLUJ0KKZO#a3Z=VV$Qr_vKfzZCALB~+I9WO=LdRTbPX0b$dLF_@G<3~u zHes<>*eXyW@DBv;X?%vrS4R{EXmBbQ8ty?{E<-cA}w)O#%?5>vzQRjnzxV zZYdW@Q-%u_Nud_B68WD?J*2s>5uJc{%;1|@1`vZpiPZ1p5@$D&5DJfg2H+A34^8j` z+Cgymq1zBuhVTcMNACiQQu-Tg} z+X(Cy?e?-MAo++Aq>Q2lHf-Y9wb*!@RVb11XQ=_qO&N0{hG0Lz3#CLX(&p+;GKZ4Y zW4lNn`+eRbv$cJ_h7T09Nz+}=gzx~G0+owT!tTRmP#2?$(cSBjA8rW5l zVQn;~j97u8@jDk!Rg(4-=x$*b7Jurx*f>cuJqJTls#*jxCQ1XdvOF7&x96tjr%t`Y z9TT=X=y)DBIeHCKS7n;<36c!EKrVXI4emvp*}Zke_i!VoKX{l z4DHo3W{pYv2BITPyk;arZr0pHZA0kx@+@4%#@VXZW>o?YLx{V`xQWe)DPtAp0z{67 zrHLe~rtDY54_NnjvGm>eQ457TKdN^Ewf$o@!IBr#@1QW9u z5p$7z4$6J9gCzF>HGSOT)Tr2BvrbX&hhO_9<8!+<0k}-ZR>Jow 
zVE)o@vVKw?OIKcOeAK>_7}!(YSxMd_IR$zoO(l6lM3>bORUANWq_-p8k1FhfUTWjU z>_6ZeP$$=(eiAz_PwG!0oX`)K#s-g+#`U-!|MYTy>iQ1SWz+?p5}yLz5#9Ap`V@S# z`Kj8#QA(pS$|vT@n4FZF3VM-^;sRrdH6P3v--K}vcebV|@K=GND&1*VI2Js2I*~y` z$6N+F^$y~v&Qi)ux0#`2l3$T6#&lMtmZ*%Su<9R#qLisH17|F$Y%}~pDXJ%PHYSF^ zd?~xynn=;84+kLE1go+w3XnhuJKvUkWVdWTYxb@k z5ut(bAp%!4U{NLzjLG0>Q0<3Qzyo146mL+8kR1(oV9<2d+UtEFD;*&PhhGGa_%ASc zk-^U*SQ-)EmwvcK>A4JZK(G%N+M%cQr}bxmSrdX-DNyafGs@c{WB(;CwgW1KG)jiv z3{*-;PJs$w0woyYaSnc%xr(4rO$6pR8pLlnSE>7gi&;WQ%&;6%tj3jy)FV<)2-zbd zQjco4M!-4_1nDh7YrzW(EA0M4ly9btHQS^5kf?mzuOT>WBIqR9lw`}vMj!#tBQEvBY$C@JsZU=6 zg+nwiRELxy^yhi^B?cmUoxxR9h9aTRY)>YD`DYo)U_cy;%Gd~s2EaA|IbvEg0Iwa- zT^dooSa9Tv$_RR;u~9@lK~&?mMS^NSH?UtH&n%*}pAO+Gn06Nz3e%Jm%pFKTA}h5N z)Ig+y<_^1`NLBGo1$62WO#O+##e8Vk(|M;%vpw$!G*Ph!nkh5Fh-@&+&!fCMKy}~z z32Yh4sIMIEw)3)!6?ynAcIo#>2fM_(PJvS&=eh;)rtH!4SD+_ZM%#8LAW?fP1{TyFc71&pK`;3&~imQpd@Ch}1YaRX%P7{k# zd8fsjr~MrESS98CLw8hI4xp8UwrMG@$lMcAYl!185BM|>sHg0iWP=x~s^#2p;9kcn z(P_+BhT%s%AnD$irC=)AaixDI#5&Q3V~0?cFfj|L3@v8V$Z8VzD6`>5vXZK$o3vZx z7~@wiSD0*|CA+QdbYGbR)Luo<`}C(RF|h+f4lE&|>9m!vBQ=Y=06z(YvSSd;A8Hxs$2nYF>&y|5N60%vkO^ZA;BWrTM2_wgz)yNP9rh764{9 zCbI$UyKu59%3RvQ-ITMp*WQ59mreg5HX$f$FG*zKvdUx4;MrjP2WJWZ)P<(@yl55+y?n4y?R zzzDplcE?5K$2kDSF=UZMjHZ8g>Z%FJ7;Js6>_ll=2#tO? 
zC7sshvgZFDHG2xth&-UJV3(OTXmQIKF%QDT@u?Hjc?FKbNrL8!>mWJ{>st-DT3BLy zRQ9OdnUU|8I*QEka-K7$jK<52*q|LlxpDD4obD(d^>vn+HeQ8M4@xwB#p%;*G~#B0 zd0(d=Z-Xcr*r8|$jfOAIPy+t*7EmAX-Jqk*Zs-bYS`-tZ$SH#hz<|NeLLGY?m|lim z{C=9EAdN)SpCsi8s+zd`D9IBi09GlFf_KDKl5`XMOXgM~_)+Uymm02$L+^l=x%sE4IS%dmm;dV`Y>4Vy9Ne^BmdCSd7=|NIr@{#(m7gfdBrBjl-kOgSIj@+3(|mpL%B5EkV{3HH}xeI-9;Sd6}dJs1H?^}Oz_%}-{d(OOM6l7PEo!c^p!R5kx~V`trCAfnVgcUP+At+V4_Q zV+mSq0@lyxDua;K#`TebL498-^?`)NFq7EY_(3@;F+(bMC|WVa2Ea#o`=!Wdh?7Q$ z#ju_@VlmCfId~M_iZB)|R!ioKsO772`q0Rk9N(Vib~>DeWwvgwF5f`Z8}og*T0{fI z!5J+R^ox$lZtCD17k6153zVYcQ+9IQqcEF^Hcp+lH`}D@TUy{%?mHhO>phs6vm%I2 zQP}&V{mGv}fD>1x8*}*1`(NQbp0iSc9G;=W-1j*>=wG58SS_d1fj2lM_zfZi73u`UYE7Du60V<*ELP&?~2wgYn_Vr^%Bm}3}_#UL{BpjoJkg9hj`q40#k!0IMW!| zX+w!{vsi9N(Sacn*trd=EYJYDNYdY&nt)$eNIh;|L9h({^<>!__d-+T|p zaX!$r4|MH=lC}cR+`T*t{61PDHn7F&xcRSeMtj|XNEDxQ6&Dcv?7&E_BqZ1F`Z(c9 zQ48nTLmSbmM?kzu9>RUjVXI1Q|rsR(mt6$qHAF-+BW#cN|99;K@ZdVyzHV`A%T)e2={0Ge*bW-YnQ zTeWgA;*4gL>@c$iTZQK)ztWDw_QL&GxPqn$D{Zf5#yNxcO;{S#o{$53QrxCS&6ust za-)$Ouxe;>g{!WSlQuI!yl)L~-G!5f6m z9F4X!84pk_IMNJMg)+mTR%5$qiX}i}!eb$S?o{A3BYTLUTidq^&sVk6+$6)tj{C z<8TSaSyIpSO}$|(N)MpfxfzfvqL!i;9sAkFMSKO@EL>Oh1%bY5DbnlxPFF?4FSX6i zvSprq0vQBIt8HK<=1;cQUTQ-lX$NOfj zsL{rA=Kv}W7Rt7O;ASp4L*c=0ELFxdR3@=4sO`E9u zMcRpdvmK3Q>oclcwsB_&R+knOQaaPDcxbdVV~Nf9RR-@cV1qU{Q`nfJ@Sm7Y%6oKT zOy8&PEAL0R9|GIH_`6r%TRJ@2OlDmfX%emuet;k3V#U+AkcqJ)PvdP0JMc7Od^$W0 zZZ1$YUgR;Zy$^F>EGO1!AhYpvh-|UU8rpKu0D~@SS3~{}%3D z$Z>1o^)Q>c)gMRvl+=?`dOZY7-ctk%!b}Mk>W3()YWo26NP;a3i-KTf69F3nI}j{r zIVD&DAVshPa+X(ivo~fEf>m;R6R+y&xV`90P#xm zZ3Zg{Pzhl^Aw9}`+mRlJ_&HXK6sR58n)Hs2rs89N_WLWO|76+kTRUp0N)!E7qKS$d zsc53|zO8%EM4^)w7*5{5m1DTa%8so5t6Q;P{Mw;y zc(CI>JRXco%OqOp<2fu6PWZTgneZ4YxD#mfa9H7pe?R(4-*BL1L7Rz8cQYJbBC-Oj5kC^EbVy5~-)X|bR@I4|Y zAWPAXxji}kVM^gr#(D96N^eR@ z`^?eXpXTXJHu!eQZao`@51-!f_E2sHJ>s_A?h@vQK(31~(cM<-s_$Z-Z@2cxN8GmM z9^Llb_K%ynZHIk2e?Ktnv&U`Epw$Z?i9P>JJ9iJm@YrFSwo%zQAO4(m6qRjDPJ_b7K2&d%IgYDc+?WtsXf&GbDXN37L-jgd z+EA~{>x<>hwLbUmd|I}B9@p}R7*N0U=|S)Fn3qp)d!HTY(?Fp1YH2m~sdS3SCUaZb 
znhU6xVn@53jMwv1l^-O=!>Wi{$kL?*xyx!i?WGrT7(t~83jc4T8k>|rZGqPi9cPHA}G+XSZ zNSS1Fx|^1zu@NAiMGzpDUF4EW&e>Zo`A2dJ`V=6CV1OW(+!g_n-|toR2W86k21yWv zGKrT$43KFg@z!Z-RC6dwLrs;|^eR=3oSmA04E zPARRI)y|C6OR7Js1{JkaQI=O$9$IIWS5e-a+M887bLt~+Ug5>p7_zMVdG!ok7nFy8 zi)`Fk>rSZ&p;#0uz*W2v+?T4eFH;mhyB>l{zatq(+FH!hd zyO5Sd1(`uRR5S%aMEy@l`8BAE$2kQ`l45Ags0TtXkv?n(fKZ8b(0oOKNUQ@GL^&&U zQ2RCIos&8`o>$%lse{5ZAj<1u8K}AMRFvZKd!1g}jX@f_6NI*Fd-?k97$mAMF539q zjqP?YjNRTavg59A?+3f~&>i^ec5mqUkL{o%&Frx*9SAgXLx9A7Xh(kd$PXji9eM(H z5QGo&=DK~iV;=@%`@kK>b{yE_$QQ7_8^yMUl|hLj(dZ4w!8o%0M{a+tk!SC=JH)r& zm#&z%8+djLLpdDA(Ze`yVsWlL^bhPCZS;w5w9<2dL(;N? z-F?3ee0msp3_P*I0)ij}n`Fp#e;moOiv%{5yq^*!640yJM;=K)Cq5kc_6-{cQQAm< zB4V?>Zr>aFBzJ3oRrz6S-EQfn0m0C2jiOdvdg*<+0kq^@F2yA(-zn@{Y=jl^Qw z!2m1rd01oYsOg~x;(?obk~5M5z@6-*aWTj3e+VBvddgNWMaFE<_;xn)@~mlBI*REpaU_rX0MBU&Z!tutCu?;Lp6C5i&DQL2LC3!91efH^9f4J8JdkJHL7a$5Fum;X8hKcO=yKy{bt%27S}D z0i>gb>3?;y|0+dg+;Py_>FwQYIgUO9Nkx-Aw4gV(+H)xR;qMH-oA^emq|_5jJyFV0 zDn_NGp6N$1C)f_Pr(2-sN&-1M2?;${mi9UIL|5k3lNo7QP)}y1vZ$U^qyoi;88K{? 
zFyX&j06-Zp^D#c9o|RPiSF8^OU>}-tCPisM#i!YKe<5uFCg_M#cl>+S*>P4)bd=dK z@52QZ{)xpwDjWgJc?cZPaz^c+Rr{~0p5Tx1VJKbaOwh>{)Wl}GZYWbu(O8s0EcH6q zBMaUaqvs*kuy3)um=E&0qK2|=Ld+>Zu+NlUWg4;W;k#Jc|n~xV~Si z@GboO%5VG-lSYlM8+gHRuW`pnil}iv3}{0{jnN^Dh~ZV@3xU-38?gwMzI*70C)NwL zS$%Yv%sZf9FLs>Y!KBf36v|q#R`BIt`YTVRs&&OWi|=LYTImAby=I*+)zGqnzoT;n zGpC*o+U8#J!(XtsW^_6eJdxOPizlF2D;>T2z}QZ4IEEIWB?tki$xcz|K4%DqOBkkR zg&Yk(-^PznP8X|q1eGIf{5Gl)Cs(DTGes=r?W?0$e<_Ok1@ojG`m`$fZZC94U2Sk=>~XZxfN_1}tEY_^20ng|=A;24Zvj%x z^B?sfu9o#NN+0Kpyo7M7z(5_iu_h&{7+zxWEfmT5(1(0$yQUlDU<`jDJdJm?vhZAT zsyz;2bHoN@XEB>II?j&^8W*QCgsfK)yhrfl8&<_SnmF?rRJIE|?}V0J_zD$ssGvsY zRd`^w!&IR0bW0foS5-X3vL)C6c7yw?cMiU9s^y+pCA@;!K>(;yEME_S$0a^mUmj3t zii#?4CG#M-L)SG~B+a!Fs#U|+fm%VHukrB`3n6BuDITm)uR!hJ1VA#gk1)UqVr*F- z0HcI(`cy`f{PN9un}9As`o;n+DoO`JMp}ToE$&M!ArY=g0wb)zLey+0u@Sb2;lzqWNDx-~SxH?)h=fguH>paq zw&eafj#|o|FL2`zy0Ei_Ba7a97nb|3?++0O7zB@Cuy?{>AeK5kT=H+~85(93FKOM5 zf`Y*h3mP5iGp4RHJQ;ezb^DJB0g7zcqHLox^Kb~i0{CG%`kk4WpcW+xnT$)lY2ONLKtd;jBDq>e1(0;mKoX@B>WDGNpovU z?5%J91E1Q0=#XULgA{cMa21$zva7a4ryCuYfW(Q>qC=penRlL66C~eSkqchbwYE%S zjY7w9l10ZE1m3vMdew2h9J_th5t>;S3&56$o(FtO8TzR6)R1mWCCLv@JHiU1fs>V?6OVTUYE5sAbp_y(Eu`9B<4pol4s&7JCx?>+eBg9qkm>9gC-ZS!>WUM315>|l$HM};66R7N^7WuXLzw2mUWby#|( zAWG(Zz5TE_5JpO7`^ZB?Ng3)}i@e_jvbAzj2H1{vSUop(cdDs>&8LF6SOs`;&KMn> zM;vh2f}KCZn~0)EEv%F-Tklv`A?_COcS*``Su3RqOo3(VJpL|}!aIQMeGVh#o5!eq z13N>tNl1QZ3S7ij1kBG-fi!>=VGQLBV;VeDii~Wjr%Q#m*X6B66OM0TsaK6Oq8x&a zju;SZbfh)F=OQj>wIg^0&WG3l!@)j)y@03#Z@&|$irX%DC287T6&#G#(w&MN; zMBt{F>U39&ykrtbg)|5<3=yJn0bFlrc5mCLdXp_`asM<;F;bdG$e=SQ>f+)&sm1W> zSpxbRh;*k@6m*fv0FcMFAy~Mz;+g1kB|%3qZw#K!#NMyfMD4e;{FdYOLLE`hO^thE zBOo3+auGwvib=E(k&Pfcqya+%D}X5#kwm$q(gQ?ox6=vw9tLf-n&zRUlQG2kww<@q zEYK#^9%k|-ZV$@sj(dHN^1%dHdq_(JBa=no7)U75S~32Z@4=km>fLc4`2biMu#WU5 zc}PCRS0X(Ra7I+t?e~r}^NGu*k!I46iG|b(1m-$D_+UI_Rx`9K6S2i#b2ry&A9(-yb2$9_blO3mlhCUQZX#(tGTe874HL@W!t}V%! 
zJogIQg*TWJs)O?t36u>8OtVuyF?W$l_8r3q?_Rq$on)_gG}3dyS+hkH>5}91YV;x3 zl0f!ztLG*PVX`5g19=gF#CA$g&;((!@e0Q%BM>EsKV&Y*oGqOi5xEt2>YI`%K}a_a z3epQ6PUhAyO?wQ`9uYnhH+sY|_Eo5;MT*`P5n~iheACi^JOX=?bz)W%ZAfK~DRm_y zjHy!nVm1~R7aRz&w(pD}I|FYT+izuO->|#A2XQKgI_^RzdcXh{3x=}Y4kAby$y&%W zMgCf?g|x77o|%46iPaDG&}Kd^U{AE{-24Om*2fO!P}b_pU7m4JL)l>mnM4~aYx_0Xk^~tG2bzd zT5N6W?Dc$V-6K_#+wKs^$$aWXCFiEu)TT8v%e4AUUK7|tE3BsE+%!#{T%gQ;-grnV z+7d}-#RW-Di}O+NYihGPHYq{NOYrw~0_Ba97F}C5u?qTX;m7Q7m&JV+f5_qoEIwiJ zDT{k7wpe_|f+>pdBNllebQO=un?_j{ylj|1at-uv@IJbXLVa-=De)?z8`J*6CLr1b zOFsy4s9X3(H&C2(z4L2a$V7w!|7I2Ih(}2Nf^}5p5&c^TnG!qt1wBVFDV!_uyLfjk zH?$ygGym0&ar^YuVgmr=HCNu;oAmEKb07Bx zaX^d(p&KOH5U61VAs62IC~e7wk6H;AIf!IBce3F4Lr&PI zLn58_L;yloA_~qhHI1+|rjwg+Oye2VErMBJys0Bt#EXIqJg4{lV8TIRc0gc#|4+LP zG(%1pBMaHV0@bm=RKBD`H^GDBz8hw;uc_rvs(;g8gWwA+d-ONV7^&&Nw9m&#&6AT>xF*_R<0r@F z`sx3c^Dvz`vd!d(d~mnx;L?b=NE z$LJP*fg+hRQP}XOcuFy!Gdz>bwu2z_j-j2L(_+FXPy_q>$JViWY#r13Y}G0!OBo2k z=>moV;JfK;)ny2n+1i=#7Qt__pv=VW4Y@H#rj~me$(wltZpZO@on{2^U`TRe`yBZ4 z$hubaO@5s;xx*2u=<)wNG%0nTDp*i^^J-^7{UBIWc=07bR#mvDo?+;c3h-~4jXTSn zbL!uUbW^tEtGWtSRd7lLYwF7twX*`Cr#T6M)>K$i2X%qFiWg@%HD4^0UYt_lvf5cw zAx2}QquPzLUtz_n)o{|&aTv$_Xyob1$lH&ihv~|;A3to5!|n%RFI~(Y(z74*<3#s& zCrKEz&6xC5JBh}(lkNU6{2_;)`Z)4~JNsex;W+A#k}fC7K8vW_!Z-d46d~3wq$N>7 zX3!25LqQPH{1Z}s3F_i;L4lH_7#efxfzT_Y58Kf}sKh#GzNSDV*3lV6IV*Kg`)evV zCv^-wuYwCw2ZiTAls9x0sQK^Il;ZOHy?)nEKpM9fY1emyV*PFc5;d2XU3~5(ZZ{ew zet#6Z$$sb_M7!?DABLN5e-wm|-KZzc9I+__*lFx*bV)+(#-V-`>e%&1f%H3!^uwaL z>E7$PN72MR^hb%CMD8RGrQ5)d6W8<=phS^q^hc9u61(9ee=sq~b9C1q;X4?}P|Uj@ z1#SnRJc98I7|)MAJ1o*20eZOK@9w)gOePwO^W9N+=w9z)OnkkQoeLb2jvMVBgk9j% z2jmIx#0pCYf)H#Hkln!~mSvX-Y$^F^MwD2(Ud=!9NCJAv(KvLkyEur_M*0&Gm+ejW z{wO55J4380)SXSYW0nR4wc8oTou-U3`*IyrcN>aHJ$!Sh?!bW6wIO*FQ< zKicDF$5=KfyYD{=L82I~1G7{jXE2fDvgt-dPrJiNpKt&+>AE}Am_^6zU$q&-@1G62eRShOmt{rkqbWT?~UARtlL>BMQ+}3-(}O(`PtqFu>Np72!}W@Mlu9Z7>}+b zZaf)}BaL5dG$8w0`$y<3OzU=Xd^_$VA7^WKfG#5H1gEG6G0C&J9ggBi-yKAUZV+`R z#WeALbQt4!17zX?3(^F~%1jEH>>kNMI*6l@Kn%hjn2Unf%7q7EFVx|vdosPRx}8rx 
zz8a49`d}|%Oz;9kVGP#5xdoWmeSbem#_{#7t?nS21kJrD0+Tjzytjq}J9G<3$6I#% z)#>r;6qQNOLu;?UceCSp<_x4YLw4;zZ?w7#DEZ+pzjyGB{~hn1IO>T~o>B=a74^(K ziaEh{s6EpHJy(&Q^V2<{=c>}apq`k@qIxnXElcXjyi}IelbTeZ*f1l&*4an@n}ZH0 zlT|(@r_{5G(tpYNNID!qQ_f{5Eve)*#~v(YEx-f=QJR7Oz&Z!ctEqu9JLY}3r1X(P zAT>h3avlN)w46}~XVn3;uV4%KFkLtiR5BJ-Q~hc-V>QEJSs*y-4K7Ajdmt9iLo6V0 zI9O~4d2N7+-GkR0u%Jd0^^$4(`94ffc*10VZztoce4?7dMA5%wcwnK~ydXonvDo^G zheA%q_Ai~yv@(vbw+ouk%Ds}SjF&2W6W>RAQp#ZZvx%4jmQ>KUW?04OUZ z5s@x<<_M2e=>Do%XxR60kwp3cYPK|e{WgK(5qk{-c9 z#ic8EjJ~El)L2O8;&9MQmvU8{&WoVZ=eg7iEZ$(j_5F>~>-hQQwXHY~zPt4q%o@m- zZnzb6_x#aTBGO{uABB3WWLP!FN9m#mV)PTw`%R3GzlTCOOU@d;{L6mjsZw{YIA`&_ z>|Cu}z`N7V`3hUsoVs&-u4JP$GoW2=3qSm=;4558Pk<*1C&=MWwi}h>cOO{8C(#p# zaGG(DUWS5{WBzkioL&K#hNB4$KQ4ZR2&Q-&k04l#J={h$=H#k!e5Q=0ynTHXo3BLC z90H5Au4BfR?&~u|ehtM7 z&Osdu$pLiGFI&sS8B*t+^5s;~J6A8a@PH$WgUdx_tALDuD$@69MI}{rFsBaSH$ink zVnUMQl@Y@b(yH2S)Q(&EdSQa}CZn$SJz@rOhbW8+cL=scSBEqS`EF19<9%Z-p56QD?j#2iwU_~F~ObyoM&`%60Nk#ol7MD?^ z=XD5C)%EQ#h_nR8zCMk2jjHfmda66oFc=csWoJ2`Gk)$s8C<;H2x42saQ(oO7oD1O zJawq@UimKYytB8Q`3WlaP(cSSD*c7s4qJi7GA+<@O4ad@$d()!bpqej>>M0d(ITRM zD}V@_fpDP;v3vspk1Kq1M9)-P=^{w&(c?^)No}J^8g+dUNR@EG} zzk^=s+yT7z7aZJGv-MV!;M}R0;$At;%6BMqFO|>lRNpDm<@|UZW2Z=W$&!vsEMCB_ z_^#~-E^Tw|vVNz9O_!A-xTDfq;_JOIIT?3NUGfFK@iGbpC9%wJ<@jtFX9HmhS<3S} z9Jh~`0+Y_Lnglc0BGDmMB&2~j&@U?L5@H%` zLV!rs+Km5`IXGgrVu@b#1yoDMb`=uw{8h@8jVJjsWn(baWJR(Ll{f6mq$i zxy5h`jDJWZ?=fCMFGm)LA<<}XBt5PEox)7O+pq?(MMMjJk{dqmhGWVx`W`u+HXK4H zgs~L*QzeiVtHSpnGI|NyeteV59)+NaIPl3nrl7lSXb1sl>?y9DfcXmhkS|kz3qbU* zv6!}&T4FAJ^&j{&mPBu)OCM&aOGK)~p3_5(A$hW)@eTScFADD*0S&{pi?oy=?Ka9R z@Uo$eRT^j%H=dU+d)_b#CIi;%p7+JXAJ{hwo(G4@^Ypj5VoDW#g~bL75k=QnBhw07 z>fd2)eY$v?k5v@3t74i3^((c?Ww$imW$9M+y!V<%hB*&;4-1lW5#h&XBZMCSbL71 z47D!!9d43n+DmkCP+I_>BSr~-%3$<&(z>cHjZEP4l<4PW^jeeJ3Tgle9Ra%pEBHrg zt!4cwDiGu>R`_Gb%xYTjiWA$1Gk7>vTKieM1WZ*B1n!xv3sV-2=WOFMHQ!Ev&Uq{B z(6!yT;O~|s75UmfIYMZ{YKWx@?~fk!bu?m#Ade};B5X>oKrVRx4@Utg9T0yop^r}) zLn%iI??l7#1QFT2BK~lAc-S2nMQOBUrE%N^i0t!-%zS#Ll$X*jfD1{qK9N+n;>6v%Gz~{gHRC^}&aq?<{`W 
z_I`N3z1`lae|G=DM;|`0Pb;6?Zg1PC4 zRiZdu2+j7x%s|K|ogW})5T{kBZ6ob|8@M;BX%*dejH?;=iN90NJYznUBn5>;dd^xJ zoJU+|n0~!K#+#U;M=h;YE<0~K-+`oC#@{!j{HC*3sX43oJCDB$6@3>y`=7%y`RXxh zU%|dmT@sQXngEyZ71{Dgh`9?f2xZLyL`XAx7#}5K#od zBp1=~1StsE2@n#(?GzXgAc*pS;b0oTR6v9Uq6Uy5d>~k0DMF3_AcO{ZR@K{$HUCc` z{qBfmPVcnLOE$?IH!PACP54NqAx0#&|_s`H2kg}M6-aGF_HC&!2GZtPy zqEFWXQR!@ok{Ytndh*yd1PixT91(M_BtR&Zjm6WO+WU=$X#8%Ti}HfLHqr0G%D6A) z0b-Y9AMtLim_*aaWJLOiwhL{m2v$@~5*3ol0EoMOuNMsh0PS?z_Mu}EC&c--TeQ+5 zFxJ!_W(p<74$AFL`h$S-!3IP7NFhXH8}TO$B!6hASobRoV99Xx{$zmIJq!#OMrM-& zB#M$Nv6%-rBZBG=`p1U(#6vSkGr`9sKxO~}bCb$@Fc~p}*U54@B_uK`S%UV%o=0m; zl7+u2tlMxbA6d^4Mv_4Xp_LBI$L>Br-?*`jXbVy|h-{PS5y^E3lg<(ySr&B65{&Rd zx3(lG^qD+?puh*jwxt0WS zpIbdQQ3#WRe6GvO2qd;sc7mn|lfx?=qntpLAmNC48+*1)8bjn((rez4q{gQ@N}`hV zf^(C5G)$?^bacn;pNksjI zqtQVM*ZR#&m1o2nM0;|gk^Gq;qV(U)h$TMV0^SrMm~fAi3xXd$_L;MrPBhA!+ekI@ z72I6u@(L@q&IKg6m2VplzT%m>`VYyoyylHGT3XH zxY$OndTj&3X&a5kbwQNfKvz+`_9P>PIT_&C;q+|GE=wl1fSO3SZ`gG< z3^8vr5Mg$xh}Kwoi^f2{7q)>vjG%YOBX)y?b{laBC24b)h40WqlC>G>y)AGdh&UYL z0*SXbLBey(LXLrDPW_x@C-3EYw*5q>F!sQP1lIh(oQ;m%b6}`_qSzSq%Q#3T2pde#r8K#2{Guk%-T2IY~(XXYaPk-Nw0^!gkaA zB+AD{fEyBlB|4h`p`kurKyuFj+6S<7>$9;zK;;r!!Zg(Mb+cUQ$wsLQ9 z#&;~D7OicQcV5h_dt_z`+Z_TqUCg|w^xR1{wQbGKGNXReuSwrhE3BsU+)0``y+E1$ zyz!9Mj3tuJiwlyT7U!en*EHr$SW@DZMb_lvix%BjwXq8NYWhbA5a^FsJYdmg@iB`} zShQIDn8l|o?z3QOLf>Pto);3?c+)D&l9vthN3LxCHG0O(iK(AnMh2X5+>`$F@1aMR z-Lelt9O@RnF_SMZx{CQFE>9vtfq(OgbxB|#Fu^*a5{robE$JEn9RE2zM=&XzEAhK{ z&#W-CAais7)lG0y^z~u`=*TOnyk$4--+kgg?hhwJ0mTb$au2WY@{HQ+a*6!#m)|@1 z#=N0`7z;u-$g3ex!wNz!ysuGd$yJR;1y?YLWHxuY_!F7DLJ?`O74!rgBn z4j0d1F6Asa$8WT80*gDcuegZtCPp##su(b0xY^!+L1u|)GTnvX9}v{PkDmH76elOU z()ryGSEAO5ix30%A!aa<4&5q1@WbDnQtNn_LE|AV^1MazA^!&4O!`sMZ=a*HV3OAD z{)?rQWn)*cNxV9hK&q$9eBkBO@cC4%*ySYrI|TvHJI5Di(9XNH8Vi~ySii^OeHJ%Z zNXp?Sto@7y*HS9p<|EJTdCtP%X??L)K|-P0MLqwJ6u0n=n<#!I9%%zD=I8zQ0Ha_H z33Iwr(Fa+HXgABzw7{}}PGP?>eP{KyNM$34n}`(gMjxZj*~L8AZ_t)K5ZGMQGkybY zkQczn<8?4UO&~B6E*a=H7;rN1buReYM)+jPH~X~+zQnwj@B!=v7?v_^v(0$I25|Kl 
zj=E9C+w`E%TC4>gMLth#U?4Vi4y9Uhg#G(Fh$1&qu#dC>|tL^a&_gPFO0 z_P^yhoXnYWeFfLb_5S0j_}Y9EL&0ob#s7arm0=Xv?}}H!{1z(Fx^K{1{mMY#BL_sAWZlQ_aYoCQYx+O#+>e8jQ&Fu z>4J^FYRK&ThR1?sm~_4yMLL+lIX!1&gjJok@9)kmVtr;2Cyi+}YA+TsJ>s}NU5Ioc z_y2k}TYVMMWxjDnf1f>Xv7o@jWevFkM~;?j8R?rv1Fpf*)-qfG2Gpb__RfPBk5p^T z+_yLBlDmXR6_5XoVOaBliRFrr6G^10jPyK<6VOc;-#1Czo9q_T;&~g!#OccU>V+ru VZ&jV*kLCrR*1DH2T>5&=`A>C}8z}$) literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/files.cpython-34.pyc b/tensorlayer/__pycache__/files.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa187d7e4786fd3a9407a834ef0f2287ce6ab523 GIT binary patch literal 62049 zcmeIb3y>Ule&5$Kvu`Y3i#G|91AydWN$e~Z00|OD@Cf3;;~jy+alu>O@K(FCGqbw` z%+4&jXR(VRaZ--7cvq5-QsgR^WLG4|axBFa^PC+NR}!tt149~J5#E}mdaIO zVyhH8a-8!+`Fwx>?w+36#f#)wS1HZz^mPCG|9<@5|KI-)hPMv=^%s6VztW%be&0)c zj&T19-{AK%o>%g`JlB+0N_*9mm*>Xar@eeSy3cs|OmyGl<$I$0UN7Gp-S>I6&Fr@YeB zUTM@T9ra3MUg;Tc@raivd9Syn^sKi)_Z;&|;~vF-KyQ(0$BUdQr56-6;gybirAe=J z!a^unQBQjMKjkgZq2%L-a=lnqR*+jK45g|0m5HRygILL{D8rx zXD(RyMfIxMMn_#z_kO@MqQ}VpHFdS}r}udLf@S`?I$!OgKdJT??fNARhXU#0H!Se7 zMn~Dw;a4Pg7gG+PwHpyoAgR8L0t9nBxzpYTdw*F@4i zLp-AyrF%x_2Q;;GPff^w)+RzmZ+5G?ucZLZc9r8@ZvOH|m8_cD`IIE-e>$8@@P}4GOo)*+Q+9^~*t{?w7O6 z^-{T-2(K#Pt%6@E%vZ}nZs=OM7S#Pq)%sd?#joG0l*&PNZZ226vC^2E%Pv%^0P^smn6mWDbd^NjLpoh=8XgNhb8k7Utb=Y#UTn;PpkW zVIqy*y}rE79$hXoBBku4qG@788PQ2mc@}KtZIX2ys}Ka0#afs=Y+E8jD&}hYVD&kn zXmQ?_4^wD1EJT&j$eeY<)7|8_tg~^Mu3vB0SF^=JjVYnJYI-r_%i}f|%FIsnVYyNE z15KI*mSufS3A2+-<@jZ?Vw*MkokXkOIX*KJ*T%J6g)UTlEsiC>OaRr^F`8dUEo(5*C^~J?XZE-Bq%G9bWt=`o} zr5dybZ&iZTLbcK?w??d#A%Il36;s`q}LS?zsPP5m7YL#-M85Ebwaa0$+w%nL4*Jc;| zuJDB_J>Z{s?nWHG$MSLElb5@Ki?@r_@?xPD2R~7&uhm=%x{mzo%d?C1`XcuwzjCYG z+GQ_`{*+r!c=fbiB9O^Mm^Izt?^wld92Wj}fx09DNDeQU_> z>}6}vF9R7Yc$?*BIaJA@<=P!)VjA$_@zOR*xoOF|GN{N5B1i&+`oqaA|nnd;F(DTDFuC zz50GSJvG)_Q2IB7>$K$Reuar&sAg^YfE zQflb{h5-wf&@oVGwLMNc>1~He2!%&^<%HyU5Q#WvX$im=ut>yMTq}a zxq3US(bXh0$Y#gmkJUF=Dy)>VXR?je74WQDpwBg$=iY!OI|?|emY2#4h1DvB2HB(I 
zlgF~BUpO^(EL*4o=jePZ+`TyQ!m+HrlVf8!%iPf68L+qCQI-WamP8LId&Wn3N4*Ce zZPdfoyjfPa1(rH&iQT_FYrS|Z%bXM*K9+rZmfqB}{qUa7wAcK0x7X#G4OXg^MnlCZ z3r1w3Js=bDU~JOI7uO5R^j}A{#;sm2zx*;ad6>G1nCQTh9nkTp4a-?|yX$pz-PzF) zt_Fsiv)S)qsWzVBm|6>4JwlePUaMv+v+7ryhn5ilLl_a&W)moJEIU^YXK(hUaNMB&SLf@s@lue36kZHg>J0`kw|ZmZ#PP|~ z6USeii0W*kH(p*}sR9jai@Bx7a`ol8+1WpZJ^%lJJ>fWauG0TtFz9}zZ-7O9jzyf` z+L6vbPFVAE;neP!xVhlu_*!{Bs5HvC^6GdIkOJW_KA9^l7n=22VJ$GYHJ0)x$zv?- zpWw$osf*x}yF916EnFZRA^_q~aQ^^cun4tM_Np-T^%ze;cxgG+Esl^4{yW z2-{r*9uUn%5&FG5Tf9cUcb#lWI&AmkoJx^43@3#PAF}ZNC>)tzGX78$Zh}Cib@Ai; zkrXE54@B_?qi`4BT11`Sqvl(@_o&#Qx6p?uWoKcHQ8<@fEZ53@r5MXjBvTOHa)5oj zT0xW$BHs-#?@W?PfmpG}ha|W@J=r9BP8b>@ubb=&EAC#4lU*y+A_+@3p*L!vel(=x z!19~qr%(}$PN1|Qhx<|mQSLGR?PWP=Y3$Onc$W*yLJPw4zLoOc&vtH?`f& zXU>;+o%TLD>fOnB4Hg2^oJGMLCkq~#9mL5HC&dbyE_vQp-e%%oAN0QEX^jxbqhd1e zp#D?dqmchsT4{g2)mL9wfF}QV2i5u5W0H(*@t@(o)gN-gt)5D4q2B6QgkNs;)K|*2 zR=T#*8WP(#zq+tc_FFyTeqfa=wZ;preh5lY;jR7!W>(|miLqXTF|7g7Cq`V^ohYJK zYQZ0;um~0jwza`f%W=Gpq;k0ar-=}#3%%4xdTV-5YBaOGrYpUd|=x zGMCBbl}}cAux!fA?URs;06G&*$_;P^<43vCujdOmyZXyjP+ap`Y$Q; ztS&F>@_Aie(d8VMu+RJp3b?4tC0(qqQicNV(oNOEe7PF@0+9n1rf#B>(huH6E`#PH zZwM+NVZ0DBrP+6n-^sq5eRuLs^AV%}$6aTRS4!(A$5-cXgLD>~(L0xm-c7dO9cWA* zzp;5T_iD3LS1LP3nT@@r#_g4I$zU9)ciB*s?Tw0^*OOD zQ*#B0v2}E-zYr9Bzi`_>uNb||S*GZ40v8JDx-NcAcdsj2f5`N-`a_1S)#q5SRv!Vd zVEJBS(DJ>;pxNMbOlm``Lc4#KZ*Z84w{1_)p6w%pPo%cHzs#Z3p`QJz_Mf5YkF!Mm zScvpeQCEF-^3~|71;M{1G>Kl_LasIDR>5r5plbB;53EbT=k|mDQbt7sp5n1 zfNjbZoJp4MnZ$ZIED(z-L&aZ7ff=uZ935ojrZ4eGmEN_;Pg&%@){g9SaYF8iB<(;j zKBZC+82o9YE?hkb0W^$S2$?^w2=^#GtU8dLYSRH!_(wD0K<-muO4`6duSy?p^X_c* z-s1+1q0ql~XInJp`>oVH-ht~oyuC`LXgdX~QbfJ8-CO?y8_NSqc46ipdv|tt>;Etr z^xF!0Z<}`=EhLJ-1KxYv4I&aqSB_cl{HJMUyDMrrLuNJEe*iTO6T~Sh>9JJpc}sQ7 zQvLlfm4-Zuk7DsrszOYt95kK>Rj+S?8OeW-s(pR0cYU8d?$=|~B=_#X#&?$SJ{o%S zE9vQ*FQ=!K(<9aaxkNCshOaX$>%BJ|$E&dxnFj~0 z{}0*yV``WvLKn{bNQS#vLVQ-13xC|=Ln*rWtfyqS%_l7lX5p|juv25>g?|p+ZSea1 z>zB^GIX-zjqFF*3?)3(WpjsetrU;tnP2_g+#QMn-Qmxfj{Yc(csUeTc7OMzh%hFb% zPcte{DWW)1y;+Q$k1svCr 
z{-dCagjU)4LIc@08V7PiES|NzEbv&iTqrKJtLY?&VKJ&*BCdL%ffy{vA|EI=kY*}& z)+g8ca`pCjfV8?OVVO&A8Y^qN713_+#(ISro2<_-FFR3Nlnp)ViU5})MLuR(&aWZ| ztwY0ZXd{B)sG?{k>DWYVtnpS6ijt0|R!6+(JCgcUbVuc~31F9xSyeXV9n~{xzs9U1 z|3pij^!I^ftq&qBgALhGKHy|y0Yc5S zwY6L^$e{;kebjU1((1%FZupg^>#|q^X5B+B7bMJ%B-z<&rFP>%vjQxQJF#Z2QShM^ zC0L8HHBp_agUp>0DpiwI++3x1r1hUT@xqHIPgnv=k?#+?ruoSC;6lZH-wdRujAy2_JE*`|@ zi2dzP4W*h-E4SIMiPq+wqU6oJ7G=X2I-$@#=TWOaRApVe;)?bqz|r*6Od7L2f~#`MC@AEYKFKjg^(U@6b{B<3uhb7NRFMdcd&2tLl5QSqSx4D)mN1raiPfPl!+R9@g#RT)GfCnbx{En5 z|FRyR;nL~_4=$D+57u`JgO}osRRl;dn&W?2EjX{sRW4&Y+ni#4KxGPz_yxT; zX5B;>U(urk3awNnKVXv@Rds%=P2uR#7cla#DOP5s+Gu5H8tb~gT5GgYvt29Mm(b(y z^9_71UUpAvZ)SL~FV&aco8g+;n;K0Gr?#dK!0ty{zgc>HN88fzkTVD8%@WMRQ1pbR*wW@Otm)m z7sFEmO+o@N6$~|cUDS37kqWZe9ussy{+sAa;w@NC?wB`jO#JFZu|}v57q&-)`qgr+mc52`%c})2=+Ud!ULA|5;Wo_<`N9MgO`KiE zEFt^Gxht=|b?&u`*~?$PaPf`Tue^q74Agkwh{{T>SX~uP$sWGAyi!5Ri!T2n8sF8n z2L3!EJ9PY}kq0K8E8QwUF_f}zurycExEpf4ENeLBbFExls#S!94`bo=CW=oV3Zs#| zQkFpiKW1E&eY3o{TE$qdY55*@1XdTz@#2XWZ?pk0YRwYt#p#nbk|qLSsL1>mP7oVl zX!Wu1T$UV{Ah-}A$=I(1byv$)U}&%?Da@+^4`v}?y7`T=V++2w%6?PAa-}hEM5Wbq z6K%eek{c~zsS{nJ~K^$#|MVV~WFZZ&fVivrm> z=k(u5l-F`*xfkSol=0*Twl$P25p>y~n~PFhF4QpCYLlvwGFbHPLZy+rwc42XbH(Pw zH!91O8*st5Cdje6JQ0v#wTu$j(fYYYNU2)JSL*b-D@5W+~?=B`aD+B_O6CGOiwe;)XKt+~A`6C=5r zTd7iL|Jhh?xJNS`$?0*}-ZJ_0gyi)8xZm%mu>6+!)k?KAYtth?WCl~Vkn=n3M`lgB znXQz9Rt8N0SsO(Wqt*PNp-xjA47rJ@`}~mA&1_TigSMnxu;oUsl@i3GR*}uUD7x|r zqwBUc)SkE9xOc1Y9jGpNoEz_maIfK9u%oc+9CxN>gKlg}(qNV-xl=){5E7&RTI4W|)3-rq=>qvE~5M z8x|n6X_g^ySF(^NwPjUVFhU!n(*RV16&8SQSq9DlR3Gr3Qfy^a7MCE$4Qi>Zi_Tik z-Z|S*u|-+fEU`(3LJL_GSgsPFkwOe_ZSpy2d2LCODAx!xgPkus3@lUYY3643;_`gC zBqSRG%X>>m45@r}xMq3TR1GgY0K$ooNw?plj@kftkM;8jq zm1?CN=zeXfZa+vC^owCk8B6?3mSZFe$qz(5)tuM>S2vcDRc1_E2@IOR4{UC8OqJ#% zvH4J=5nANtFqfVv6(?HT)D;pJ&PwqWRDSH@|C%y3>B`u)2LOnzehM(gy+2=Bxx`d8 z)n98s>QS>1?KR`Q)?m4YX&54ra;vXY7TQ2i&1n3mxiyv#KT zd_p-2aK;E^(fQEAKp{X7U<449K?gBtb{qzC{8FgLPUb=cK$rw6IH{&4C;OjUZ! zRK=JQ3{%MD0snbxwyx}#rDG~|^ z8hl~qbJmC*-kqIZW4rersx24`L`~#
LHx)%#fp*PWkQ(@e^n&N+&2-0(HS06|O zHS7hymTK%2W7HT?KIFEKZCU&HWfBktn}gu@Q;l6RA)&?}#5E?yw_OZIqt8`Y^+ z^I_WiFyk%2Y;E1x#ELa*#9Pl>!)M-<0!i)N<*lRs*+Bzy-ksgvd%IQejb1PKa$5JU zfs^U?c3GQtTi4&5O!Ghe-iU?ma$%2K0FBhZk`c9LdhD^zvJpl72gcgJoBGS1ho3vV zb&;v@eIxIUB2&cX*^)<;^S&tZ)(41;K5mcq-d^uLbXfbH)@FO*Iiz6*o?gFv;nfK3 z{7K*yZ7DFEB1~GFH!3XX;uo&0g86*$Ck}Wc9f!(wXfwD`S%G?6=}K~qkmWi|&4OQs ziG*>Cld`u&$_<&++(vyh=1q+2RuSVqHP`&Zs=FEFhhQxI!Jla$z0V z-&N{mBsj~f%TU}ZlLSOrY%Jxn=is2LnA_5d_?;CEl5hCzB?HKo!UmNg89Mo;DFulP z@XKrgE3FnQ(jFKV4VJKi5yKoNM!MZ_E$GbRn&^MMR4;WvQy$H>e)%QvlS(k%A&cEbO06yO_nI46~tiT2HS~@o{D?3vo6!dK^wojCLLZ8 zjZ~d)lsz?6rXZhiHqi2Tn5%G1GU8|0=sP;3iiPs#6?ZgUtyEsOZ6aNr`nq0ZX`(na zH#yp^ueg`ce@hU1(sNm6_oggDQEP|Yyf}ips#ucSX^vU}qK5AGi5AJ(6H=w3Z=;A= z!`Nq!=WJ(I2elH`D(7UyvgJ2+#64@%Q&yx?_NdEkESr7j9kwAnkB;p0DWj^8dYnXS z_T2H)Y(O|YbIe{-t^$>ElHbYW&v89HBN|LO%4j~MmO`0xTz<#%rsYpE!Hz3y^5teS zB~WO8B-72?;1OWM`hn6Nk#7-nNkUkI4( zVTjfiR9P^gAwmqC(Eju&s5#(X+^9B3MX5;%5uEI@brm%?9_Q4|pms8ItSiNcPm@wc z^2}YLYb3k{XF;>fJS|Zrm}=4NRE5`#Z4+L{WCGl>@5j^LP1#nqWuwXq37^aBk#=6- zbWl0jd2y;XZN?^nbnbX7wZK`r9JgPrFC?xv_00XW88$@ z`b22>yGe(BX8mam^na8>F?CiGyPEZTpH(ONA3abRC(bP}t;KrsBN#0_5`q7Kok^&a zcyCI5httkBkFqI}y~Ht&05RS`p@A5*Rz!3H25>QR9X+neH^nzN$R+IY5%oA!v;+hx zFYIJD6;iDXxr8&q8smEE6sb*rXtUrC+uhYsSUkM44i5`^DjZ%%Es9j^S~d3>WR)Cd z^-O!wh+2Ho77*H5s`iW)W)1eDvpud+ZhpJr&%jZI^h|3YLWQk?2$OZqi$7{N^wTXa zH!m{&f2@A|30?l2E}h+)D9u!DV|%+9M^l@jDlP@WiV1^J0RVqSmmknY_M@#nXJ*ps zLDX1k^*f@Z?2BFVf6yX`#HyA2Ko1!N1OAWe-LgHxRF55SjHOjt^=MRHdICQ8puOi8 z71vgjTYafPzK`im&v5Fnsm7!p+t0h5DAvYP4ZeTcUoKY(1STOK8wBshZyqU}tE&VE$fGE`@6BLft>?U9B$h)EttMs_P6 z;yNbO4-t_RrI;@SmEtUWo!RP4Jwut&hKwzG&Q?RD!)T7$XaLn?nP*&JG%_Z%&kx4o z&8_jRN!1^ehH0hzD`SJ*8ul+S8x#6>K@MkI{t9CKvNq&?cn_& zP$tlgw`VURZb^zp(jzFDI{94oVr>yxXlcArAHO71sU>M7Yk@QNh{h?Lif9{+!HG?s#10v|XN0Yu!d$1_ zj;rj^$>TrpVwmxGSQLsLe`Ph$Zel6yNW_lAlQ?=_6iqKUs(hb_vpM*Ri_B)US2zSx)+436Co=37N zH!KO>A>8IFmEVL@`AI=Hkr=WGjmNAxXb=Rjthy9*7WS57laHZ3aD&>ALIDi`(tvEE zB0J1_TTy7NcgQk?y@zx)a;4Bbn9#66I5(CXxw^kNL4?WU6Zd-i+(c1$TOTc)oqUZ{ 
zt8x<~5+*fMn?+$(+YRg|n9!^nLn9xZHNy#`dst5(mXDyfGT!>5S_2nmb{XQZ^}-^6 z*h}5KD9ud|n8RJUW#dot@LM(H&p7Z9kG9w}&6%{r7&(EfrM1X4N8YNj@%VN`Q3Ny4INxbtGWU}ubgeK)K(9_zo zxT?yTFNO00K|^HkrT3W?%hpu$S>r?E@rdAgoApVgk0F&Hl(6?MzQHUPpi@j0)4wog zbg}wWHYkxTpgNv@1b?SRaMH(M=zaR>LyVe6LhJ(jUEqKV)fzjvv1|y{Tz)y%+#4krU#u(^$e~=5{iHunE3P&7w*!3)nBfaf zpxs_CmzsUK1sPew@-Fy1kjfrVtM-^ZRu{2}z1K-{bT&*s#2swuu313b?8PgW-kiPm z=DF9e%)a&J8~$HatQb?Y_96wGb)$iw5v%JgAnQvSIhhK1AP&%n{WYJz@7QgcSv-s) zzBC>w(ipmA+a>6Zo6F=?DEu>4x{t-vHM{b6CoaWgUOH{NPq;Nl#r6l zuJD;8?s*zf{#V|XY+tK*%V##j@MCgo{MHE>I@qkblphv?5`Gf4ax*By3~KHT)vN*v z#CFP5j2e6VGfb`?@|n0Ywso`V<@YdJt-g!qQl$+$TK!CA!2$}*f!3m3m?Q@TBycX!NY zrD85$(_B7C{WX{OLk8_hJq?4fC$*2it^DmkdDcAjP;)!Xx_R2@lO*BDlDiL~B0VJ} zT^GsMhR2twX|#Xv=ehU)OI?0Qm;XW+HJ9c73%dU~U2I7kMa66djISkfg zD}@DfW5Dy*!}Z5<(C$`~Rj`gy;*2(KOJi>gQCM>Oc&YW4(y}0gCvCMs1;++3=t|p9 zEp-*Fm6y@-v%S@5im=ps-%F*GXgi58_;ZOMJxRni?>G@F7|o^aE{hTYd3IX0|9w>L z5yQ7NR7hqw&c-k`BjyPgp>Bt~^+mI~&>nHn_18?_w?%5Z21-ezdG|?Uy2U%7HnG7A zmxSDCn^#m~P)%`<%J4>?=Z~gPtzJi0NV`a+ez-ZR|LMjK?e~)HA)SEg35Z1|q8ZXC zAkh_0jqNR@Sd=#ZE`<)EzSKnoMaS=izxHXEefH}ozQ=n zXge8ZNn?V|mm?AHYO@jUl~9Y!XhYjyudw6Dh)8jZ({kw(b^#QTZ_az1)<>`P8P9HF z3l=7Ex{bo(F{=+Orr@Y;xgRan7iCQ0n{9;{-8t7!D1Hh)j7q#e!VJ5t4@`<4*y{=B zqr}y~T{o=<%A&CPvPd(H(%hU{sBJOU%Fozh#f=t|qOD$>l~cC@=9`@gscTFFv`wLo z(ZvyMq`0omuAfb#@c=t_WF`VhyI|g{@ISS-gr-oLsn6Yb9E&#ZxP3ylr6=x)P1Q`1 z(}UOOyRI=68wO}}y(ra7_`X{Z5%)+$xE^J?ogF19zK(1NlzDrgK4FF$R3ykzXP;%S z+cBpNQs14u=C78+;_`!Wm6|)^1;&t>4a|7R?c8f2#_1=TL+vGcsd@OFXJ#B!kgqv) za*bMv0uWz|UY8vnM#X+NHDNejegqcRo4sGpU0+#j4aP&=%HaE~xx1bB1U?806Legl z$XFp>X+HkWrO2z@Ogzr*ZfuTj_P%Qj&rqXQYN$0B_uJS=H<~>HmTS{5?d=WzlX@}A z1qm$jeE(TJf2hmTx**>0{GZfC1~siMnu_Rd$)e(6YuG`iYm#i73~GyW0EG&u<_ig zjHnI%5i?Prz4G;IY@$W6jKujV9)u4S-ESFC#sS9{Mn$=n-CT=AT90ffhcu4zb zvTh$nbvqblO=(fZpfc`WF~@bJM3y))+GCv=II=|7E#CTB(L}Bb8HEco$IPIJ4aykW zV2Tw}b<~c1(8F*{h+8=Ps6X^Hg{ z`HJ;E61lC0+<{7iQels_%j@YxUXIvdO-Xq;Dp$g+DxyjotM0hDJ3XzoK-|Gc#hu75 za2ZUw{KgXbt*_V)belBoy5%?2?HO~ay|Hf3Ch`hvb*!^iY6|4mR_`>Gy7^zFr^8_w 
zw_)g@4%vV|xoFhkPVdf$hZUD{VgB+)^Esy|7*>fA++~#D=?oTD*Rk`0 z;&bs*H&18yk4l{AED@_OnNUII;tG1w_|T_BXp25H8?C9a6NP#QeK>hJqU=Ii;o+%6 zzn#ZDsY9X}N%^P5q5Ls@-w=nJY$JinvOOpB;r8=M(QK2)u}H}-qeszpo#UW>=gm`= zD$j-CY(9^|Wx5iE3SpBH%OZC6g~@W+3vK_LWc{afDqGL*N2gjRy#?2UNm10#4@gS3 z{r@`Bv{|0_3os`d{j=CM#aT~gFLw;or^-K~Mh6@BPNxkvCL=-A(@;mdAuz@gd9_)-NuNe^Tw&{*;m#A3wS1S0alK)FQ z7LokeN57)}ruYW?xIj68sBr%$@{eoA@R)hVQj^_2XElO%?U5Ao!wsKf-#$^vze@XTEd0OA z-2;wAZ&Y$zAk{3)hJ~qEycZo>(5$CZdWTF$yZjW4K`~Mwz`2W2w8j@&uY-Dy3 z@QYd^Z*$`v-UBQ9q%pkv*f#$NTj=*_HZkMif#=YBq4;M`|-V)gFc3kWz?4s{Y;O*v|V=~U`c!Sr@ zT?zLuVg-cc%tWwe3DD`{W~E$bb8xO)Mj(x>@&-EB+s2{2LQdo@Swd zq%{a7v_?DKPyQ@@m^52X2yICu)0KoYG)ss#8-LF#sscYVS+Ht75uGA2=g7p(<*U|M zCp`^y$>*@zVB4eZ-hI{G%ho;&*6pioBs3fsl@JjDN288Jyt?U9j3lo4fhcM7Tx3c9 zi3j>p^HIq?9qG1_uIdqi=)|5KC};pEU=)*JII9T&pjCzt0~N8UsU;bS92K#a!w9yV z!w9L&)Y!lk2bjCbolAx?j9y0fCvb0qNC(%`F3sOo=&AT3SlMzXiG*MqHBShF$A#E> zh~NPZo6l|9|7{}7UT?@F-)F;mF8qBp-z$~#IrsBw+&h$HaMF6`KjLA%8IqL1mkTph zlal~x%y%w*M&BI31dW$;kvu*kgK?oHI@FVRFurl(7|fIpxF|0v0H6`ZH{*}+&CRVI z|I<9lb*>{kB=mIdbwY1B?SMXqMP+&G9LAWDE)Ip(oQD}smBD+HLr_9@{@>B1tMj*1 z7G_VBv@!l<>3Q|IB&(jGnP*amQq5=X)9;Dom<)C)jE|_mE{Y+h6Nr96?@#I?A+-NL zauJe`)@9=URiSv>nml{1efz5a50tL6N&ara_Npm|x$(B|!Or#&4SFP%O%L}x4&LN1 zJ=~A3S%2vmTdL*4mYNO0Z*pO9SsfTpVZe+q(GKF{cs`p5$b_L`Bqg-v0ec7zYvVJP zlL?R!5o)4>O9zU)dR|J72%B8KP0ie)jbXPTazQXSf=1eq%Q7XHUMhtqmOOW4vWp z2jg&IR(cvYOUqZxRaHg&Pf>%Zj|Ntz2O`XZKf|R#S4Fgm8)!$D$ehO+4?rPc{`Qv0 zkM%E!XHtAd$Sr(s81POVf_gpGb7AISXtU$LqaoF5ciV$+zT@$axtzRbbD0_&y3*Vh z8YqvuU4+gP)?cQEt=eEW*;fA&x*pv0 zv@%$iI763V2tj`~%-jE2VvY5+22ggd;Ows4+LGwe)=+V2wRXe&skDZyP`eM!ckLzA z?eCC>TWbck_t+UnQHD`TZ_(i)40#k1_Y@>+^W{j7Zgw>r4|8fm@lCVr0Mi;z1JkFu zSwC)LV#D~p2c&+Y4XGImRvse@l=hK%V={MhhdC~1JJpr^j<`;11z!i(;ODNtSg+r} z-W89{@Gn8QYsq#-07w6!n*U2&TD`VyvClyVTN%NYFjgL7x0sPC$G$2=obuNc#*q^Vwt-_%T zERpH$&>1Cb4nqNOxBVC?Hsgakgc=fxNKs=0-uh`%*_f;A3o~QZ4S39E%YB@gV^9W> zd66O640!tQcfQYIpYQHQ7d^{CUmR^{Aq7#mt%Jv3J$>=3UwXkme(Ut)O8)w#YhQaiKYjV+ zrRtlt%gsF@i430(Y`h%`7yaL0UO6uscOhbt^86o)^M7)|6JIAq)9Q0F%5I`&Oer+c 
z{r7eGKo=pI))4XzZLmh`!1k%~a-E~=_!aNvC}asjI1WKXQ0&FeH^PWVVMtsHwcH_d z>g`Yl5*j1Y3!Fmdx6;i8j*63|y1+_c!i~+SJ%2C7u&GU3-S})_4Vu;LNa_H-`nCeA zM-Vye;_eBA5a{1QsMu$oh_UEnq}t7wEWBX8KPgbKm8!MqPFyk43G(BIA)UB4_M)2c zmd#!^rDu*X#v0XDBE;3VnEU-kORs4u$dHd&P9S6y)^_HL2)fBu>u^f3hb{J_DL_Lz zhG{k>ko)WDze=pF0;9;Q~VY9Iqq*iY-ghp--0F>iBvJmA+cUwks;WKT%)vj-INWvDlBe_D8WFwb;-S zar@=x8)#+5eCV}#z|BV37dZdemr+}v@T%LqWegVvsL+mkHf~=gY(myd`2b^))F&l} z9*#>P%R|yan?Pq*XMP5~RHydvy`2`Ndc>OYQd(V$v-Kw}f1{w7B1&-cUq|nr+VHOS zGl|aoxkP?H8l`!9Lz;GeqZ{7Ut|q$oUnlbW-6+4K8`8A%8{6;>DqGGw60^p6qqlbL z%Sf<=-X1;uxb*1B6RzE%*5Ps1oM6nU#qNY^jw9x5YR;p6*<0MLtxF&_|7J$VO~2xm z&UuR&@7o!#X6Gbu^DTA@WW3YFYna!1o+&)^8%g)SYIQavP*o~fRVH}pyu@GM?)Mt| zz0w8mTL{4}+F{kNY1RwvfL+`Y`hqfQ;F=#PDT>A(|_I0l>U+jw38XehseNK zHg9;P%UbnBGUqizi3vK<+g+0@RulYPx|)FfsL7Y@sBBjgR;R1U)u<*01YBk8=mb|2 z&b4mWgcFIV$(vD4wz!&XSm>oGD;*)FE1iz(DxH<)O24*g={V(grE?N>yYvAoox`$S zKeV0r3`+@vDT}V;oR~ivlxeFBYu#0b5GwO_qATzM<0|vDs50h6DC!D3eL#bfPgbVS zD#Pj9t}@P&1)GsJ1ijp+`fT&oXU))M2H$Smqsp=C$ABp3W*^Xr1~Zxk0GR_i-{Dys zLC1YFv*f(UbwtrXiK;(hox01baKGDI|Epeu^A@aoF3kL- z(M)^1J2=O|%w?|vzs1MG&D8V)WaF zGG@&jjznmvWyPDi~aj4xG4*2+# zH6e$FydrH1|Jci?8TK3~UE66?`{54RH8R?N_Z%B*&eB=T@$6wbA~ zjPH4O+>1;_@Cp?jD6-l7+-!CvQb)^Q%A+W`PUxmw{RoKoZlk_pSi-D1hLF)%Xi4TO zT$`ntLHAT5iVd_n8^;Yb`S-t|tq z-s)EEq}Zf;r7nE9N&*7s-tFwNj;yWi?JV!i%{e1Z4M)^F+6Xjv&CV*mq>bCSgW@>4 z2{%ULr84?&&4c!SmV`$rY!9%UPww|lI)!psu#+#$VnS|k@c*!4xOcX(Fw2&B=5OdJ zDg=F-s~H{eb9%-k0F-`_=Wb{g26uy&Fqj@`uXEQU%2}Uq5rC-O5^Dqdc94dyGgV^p zAJcLV>=+Q6qseDC_TBqSaAF+mKSNY^_OK8WF5K^mdd?cQaE`#NS4!(BRZl!`t4#sU zk674>w!^S@Mq~PAl-)}q1>w@Xc~KT zyQ7S*(Q{1M=)pl9yK7T7dNlgRZlG}VYc!7CL{!dbx2v>hpwzkTcD@_tbtXIcJo&hK z@PLE1xY0c7$+h!L7QbzykY}T3vM+Kc8jIV>(+Z{KXR^vio4yECI`zYutX_xBAzWFw zD)Ydp^BuiH5IcGj{eLE_XXr%od5*gTsi#5|6ccv}P_>MV>g3j%q(M8A#hYBB zr%7`1PS=F!1Qze$cXp=TX_|c#P+ac!VyHB=^- z0J`!#(G}Aw(6Io!5^Si%xtLtyP@zOE)|abYd3Ub7u6!p?Wsm13PoLy^>iNl&(MN%& zPAKq2w|LAspW?o7;svhHojUP+_&Ke(FIeD`4+=w0jDUc4k}j}86>nO)Wb{Un=U!Kau8)`*#7*)S>%P*}1B7WD-eoM^*L 
zdr=kPT<+5h)nDx>R)bk+oTG6)+f`RCiMp%mlc#2IlK6lPu-5X&dpO6lbIGia zpZ;W7!-%>(twH$=>#&H<#y&k<^)-#_&vAor5o2Aa`Mz$C*rGi@C6+xUiwoXgnDNaM z25+523q=eXbXaNZERb-1-BdFcasUONd-u}KC&N2}ZuEQpyAlSpUn5zFA7r*6?T*XB zxbP>vY1PRT@>Zd-bm1KuVTX|fNZ|2)YD*)?zJ2)(!*Ebk=C8z^HD;tyNDK%i!^3g7 zAwIPoS25?viaD8926mF6h`V}?Y}u`@@rVtSiRE?uZc37f^I1$<;6gpc>?laBi)3z>- z^|Ue+D;-Hd`#Gy!|(zmudIENZ%pF zhqtvajng%>8#=dWA5PqM;Iu9D7?L1r+Un?gmZEu1ReG(5uK53w$L2(6mH(jYviT@0 z#t<+n<=M%}@ymtV<65!uzmo3{(OUBvb^SYMlb=u$1WZVVo}Fn8D7l0I`7L2`Jv2|= zD~oHAB}~DAbmrA8D$&rnRfg9GZ-*JXxof;&C{_TV#sY$%PQ>GSyBbr~tZ4b&mbx<@43P0vFi3S*)-Llx+L?mP9S8%$Fpa|Ah z9+vWG^p2l?xS0x=@0_W}pC7+czr{>FemXHz$@cCu6%^AsyYkFbH&2@TsIuBR+nukQ z!eOktPhs-58MDb0elDCJ=8FLM88R%{!YYRGnyR9(qdt{1&f3Jf=HRtLLp*2mv3IE6 zUCk|>p=GNV8|;%Onul%E%-2caFs|;DJI0#PZ5T&m?BYbDO`PcVF8R>TC?ERTmDQb( z#c}x%=jl!B(~*xlK8$zIyc91#7}q*K7&B)1{*W!p_lU&j3U#0FbBszp1H+Kdz>>7M zaEQ5a1O(LA@>>#2SUv-2tBlU_oW3?Ty}6N zHR#8kZ)#`olj$;)z>4BW$iZreHPGME{V83{B_vve(8UcF2LCKZ&Cih2J;u%N(R(ykGK~5)eq2|y`{UOM z`*Jj9arM4;5+5L^aC1jp_tsbNcthke%Thf2>K!V{?JZ5?d?_`yw)8*B*poKb}M1$ zq&sI%&NDB={Ym{-fa&`D;pd&5vEY3emc0X{TBDF3^J2^y0pf3$$4=9 zX+@DY-d1YK^<@mpWJFs-@!60&KF4;Yw(%!7FFF%?DEw<4x;xAdbBF<@lusb6pqsSO%D+y*7akyD* zhT)jTU;&C1=FPP7!6?J_fnAuHOJi-L1I;j^JJ3uqx&zJn!v!v2y0=YcN8x-zkU|pdC@}rN4hgdbcS6z*&Va&E z2xfMi3^gKi4s1w5|0(W^DoV| zgxogvBX0T;^^Bx%{_k?k*WDd1$@f=xU`BHP?w~o-nh4)y9}q)K3os~@QEyrkc zwY>k0miHjulJT2|(8YPn6u5w}uay7^t)BJeN=?@S_qXl-Hupvi6-8U=BX}9Z$%8zE zMCHAXjPQSocp(MZN)@^wj2#&9lY%bF8|v|u`$7^4vqJ$zTey=rwB(n`$p2$5tsbOF zjnAy|{)h-agN-qOwCewrcJ(=e^sGweC~(B@t$uCbfMbDg;h+}htl*EVX4=oe&IBD< z)apfrS!h^4`ERK5Kfq;dXKT>RBg|)MKUN=l;!uj!hP&&LEDsN8WN(n&osBR^9b0`) zA=1h)SNyFC$sjGq02`4xXPfpxiD<}E^R4v!JdUg=t+$oFcnxsx^p35akzOmdHt~m) zPk+d9yKWHq-x>)4d}3IBP#y9GUV8Wd?L#~#5uFYO=kRgvg*hHi4VrIwz1^P5<1hXM zxMbMyic1rPaxO!vdF)|k1)e~R$-5egL}_k<)WS@{%&PyADnHboOYUs@C%agmR*wXy z{&$I--@}~~q2?}fekH=yYYahtOX8tzN{%cPXOgYnpbE*sNCz_Q?!n-^VFzmXUn58V z-|BKim&>}uyXpMj)T6jIU#K#_%ak4n%#gChg3(dEw*7*CLwDjxoWr;exof2x3(kjb 
z?`xN6um6eyhvX&6Wz*_gU12cGV~;sbd6ePJACS;^Q)46R0k=JW7+IA~LCwtkNN5w! z$(HI@vxR?FQ)O^AlDQ!84b|Ba=;pU|Y60_mHr1hr!QDD^ZU0B(&ahc=rp*e&RJ=XU zvT3rFAxb`7S@w^rVOtU^!u&RtMM6iO-`;*!1p{+|mmi4A@Sji)gK?x*W`RZ2&t#BF z&Hq>G_i1ACy8#dYX2$SJ*|rt58hX~^y^|Abf4~ebul)2AE`f*bo+O?@xHJp zJ@V)d4p7iy&>uT(xXOF*FyB$WeW`vV;9EJ0B#w~@@6y8={O)1#FV_TeF)++wCI>hC zK9L!>gBnKCed!@2=zDwivpe^o(vU-ZABP@P{_SweP>qg+b<)!#nc=OEKdZXw?*I#3 zHQvv! zvQqlQgd@h(uXwi~H%ie+ZOFk7>}^P@q^4_6cs;nszcJ+bKK9L#gFLsP=WDf;_jUt9 z($VKee7{d5sz8C?uuX()Zh%|_jqin+(HkAC(oe--uSq5gz-KuC)g5M*c1II3s|A70 zq^!>pQA~WR9@8L`{NuWuz7YriI0rYALqPa~3MLIsC}hFdF{znzY6n|MX{|Fi z(rz-+$fc-cu;EU$IDPnx9o}ts9H5i_%hoEWQ(6&+3iq5iHr{nBW+iRJVlEqE##B-QBIRbapsawf&%mHbg5}Hb<>V#HaxgBfOG0rUBV5gQD zirE%R+agItG3YpPa`L(7UVLtXsX|uc?X1S@3*&AZ?zo6-nevfh+zJ@C0>(AHn(add zuXGTa0&FHf?xvE(2WX2$r^J=5S?ZLSOun@lo%u1qCL)s$Mw5Fp61e%z?4?2lB{2uW z#p~o)_6tCrGe99$rzxMK;gTxJEMpfq)KtyHth>so5SqnNF%P|lZ2xF;So`)k4n*0M z#`;?s&Q10I9)078pfkGroG#|=pjXkFXpP+#IRP@`RCGpJ5VW>hWsDTM#eo)Gl(;se z{vGwuBiwjTf@$QU2Mn5l*xU*R+Qm~QW!zFM_|Y)wd!BFbD3^xG`N2MnlYF#_yEOsO z0X7;JdSO!D5Li(E+1hjvUlyI&%VykdEtNukV;6D?txepk_k`h_Y{R@{KD#JTE}iNd z0q(;BX{*I%M&90Se3DK&cbFuSrB{*oo(Y8~oE85*}$T9~sJW7f&XrY?z6S*(l^XV5-IPivf z)d_{Gz1)!X1uLa!58-9Ghm6mcnxPdJ_)VhJ4E% zDfEcBABX+f(O!32bMd;92;6~raU73o={XUzJNUlMj683zY(2SG4bVAjkws`LFqrRj zN??InnFyYz*yrXri1)n|Y=tDa6l)C4`x!5V%o^D&B522jgiD%gO&PN>Q22B7gJGX8 zqEwQ=aF*G4KHyr7_VhgDa(!52qa_lpx$nWkG)r``8DSamS`U{plo0N3F}-)pit9Q| znY2B~w*PduQ(&GulmW%L^|4OuQx|fG2 zo#R>_W=XeK;yHas;QV{cl5TH%p-4)c3BSS5aq~b6<#?NGhZjdj_5leo31wE}A3*5K zbZR`Z5`Y5Q;>+ak^AKafraP3S*ywzm(=8Sl`0md--kU_?&3nX~e}fpty0dhM)Mn3- z;Lrh|ggxi*$YU zR_^Ez4jGdGgKM3?8Fv)dN3&Y9+cc3!xWRZ?^*LZ0iX+Pa973RjE8BL`6=Lr5n*?oZfJ6wu7hqeqCb(U+PWas9X2X)etCEkTpp-_aox*V!@8 z6|EyWPpTprVG7b)#TuzCHF1yU{|n0=EZ$>4N7;Wd5oypg!CU2gL$4-GI>}COz#QIv zbeaNi4ho*is+}SFH_t={!=`WkJR-{x6Isq9jErK?R99f?Ys7e)uPGNfw9C=?VMgY_ zs0|FaHh<$yCrRvPUSp0m0n9oUks#Ouv96zlG@t8EcTFbvwk=)|C$bIMHjuu5c|hRjST 
zv@M-sfi_Pl>uKkh>`X{)Thfw5BO*l1l1+R=M7p8EiWhy9m-FMX#?mZlhgl9P{Xx+i?oT-YGWrBQK+nk#+OrK2()Ea!vFRUzmN%42Q_QscA zJ@>}!SI@oq`ngx%xH#1sAly+VKdItBuZo29dK<`yX!pZdcUxKj$Zi-8$A zA_@d%j&b8{0nDHqbfm~Lh;0V>W-@X>w~Ox}M+ogfpz}zY!(E4LP;6ckbm=-nwAOJ1HSwwc2j&2NS z2~xNGd;wz6$7AQntcH9V8hwrmtbzLJ;ag227^xMQ(^SG6%!^Sn{cX^ z=+J9qaT0Jstywd>vTx6tD0|^UlSQ;e5{~wW39oCN;=hRml-QK~(iwl{<(0 z-;34yJU%ko@a1UejR7&Cy3D;t#fZh49epPzN6bZBiz&Twh9RLOMTgiu=vjBFL&rRb zrQ9K`m&EQ|HK<#8k)p{SjMP)LdV(}%-Pgc@}NML)%3OGm}d21^Ct zy(eJTtA&6@UUfuiE1IZfibBkUJA4r`5+Fgr5))o@%;AirKK!r5C_LFV&gz6!ZAT} zlwAnN{))k|+fPUJ1$Nn+5R~EJ$3PiyH#J@Rd`u605rS;UOEsSeD~jhG8Yyp&sOCT&(BjqYrBhkPal$35~;Mc-{K9->O|%y;fK?lNk3 z#lAM*#b7)T_HFJJ#~;>NOvyW(X3 zn&Q31rPUuYImVOVLDfVQ(|Y~3E??tF>`NCBUm@P|f<#sICh(uS0l*h=Zjgw>kLTwCHsN9y)K<`nChaKTzJ@%PkU<;p zyC-xqif<$ADb#Mu5|K6>4OeloarWI&*@xl^1V=BU$42KzXYNGSC4%5;F=uFqx>bgX zyNSh&mr?SzmxoP?4yn^fP5V`bVJscm7D&A2q+wmWXqLh4gMAE) zg&dK=T09Ffyc2n5vtXVZH{d9MM5Q?HFzwym7fGybmxAo=8GmPh)@Rb3%WVTR2r&qk zDQAEN%f{SEN75uzP*Y=>=IB;=BY3Sxy3{zx-^$b(!-SEej8) zH@ALNC2~lF<0iEFDuLc0PQ6I(-4#4c*eO*|b@0*;Rjrd7s^!c>HdL%_HPTTve_RV8 zg)y*2*sTAKP^lO1S3Pay2_c@5^dY!w9=d?%ZzN}7e{IrHS6GtPQ-44fcj=)gdG+!q zt-@37D!5K)S0d3VQ8ik<{xZ}N7120!57kptSrx;NQ7I^D{r!)IqDuMZ`~#jLXc))hW23KxEp%PF}ks&M-2$;l=JAkBL6Q@ zJ+)SQ6U-8vdrixHZ$Xo^yUA+J7t~}a$i0D)l(yy~cr0f0%tsT3kOnx;9B~&c1Ve{Kc7Pzn(kF{2GfU?3L#3%Y{a9DKSQ8B}w0XWp$ZT)WobQ z;{fX6sS?r~0h3lA5}x8(snxGBrQ1vy)=K;M=y%5_y4?|-bSY@Z?6!3LRVdqutDN8B z7N)h#?Fhf5Xs6-z8T z0>}_+Q=RJp*9SIgo9TZeHQyz_cL%Oz}51mOYz z7EL`Cl7OP;!s7%aRHON(jifhHLI#RD9Ysaf){ZKzr)-9hR{aC&=}`VgAf=^l;3GC? z!E2-WxilaW&V~ezak3B_Ya}Bva0w68i1r(1ulcKy4gQ0|xVz0CO)2D}^BiZni3|n7 zxXQ}L!tapF4Ycn8ZCnseK(_`G#^;gsQnw3a%Z;W*O-~*ef;UW2a%&YC1i7^xIO&jq z`3)U+FX(5@?O=->{WV+nXP0l#u}2TL8RTHXyqX-2DBuzC+OD`j0&@85OU<296Y>z! 
zafDsFg7sTWQu_al>DFeeTLa658)XU#n9!QUsMlOKcLON#t3Rl@EIQZQGXikh4K>^Y zaN3b0C0B+=xMty=F&ysPJ-;%n5v~DYMNsGGhG&;cleJ904Mg{x1l(%g~1tkJ+9r z6K$u~M{MlRq#7J%fK1v7U=LPUfNk2gcL=0mvJe(yKEQE4kSUTHJDr{FFHYUer=1csw#Mt3eT6vqDsqs7zrJ% zvaz+D+Px06q?S9R&O-MSErF7w>D`?|bX%{hesvS#i6gMWo~(6rr77GkUDRV6CD0-K zV#h?(yX>K;FB_d`)e5T;HdRj4tXp%7%?WjHZoYXU>}>0H^+$(-(g3#WA;?c;K5Vp{ zzl~6Yh9IloKv_~D^Ntl&2bfSrs42Y(<($0Gam@)>nZ#6{1SwO0r2o79Z=w6~5l%!w^$ zmo!U_oDS>S?$x91{$__sIG=3L?!FtrgirM~w}mt*fnrPAJd$!=X(E_4UpVxMvtbRC zk2@#omqfoB9Rr$y?hWb+t zxx4`h?-fdp=mU6g99}^ATCtph3t1tKL7%U^(%icBLb%if8=CzroTo}pJ%zF20V3KK zc^!CI!smhivm}wHgw^;X8($0|50nJMsN$wKDJDq|vQy?jaYR*~T|KgmEWOl7-!2Ze-+`IczEpGkq58$=Kwa-r z^T^a{u~-fkI0@u-66HYN@^APnccr;gLSXO>$UH<-%_F(&dGL(3j>69C4bCo2Q(MT+ z`}MV;?0>%b$Tk1AW(;p5%nT|u*l(h)=CDApt3q&Sv*>caDOB71!dtZ)H6|f=%OP!q zI~-YUM>m*PUqjLfWNld6hz!BIY~Z%@fO>ZNqO-flOEf!1CLB+apxpa$jwu{#B5nV# zDasJaE!vvSq3l3%@dz#e_Q4`-hebGGA#U^J4$OXwWN#n+_VCT{4dfy* zW){93!#QIXemLTgWsGL9%851l3jdBgrnD2v#2MgqC;W7S?W~$0aI#CQ?2UrTv2#Z5 zj@Lt?L72S?Mt^6tvOFnI@#wm_J(;+lOx&MH+#fg3@-F`I=PYg8FL^b`PPoQVY$wmg zh?3)tSA87)%Am5Xhw3W1Ggp`U-D(ilazm+O*&~jAgVI_P9k(Vj4%%;x6L73cG~HG3 zrlqyJ;l2f1X7}BSi3i^s@%C!mPtQwKSFUP=SVa>+fEU6{!qz{{+%8 z2SJjTTQ%69fWoysZQ9jn`!8zt9{qah*)j55R8A})NvYx{MkUxXewLRXtBtC(w|h9# z+_i~RH5Oip)!)MRJm28Ya{;G22^A<^ZS&Gt*RQ`TM1Nr>khtD;zihIg3!qaVN!Xr) z1kYi6UdS(ovx*lP)93Arcz8#qK$pO-!>VKS4&%+KpBvH)>p)uu&JDXIL96R#veWiGA#5<0TPZEfNjK*_0cgMRt%{D|0@~)L zF=YQtgrZgy@@l8OM?@uRe-p|4n-bP{@TX2Re-AKbuO6V`Ym>9^n5=uMVVF#HiUa9| zu%R6iD&k6KKRwHPCyyVUU>B1gOpwup-U`V;3*TQtpOBg}QeJZqv_3xmGT7b!RXz7; zBqVspqfm*Xn!#gcQYq`yYo>V5_b*fyaYwaY`CXQW^uMFT@@*$uTSl+x?@KI|A1?WY zwOKa<0!T!OynGYeoiZHXl{%Uld#Tkw3+Ypwo%LT* zMkZzZy6$Y%eNA^l%nk+jn#Tv{q+qDMsnwaI4hEI=oX!Fce8CT2II;CeJ66seERc)b zA(>ky|2Gtsr5j50sG{@rWQ|qjv>$fXSG9euS$|TOVO_TBB1+BQu8X94zLc^4h%SO% z{%&3N=pv{NXj%lKJc4(W1Omm|77smoKkJgtk3 z)KT3T%_Abhe^wWDcVY;P>JX%A4ZMUw)M~Z-vN8_-b1r>214{ha)85n$$j*ZqZ7TY- ze}laT_>6r1@Y?pzZ-08Uu>H{X^ZPG7S{#`e9@~Cs&mVged!8FPIDB}|lY5>R8GHPh 
c$6p!QH?nWIH~bsk^ThVy?b(qp4nO+;0RtD;c>n+a literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/files.cpython-35.pyc b/tensorlayer/__pycache__/files.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f959b9fe3ea0e2940fd76abd6af257a20eccd0ac GIT binary patch literal 61843 zcmeIb36NZOe&6@Hdu|Nwn*_-QKyol7rUwIn1c@cM1aWY=OJKPiaKUX7n(688nQ5S> zd+=TlXV4^e?Ik7dmX5Vzr&KypR%EGSOR2;uOLk<(i6hyPm2%>wDpiWQR4&JID#fZq z*{EXKk|Qgh@9+Pv?ipMp*V;;{XZrPf|M$O--~Ipn_iWxgk}v*Zad|M~{hpWl9OnKd zzQOP1Jg?$;MXnjIlJ)8tugHzN&w9mdbf5Ezx#+&nEA~bA{a&#@x*zb01JV7UR~(G) zhrHr&bU)%1H`#s8tB-n1o4w*@Z)uBH+~Q?aYM)o%>Md>airaW5;{mU}-CNq>6?eMl zL3`fi6?b_{yS?IWZ)uNL+{3#euR<9kUWMF8y~<{jD{D!aVO zZm+V(tL*hE`@G72ukw&rdDyEQ@G6gZl}EkGV_qfiRStTULtf>uS9#p4JmFQI^eSUs z<%m}q_bN|$3x~ZT$@{%cm8ZRVy631@neZt7Lwbu$yIvGjDZQYmNw0Fut4w*7;}$~6 zih9D+|4DD24kf2&yvnm`#fNk^6(i)7TJ#~KLM3^8POZ};73MJ-p33uH<+O6Fd``Jl zUQiEwIP4W^6w%J8HeV@QGuR4Sj>HIxc`I?DMg8d>E-1xWaYSDB7-pp&m?&_86pGIZ&; zZ(3Eqs`;R|j2~~WdX={{J(L@xNIePpnr4ea7}TPsjq1thzODJA`{Q2a9Ze+NGsM?4 zqjb;c{D7vG?x_j+&v+Heoz-Mh6PN%LQPRXyA2I=WJEuvgM<#$vRo0wT1sM8@CZ{rF z4v=BhtITVz>Mipj>rIa@lrHvp-pJW}xmmAQ%dJ|o5#*cm`Ne8|x$5WVR~lhhtC?SI z`Zw~cwbo+3QfifgYO63Z@>;W0$_-@IsmFDWzpfGZ!+6bEd#d>o!zw9^PtyQW)es;D{zOmezoz2hJ>ec*w(_bpJ z^3-Hq?ybeJgy3j?b&<#;tJQq4vamquO0W{}*rGlqzmi|R)mm&ey7Hu?#&Vm2qRg$j zfr`wN_efB>QB}8;{bmqMEZ0jdRfhH#3iMp1Ix$yomT#Os9p*hUlFuJL?5b%w3L>bXZQF#_@x@vkc3mP0Nas89 zu?UIF$t;kzv#7d~~VC%Am8T(2&a8gcMrmF8;0rJ(D`zqvHC&}=SnU-4`2Ry#ZG zWw~0f&Yg`@xfZ|MEXN^30w7waogvo_?lyN#m@udAiQU;8rJJeL%B{{mOWYmDy&1NM z0@IygS0&wUwa`+7{%unFt$S+o|fCH_SKo?%6x4bgm9-%ldzoC{N|(ILB8ks2K%|RJnwz}Ga)To$%$V5 zAe)^Y?=LC+Yr=I}a!tR+#4pwJHhn=Kc|K=XT21g;tqcUcm5(ZzzjXQ4=_?UJ4N)nz zbO6JE1xx5CD70RqcB~1D(-Oi1H9I{0%rQPk^Hch<*Y2)DEn4~7QVH}ZB(f~T|8BK@ zE3DD$X=sqoPsAUqZ?ITeuI5kYTPw@pS+zi)D>To&0Zn!la6~PyROd@8bqWpgM<%9@ z=1)C$a{Oq%R0qz{`Bu1las0WXd3~qG#|xIZp~KT)Z@;T73vMln9#Hm-kMfRs4>;Ot zhOK#{s%{G`b=VTSe{05i@o1hoDLi~M|JDq>sb~A)J)LQ<`R#45%QYV?*K4hoic=Ph 
z$Ye4glks3|(8m|nN=x)#SG6XrUN64*A~kuCx`~+R!joOl@rVt}8Fjnsb#>jDu@J5X zhMV*G?_sGnp5d5U3p;&6mYsg9W+%7e*V_jdTdn2OlasefjYa`Nbfr-!HYn8c4 zi#7@7*a_wU1a!3ybklDQ?DVN!f5Y=nvLgTZsmUOy9G|=jWfClx%GJqA8Su)8h{~*& zZdLtBfsdfDe9Ipp!P{EPw!rs%vUUEA^g0i*)cq{;FzUqjaJfgd*j1XKLSeDBRDW@H zX6Dbq$p1gUNI1^j3+~?=IJsBp>%fX%U=bHLcct@>5!U`(IJLVbZXq}^v09xAYOQLa zx-wC&Hh{VHiK#+qsnl*ZN~?i^pz(}9MIPf>|2RMX30(wY+~pbFZQ=qA5CIT>lKcCB zXk|EW`;2lO;0p-xf^R@3liBr*cReeb;{7b7gm*jRedKu`WlXdLp^^1|$n$RZSqP6t zaVVtUYvsJ_eG>Ux?^oLE1KtNln~=!|887pGpAjmAlIei=K9NX2Xpe(l_;$!1hoZN` z_BialKVXuy&E5}Yz5RMXlr~}yS+cr4;=Mm;5w^MrJRoI@A`E)BH+ii=?>gC%bky$2 zIg=r67)}ZoK4Rg6Q8?nebo`Mh++=!6>*B}xBlJthABy4+N8v8MwTL>uPt7-Z?^Cg1 zZ+-x&$oA4Iqi{CAP;FHGS~(VjNIW1EK0o`Gb4@f(GdJ0IS znUF2?S}z3^R@}W7Q@UDeM8c9@nr+NL{a8qnfv`8oPobjyoNQ=a4);XxQSLGR?Pob? zY3$UpfWS67n+4%{-^zF&kL91ilXie&NH=ICytNz8<6P?i9j(z$?inmbTae)1cgPwNWd+DLmkJxI`YUN`@c$r zKz-cNYJ6piIF*?2&v$rLyx2*rTVZ`}jZH3kNf zGzR@P2HXGvHXMWL@xIIcvt;d`;_~qjm+?XCVE+Y$p3&t+T|TeNOS+up5_Xz@UI7<$ zxu}bER>qLP9r~$Wnyc1>|BT3i3R6GPOWFJHBbPz@q1OZ#5Eq^gIn4Zf$8P6e%)d8v zyZw+6^AoN&Cu)_o6B8?Qw?I1!?dYA$MenAPcl%pY$8Kz#%)Qzu)#ci@F{WdGrFCn$ zTKn$*@~3~=a~a>%$uI^u3sxv?U{8X{uxUFKpaa%SxU9;K{s}S#x z@(m7g@wV*l+r4$i@MD>+?k{&RbFgn;Ciyc&{c)bC9}AUUEbFSzcD@>2wIKK-LemJ= z%~TLPb*lKFJb;^W1#6O}dnU17j%rP#mr(JS zGvLPSphpK9xfw`3Qlbvh&yexp#Y;xAxoVpx;u^`&+#0s1(ua?f2f_Y9Nt7 zx^mol=jXD@c4yRbhRkZRZ$CO2CWzBI(qoy%vzF?prTQmfDh+uQA1&d-RE3z(IjAoW zs9xU$6qEmMRr~rL@A_VQ+^5H=N$%bL_3td>y)^XZSF%@czL>qLoF1|c&>cEQdV_um zyY>;A1z}%>VGI?NSa;ZCHhkS-dGGzvBp#FCfOY;s>y}52RA&S{F>gQRz~r$0thvCuFw)>vOMOJp0DP)Uk+U2}!tDTc~szfdrGH zXP!0T*@@$8Cyq-=)?D!;$yu#|WG!E=BTlVK?}TQ|h&-i;;z)^RF$$89p$9ARHL+YQ zF^ZxhkqjkuCO2}}BCM07YZ1LBF;`6rvPvFU+(eF#sx1;f<>yK*q}1pR$PEE^-tw}* zqxov7yqHwe$pXV-RJ&AM^*{@OR**+LP;McYRP4M@uFa+Tt%(4+b6H|9m)vwu)^;l* zY4G}bg&CXF&aW;xp;?p-J?e@8ks=R1YFW;$Akl0>!mev0BHgH>=ojhORBf#BRuPJl zwx!WTQ0O~~`en30)vAe7myTLh*5w`5GitxatSkRiOPw6|zNM!3BQ%9JC^&)WiKF=w z$MqezF*WI_3n1`Q%Sew#lG#Aw)7e=~vdK`9GKp5yMgaLvOKMt({<&teUM)2&SiHs6 
zMO1gpaps|7xq-4_(9AIj=h6H{=t^cj^VUg$!%2PNfkmL2%#lELozSyK{gaYYgdM2< z8h2y$Min|7t#yz5HRRGJ^0NjT@}YRZiN*rNldG$%g>q0pqs{tg7OIt%$*j1vhg{A}EFHO!mJLnunF7G-OaI#mbhIVIHUCaAc%Lhnd_ zKY9GQ=T98B1eOAQoYRc8A6ZAFhHpfu-Y&z<*arg114w!z@qR&2dZ~C2qWv71Z6D!l{FV~wv6=h?e z>5j7V%f=Qse&D1E60O-8T4}gs-{mRT!o`Ek++lyaGb5Sylge$TXO<;wQm=$7h8n3Wmwhq6A4@3I-WBH;!geOcWP7t#>WqcVym#AL)ERL6Kxahj#J#gl z^xq_(PQSs_PUfPoarWO*-oqmi5^NutHoXoQuarPqTgj&*bp(p!{1d^)l9zWjgL|s} za#MrSY;*=2RWMu7YJcv0GY_c6_+ojv={xPr0vM{1A2T+0ECv<84F^zTNYUODat)^` zroAcT9NdHB9!}?@HX%kK z{mUgRZ2T{)1?O~moy++4ghwn6sZ8Mze@^d>O*iSqSM>M>mrkZu9I{D`#<{rJrf~G= z3ljNP6f3t}Z*_7sjdk5zX|y_-nVyyGj}q^n@C|$}UVe9GPi}O0ATyBNljEA%lNrm5 zW;SQ{!|FfE!rqx3wS_*K+0EaM%a0Eyy0BV%F=0_>vC^=h~~74?o^5g(e|v2EO#=+%i~jY=OaZH=h(*QjR3lmTTaefKwg8XY|FTe8U*;g*)FMav^h1Xua{0bH> zkm7-(C~J*!eMLAWf9S%}at#SDD)|d2ZdVdT`Z=U^sOn9z4oo~-dA9_)P|3f>(p*7t zZkY9wjM|jXm1=phQ4SN)BJUK2xZ6Q38&0Yo4u92_7&0rW( zno|YtPtk8G_VsdO3%~hu=i{U z)zpBvGJ93{^j2iVgK1#cXLlf4&8oqoKy%JI9XG<{)q)w|1qC0SI{ATZ4JAwZT=i#X zqZF4)4eYKGIyF)Ri`bp7wF>X9wC4Omxjp&y+EVQXEbzON| zVRUVK=LQvR9|;u__iUwK27X^@Z|%v%NUru~suY@Ww$@ti(JVj;dK|U4Og=p!Grc$N z_XjDgxM^;sRwhi<<;7eHM7qRF3H^L8f3QaM5?J3Smkw0pj8cVx+)9 zlYt_014+{#xF9$8e|Qtw?|Skr-}PMECH*&7bnFUPTn_6iU!Wrqjg4$oY z@Xs>FCtV)javu<}Ge`l(xepd=%NLodruFL#Ni}M=p8aNo*BP!hR+c0Xsdfe`RpAXJ z1>IoLl!C=!^}viJ&7pAfZv%3A;h6Y;<-axs79Va4Smn@|C@9N*w5*EIAAr6>RzqmV`DLkt6kfI+|!U{DVA!>}1~7|`)6 zp%Oct3lV@}5~GmICh1AgN8KfYBkPfa1NH>LMokxjYK43dbhJjKWRW_?==bOub;+nw zM9&+%2m0~;Tc#DeG3@!HnUAnj=`%YO<4UkfA(QjOqR_@;QWg78E7Cmt4f?7fS+XQC zfW!WOm{|byZZS0)s+U*2))vJ;5kYIuUwh74u+6)@-D_?2-bbYcTY;2`j4x%Rfu>X#?jP{f%gE_Gzjwr&g`&&oi1Rj1<)`JBe_stCc|#) z9UDLNd?2WOyQsNLclf!@TNjz?-ZS!^C^AKCoGp1oIq!`kZ@!PnDBgB^@9**6M_IMc zX=t{Vo(k^?JU9CcflLdfsflJ5yfdUYsfj z-kE|2mIaiu&{U`oSSqan>3d4OgurHLWeEaXWs-m>3$4XM{wxf0y(x)P^3I9|rMG-` zfB{k~VS}m=2;J<_w1R~6{VE%`Dl6rhGzEq;gGKCJ#2<%=kz}`A3%awoB643Z)r;V& zsDe=?w6%;vZ0lJ|bccEYwG`@{{InQ|W`2rIXk`hZ@@Q!!41l9SYbE+;8L5|;za)`S zPjPQ{*X61)%*NiXN?%t-`_$wcWls&2DX1o#4YWKS<~j_LOz#;s`i>5%V|u)C#T^CL 
zs8rW%b4O37zN!~_nkdH1O^&4XHTM!~ZxPZ?IxWlW?sP;bS?#iR7XxrdbxL{@%n>U< zw9mah(IPo>TxwHvYZNhK`1#DSg6*g3qD{hD6`W{T7Wl@jxMyuu%8O9SA91;j=kssB z&E|n;(TANnX*3i-fGG(CL^Y7e;=H|p&%(Ph#(1SfjTT1C4} z#5uKdsF=*|>T)?^%cO&m_;RPH7|CiuSWqa}o|I$~G__zRslwvMr3s5;8v$>D9H;X4(=v9FHV(EVwm=43&%Q{RZRa!Y|`6OaS^1m`QwonM8&aD zVEM&=E@Dl^414Wkb!lqS6F5=RBq<1s5?hv-AN?@tJDD}^WOdXTF`#4+D=F0fF*`i1cdT2-xe2@VvCzhMgAV=7`qLWd{|JR*)vPA=H0xh{R-Nd7=f27~es-Q| zEk=_c!D!);2>b`^Cqje7dsE;$oOY&tgsqJ10FH44i17yc3#6WnGLjN7fQwn^>TylJ zX}-Y$E@6-FP>(}1OCFH%!cKNmA=Al`OE@E}F|MahlG@aTHVeL+bXQkl@$kwJJS^;q zaCjZ5C_1rc)!bu{RdSfsQ^}$csra}pAT+U5?I|tH2Fyfvdt9U3{C2~igMSL?na)sz z3Ohp)ChM6O|0-$dr(0ZZUgZ4$K>hfab@?m0ba!j2G}Ek&@9AX|O-qJmxDp5}rYu7R z0Q?`&WkwenigpH^NlB*<$zr85=!lZ4FXqhux|TSg2?<^ydGB()=Y|tX;Oc&g;?CTo;ur^Oj%bx7gpPuFkyG8PShZ*;>DJ zGPjD^##@*Ruh}5^v2HBUf4PzDcZ;{D7p-u?MSm-ozpgPWa^pR|+j!S)=(QftK8ktk zNOoJs{?KJT23Nb^-Vc~YYjb8hA^LkrZ~8_vhfEVDZP-5E@ise=nXqp>>4hf5{GQ_* zyvwC!swG%m?O}+OT29sF^jvViVqIATchGdf2FrXK<1N;T@wwmG8(L;#!41=j1`BR= z`YNmk;x?|+mNJ z?P%c9#4^+4n@SgGfep;nOIOCGp2=TmEI;Rqvei?V>s7bADt~0^*bh7(W;_uVg<{8FUJ0}bSPD84vAggD{ux%0TEjFr zGFjn)wy$WHma`yZpK^0~Nldee&oxy;WuoQ6UT=hrtORTz#jAxXL07>@7idp^76OIX zT|gQj+(D~n%WEQk4nd~8r=*yNa2u;se3MSaCxzT(YRD!v9y8`wK@h;Q>QTs9*qe?| zK8m)$4QfmJ0|fqv1M;n!j4qprV$gW+kY@;c4`FKL0ik^$rBQ=&ZY{M6O@CpM2vf%< z@AmeY$+8f)K3X{2`5LX(;3mc-Olqb!i^A-)Ti8o5p;>Ii>$V3$w((3=ee&UoGso3_w4U77U3m^Fc} zrKTvfcf8qPQ}GQ^tWPjqn_P`~>SR~vF0&OSG3t4NCOC7o{#BnjJl-#}y&@F^yY>0x*?I$6CctCPs|PhC$>g2J<+Iq7AP^j`f8AVAF`5_W-uE^x>N z@*bsF`(lg?LTG{ZX<{BiKU%F0IN0P1*YmCXN`w8{*fNBAEx%f5?}-vjEYucCFTOEz<&CqiUY>dLjo19Yu2?ay=mkyq=7voA8#G=wBsc=Mf9kVY55`2Qn) z16%4qPmHNsl9=G5!ys$hKKh+zNTF#Ho#|KiYKD+lx>L8eE(~^gYWs$fdVAUFFoRJL z54^!hNDm}Ha8F98NoH60OcM8e6(N44blvos%`e=KygTvkaTzw)oVr*X6@n6e61H;l zC&&D0?+LZ40tv)+x>Jl8`~1^Ptv>RZyga^nqsirqHSG*sFi$86GlexUBYc4AlGx>^U@&W3vnY<4&Xm{pGIE3Aqz5H$FZyUeulMghr!>rq>F)t1pCx7t*L zx{ZBa5S6xZTAC^tD=(*&XPc5`QpJ%H8; zNJS2!7P2TCQ4LOy?0)Iq+N{#MQuEH>?MGqOkh1Ju_v}?5tX--7nV4&)77@jTVz4(Jao$3tI_8&2C-P 
z6{Z1-rck};;)o_vTvun;&!)_{j}1Dq2Z5BGH+NGwj9OhpNvO=!=WaZXMtgMJb|Bli z6L-Xh8m7p#!7KD#&zOn<11P#)l-4DD-z$QMdn6)Qk1*ZNjFA-QL^cGL;&5E0_O^I|F=S@!G7<7QcUy>Y`pNc4vP3Vo554`= zH3t>s98R6wpjN{D;%HInvX`R_y14SknMuR$au%?#*6x3&aD929GaL_fCx^4L_O2xF zNt_0jCh539k+CD2ADNx!;-T3mfi)Xn=5_ED)?nkDbwe-`yr- z0tXwnt;&eb;2!}K_1Pomyv80{6w62)pW;FCP|UJ0pU~bs?j0{@PKq zA7UReHa6H?#q=DtW3TfloDePYGN(v;^H;Gq!l(*@DX(qG74!Kk;%aS4bcxGV!|Vp< zs)ra5ODFPWekOjVp>-h9pj6n0?ecmum6xM*K61jrG5P*weGyfPSarwEUD>N@3zQus zR@{l~^pT_U4BTmC;Heyz zRoAiXg5q=WGB;1l*^^>8_IBl-6D`!wzXs~++sc@f^F&;mMHlp)F;}Na}2!i zd~eD^<(V*?4dzj}Ojg2BVQW%iQN#wmFj*mgKJlhW)_+cLwguI%pUeUr>nOOs|C-pLgFzeQUv##F4D{oZYn%u6*~XNzS?$mRCX(z|+#S^6M$ zQ6eGR52gBxSI#4_OVsXnl=65g3Y9gnoq0l7N0qk^bVrqE45)V7TSvD`J}kQ{75qfS z|Jyv45%}0gzassn`38HrKsJD-@bD+vk8945n7O`ElihaWI(HbL{Imi@C?lZD1U)2b zXXu4`X=$!fdhuV2P9>Jt9y4iMG-)D^Dp3JMDg0knY{yp^as7$v`)g!k#_u2hJ8i-4 zs;~d+dU-Owm>lgN^7wC~%EmkuRhU36`iG!dePun&WZjV;SEHa9Hqw2=;d zga<3Rl6J*fiL%%Fe^sIi!8&aGo*fjKg~iqTtVZyTJ(6L5xZ!iW+b1gd*Jz)Oh5wJZ zyU(%ck4lb<{0zoI1CNE-Z>T~x{_#KzsJL(-(ksfzY_zCZhT=K>zeD2olc`15EvP_U z$uS9Gp?=X^-Wl+Mh z#@wp^cZooOe^2rHgbI3i)#8YQ1~Vw^iU{);bYr}0aoFD2i&1wQPTK#P%!)h0bRi=v zBSrs~O3AgBmWzU^#>5u4>Mj+jxWjqn2|4H#r`soo*8V>r5C6Z_MMDw|kB!Vu0{#}G z5@;KXcW5_!?BmAm?qzrUL+q4?WA~JFK}@%w9QAiJ+Vt zQ#ryjq~8K79Fq(z732CNk-QnHv&owQ9xj53%>gJ(jDD!@Z@Z>)*2J#l3~khB@Sb9_rwj zZ_{L}L>_J5j!dGsw~hAFvT^UW*uy^)jkg32x@{KQN;~Oz6Lq@@=QxaWI*H(wvzNmy zidg<2Gc)0;dr6+91Cut(iJvWrq`8ufhNcMdhT`v8MOENuCIwclC!+%&W*v>Vv1HX6>*S`P z_V_H88tiP8ntbMEcP|6`Fj%)Qvscg%TvS3t0UU`s62a<*>oAhF7Kfsw?X!{j_$MCd zi|vOc@pN?CdZMaN)S(l2cHy5PK)|%XfB-nF2|`KB5%C2|VN*;?CK7ofVj+hSY%PZo zGP&vTq00^`|1#O6X`PFPFN|15-zQjaB1i|+vo6iwSLn(3B0$-3hkk@G8wF1Ye*gJsbYMTJRw5$THyt+}bFus|4gl}$c z#=RtCgj-xka!A(c{O5%3aFT!lhd)(W=Mm;b?&L9O)wz)2^cUPSIn*RX=l?gl^z{6u z+WgG%igv!As64CgmO#}r1oITgrTx@BIz5#f8^In8@nIF%Lo37-0@2Uw{Rv$pd-nf( zE<*3oqDXI=-Dhl6da1-Y;C|8g1{1F##%DD zF_J;e5>N{m0!1@O4$(0}N`!_mloWudP*S+8l*Ql@hgS>_p*wfcKu(~X8VDLOa4Hjo z%3aCHoJ?Up>DOeo@JO{?+qN#KMtecoA_fVb>^;{A5@M<7#10`dT+D 
zOP9?HRZTokS;MH0hE!GtBE|wnB3#za5xg5>M~ldA$Jq%$7+~r4hNzCUFNjZKtZ@F$ zU~0W{!%%lC4bO;z0H8LW`Z(X+^^snsv49r#t;MB8C*oQga4gQ4l@#G(J~6@ z=g)+B`@cY}@qx||y6t6L*i}25QvKN(DKD-xZkVGKZ0=R4-G_F%_7Zybe#g3~1ILwv>SG4YMqZJ=8}dsjtE2Q{1c_vk@_( zk`XI!uPOBwf2g~5VZm`^J-oUUGC(Ljx zLAXE3_CNqXe}@81@X+bE4Wd2Zrn6lZJoGna06?hbjcVf^^@UKahw1;;%uuF%Ixbbq z@NPY^F0=Mi5wTEdLJqpas!SihQvN(H0MP%PiE_tBnT{9=I@wwU2CM~Zw{)dQ^Q>|; z?=LVJ`fNNkBh0a3&>5V@?g1&N|Jzh}e58|o^}OHV*8ksi`DeQP9+yrIOFXuDgpt)N zny56_|Cfp*ARJ8=V}5oMa6vt>j~j3EHbCv}!O?*Meh>1+wi~1#kMSnrY~g#3Z}53W zo)HvRve`x!(3hs*S&%G60x_zKISa$43XDB)8L-tfSm=Y{b9#b15#qSB(jd=ddi1t}WwObt%+wIPPLq31J z3*GY!CvkE1poNq~*LDsZd->FbuYTz{|Jb{yrk0D>FJAfDTg9uFPF$?N(YVy!9TLW{ z<^acAp*+$5O(vFek8uam6~WH$E6)GL1s{9eq)cbPi6wifnQ^7iO!vR3%LlrA$fYxa zh(mj+Q8chqs=CzVY&m|#aXGqJvJcKU5cL!L=ktvs;!y|^r$HTe3Y>a76nlhbhx7u+ z#QB|Ud!Do0WRotq5|}(=BQnq5Low`M)2=o?n^=Tq9lL`)O8D9XSnoi}u#3CLkUOAn z2cKezc|1mWAuDE6qu-j@M3Br!~rDItW8p}<8r zt2-=<+@Okhfx6teuLu_q+U9sA0X%G3k`Dc`L>+%2%FLPCg*iLV5a9#*ODIhhg$|gZ z&omaTIlHZb7c=PZ)f~20QR{uF%9^|LOyji0e#m0CqSy~x>;tLT^4(3|X10eCn+JK( za2^f9l##aqHkVTjsnyKBof<-+>s&2A4M7D;(B^4H-Q6S6!gm9q(Oc6R4y zpi6a1#_r85KqvHU|9n>ciud%#Eq|k)m>)`T^ADnTPpo^__>WSZ_Y0}~z8$4`a$TAv zzp-`i8kbVt`v|_XmTl`n;pyw}n05_Rhe1x=%rSh6rxnzbBBhC}eNNQdl?(eP1 zWvdC^D_u>%c+}*}c7C<12`kdo<9!`6AqhBYQot<)Z~q*CYxMM)-CSJ zw3Uu5(v?of^_0#6bERL|uyma3yV5xxIw^g~O6LS?*AIzDo>A#qFjvu)oKrJLgL2g> z1AKFpA%x1jmFfzdytvAIEvk%p1d6)CP7%3YLftsHiP<^&| zYwwsb%e5;ygdwaPdwL9Ta+vjgolI~|vjEt#U*|L&(X5c4HXX-c-hpHh{D=J*i|C*M z2UuUf`G@^iZ~jKV`2&il)eqR5G5~GBQU`F)Y9f7`bGASl=HUm|Mj!Qi^Zj0>q|>*- z9&$tEx}a>ZLlxg)-MQ1Nalgx3`%kDp@5^>QURnlG1pT?oc|OHKF9)uSALAcn}?3G>igIGV>% zfaKImO?LjxVIqQEC){SU!7tfWD=Lsog2ifenV;r-3D+c&qVn_ZQWLhd>5doG4ng^g zk>6L^cOp980qKBPSV|(sQA7;5xE7c1iup4kM(O*=L4PC&UoVx=07Y*@SLvMjsL`6W z-(#nuUr8HhW!-IW?WvHzDyM#-D;;&0HX>Fz=BuGt$4)(M8O1JEF(;2-xeSa57mqK2 zb$C(+uG(i@x6!G0I#0m4=&hzY-Ab{3=ZH{502(1iKe2={Tjg*hLQ5 zb!@&AYt+DgWT>1vVD=)8u8qsEJ*0`ntdj#-7U-^mD*89E$q$s%Qc0T&>1T3aQr61c zENDNgwWxq`Jv&{^{}8QA89g)ebk2v|ACbkuq%oY*@hNLU4lQ{%{IB*W?Y 
zWe&IAW|2PE#03)T(SZ-Q64@@y(1Z%Bq(@SDxG@Un+8xICoIA5cwjj8BicSUDXnt-q zJ5s5m(9RR%rrDy+nbapsz)Fj0Nq|H@rNJ;uwHb?hgFtvKy`UXZZ zzXpHIIh*W2iEdVF6%_e+)Wl2aHCJJLIMz7V#)@~nlhj+?s(lh0bgxu`4^~M);Ou+d zUDlPgwLQu5_Ux>)($sK7y`ybDvsdg~;fvakjh`nDl$&&8G*PLd;MP1ycB`aZJz-pc zU3_{UciP*N%Yq$xVHT6}Z-ZZlWy8HQt@#PS+gO$8fS=PPrU9UIh&=Z~ zvoN?9w1mO*NV3jdk0@t-!bJe0c1tV)_DPV2t~*s~@*mZ559~M(o1^JxH}<`|M{q_Q z>o`MHcjk}~6Rz0rh)C{B+*YpYEG&Oun%io_Gx+hZ~PGREGdkb-fy zzBzjp6=Tmv$JpJ9If#Xp(G%2+{fM5io93#1QZ$V{y1h?E*XTK>Z1muuj@>2HjUJ7@ zu^T8H{Thv9HxZRH*6RZ;8Yp#c($4q7yiVt*o+Tew4<7Km7B`wlJ-K$C&f}164DxL3 zbpCnnL}PI~bxNVM{B&OVXiFD?N~eA}o!9HIIfN?t^$eXz zKF@HMBK1^=f@0!M0jidfkvu4KQJvgclQd|j^SFsi^)yLN-0qnW-N52)+{a!^I!&{0 z5{gS-O~A=7zJB4ASIs660{Pgf8C>F>Jbpqud3v%No&@VEaW*EG zI2kBai?yYCPu|@tuP5KBllfzXsZ%Goo_u!dMD$VM$>R!q-Yp*Ux~I6$9eUj{mMp3=oXb6$q57*G<$5p!jdLWf zXHs?LlB&C^K6UaM-Vg7y0oMEXUwIpYJG(GUj>SsYKFx2~13Nz3;WoCWF5=X*;5PvY zL7}-;D3x_;yhB&EE&$cGE)KKF`$nym?9iOZ&!)3JcIuO54I}FEv<4M(tiy7ooa;j( ztgoq8{~9+)69ZUq1Ri#eo)t!TS{!>uMism`e+?f7aKXHHqAb)d=*dlxd;^Ily3;6R zKk7X9?uDE1))tGP8-t#|Dp^4CI=+)xMv~q*@WW8BtX=TM^jrB zLH?~vuNiWKm@)?>?yNAQib6U-*ckSW!wK<_5J?MWQ(>*Od_%8-EebKKA(XEGE=?FY(BfvEEj zhzMJu`Y1vi#3GSax9X5+qy0Z&#{CXj38oyM%>%e#lS?%nJ+v3cY$!A`p$m`{J(DP& z?^=qcI92Jl9=h!RYaZK^p~e0EF3I-8EEdDPD352RrY0_xZcS*h%ArbeFvMu>r_{@D zpGkj0ArRmo9eUo!er)*4#TdV&+R1+s|BtIj6!_O5Q3Y zoXReCRthtg92R7g>kL7pp2kH@rTxFW&L(DrQaL877${kzG z=r)X_F?MmH(I!rGdxv~zXOs_p?aJ!S$Ktqri1Xxz_36q-9UsQKYhH?19FA*U9FBRi z;$X;;75hZpbA_-k4mcjAn1fd+=HNy;TsVQ;m;nNstHn(z{;QaSO)2&p&r=+ZIWLS> zOr~7ilwvfB{l;?@+21~2!p=lOQcl*1xUEi~q{hWQNsWtrl3up=x-oQUCt{Q7%;HGG zEo`j$$6|msyWw~H2j?X!Tff;zWRBS&O>%?ML^KZ4fBqq3AWgV2Gg{qaf@HJ}ngi{m z#fRo)|JO+N$CLAVQaTY2(cR^|h>sY$xWU5UpT(&8 z1LSm=2nVbuRHwut~tz0eOKd(oOdjOmXaIm%2wpaB<(d5w#tACl~o6`_luAENIIO~JF? 
zF6F^%-SI^Zh8vxPtN-+~?4Nch)2 zcxRX&<`4ry*=MW24Ke*bBjB*>2hIkRt4Rw$Pjrz2>2P%MAXuadbP={>AYLa+!1|LA z49p10DKbXbN`kFL9B!tWVL0|M*m`1aS6xn(0$ zGsq+{b~7mPRg9V5CW9+}8IXPWKV;3}I)bD`>QW79JzO2(3Y!=|5awNvmnmM zVmB+#yVK))+b_R@q@xjWb|$e&p^BsMwyhN(Hy4TDGDqnmHbRvv)nmaolF}A5yb9XC z&VW<@IY97lSYQ61Y5@=9^cW|22wI%eOMwW8`9=jW(CJ%Ssx@>itrh!jaj$D>%_yL< z=qY^!BV*WjfQOKfyxY+a{!bAvq#8S!QV&$IgCBlU5Jh=IF}`$9=pos4D3nM9I;q1) z{tg-WIjY?0LxR-$%qoxjrVxJiV3}V?s?W)xXDD7Lcq47c5iv3(xRg%cJO{2g=L3JF z_kG%}!M+0>J=Ez(V_9lhKl!hz@-tk|ljM~oV?oeTt_R$BK6?pbA#_sh{3`%oTdN1ytKb$3aZr*wH-mw&8_I5l7BvAEN890tiMcV7Xq`(n*J#yJ}23D3C%Q^&#AJ6EE!|4L;_eMq=3#KJ%3Ry|QLr;?R-7rX z!Y~zY&ogX=?Bs~TewZbHOby$V(g+r}uq;w4@#5CxSrrV-pIvb%D#L$FISj{>Kub~J|*Jhw2eKnb&G+=}V{gs@r6Gs-J`O#g{F89XP>l|UbsFYrZ*Fw+ zqfe`D`rFS!SB>{&cCyH~WgezA!0~%XJhlcRCSKHNLC)l>?xb@ctTYlK{>NL&Ot^&7L3i3 znmMa>ut${EI@2O;6eEqCgG%TcZZeD0htJsD-EwF8IdQ-2s)8n^m2r@8H_IE#Xj&cW zOeg6hD&W*u()Y%DS*Vti?o*d~Y5QW|y!(}ZvuV$q>$pV1}s6ZM+* zP`M(^Qye+vk6fyvFRA3G!Z<3=hRbE^)YR$;+zIQ%G%LNQB&UV$swv_wZFF*(98n!O zSXk-f+PW6pN6jMdSzY;CUQmlOVL_@^+}w@&BJ8m<;cr;33@K9w(Az+Seux*atW=J& z%fgq`N|hGKC9-s%2!&Ce*j%w~lm%&3+~iI@hpE#ea7X>Ql|0KFkeVr_nsmBN2<7El zu?8JO%<>KPV3|>v?VhyVk5m-njpHY#o_Xf^XC|2{WHpgwHPM`(a64=#L}ANpj}#MD zz=Rbrq3P95P71u-MP&-AnZUT0L>3dEjTGG?RyJL!Q(_wV-HmAEj{!CjjeH=Q+#Auq z?QiBUmTG8)In^y*Cr9&N0P36u3b8s(+Z+v-bVp_dJI~3aY9=P!b+W;9m;-};MQfubcAMV>$c#~pE)Sa&7h&3JmoURD#el>4U@j-_y!MiLHZzB zgUP|=ZB_%H zz=UBLel&-_$s9gw_{L_jbFZJ8%LWXp=v-w;`&#K1?iwQec|X%vD;4BF?SP97z6=dq zb~5LUUO8I9Avh8|z^prBGjqJh5Id|SS)DdsYR!D!+c5w-;`T16^Tf&H$2kS~I2LNFI^@i?bi#7B=Ltts z$`qSWmvahRa%hN{RAX1+!DGs-GvIRaPr(-9?BcYzqklI$?i%ttJUUWangM;znHTG%UIe{{JZ4(~kn1$O)B0M?e#}-}? 
zo#%t}-|-OPf<(YP%~m$YIDFt`SlN=-QlRlMALP6YvS}o-h?=_+QlV+%F@{Qig&r`Z z(?g3&`WMbG8_D}zrO~#X2VAHRiet1mqSf|2SdwOJF76^MBVOgp!lu!^y z(W~h<&z-w4J$-O|xHIxqokoAb_nUr76!l%EX(C&4(juW1U(+4m9=1?~eth}RuqD&G zFht-S)50)|xjms9BuJw@nLc=boDC*;iD*U{<@gOGD7*I(dCG7Xs|T@uUyA=`tH1D- zttQq5Yv3d2T%Bn@-~k2T%N>pqw~kiF@#av@S7*_5@#DyA zX!J0VB5EJZ)cBzPvR1ed zq{E>`g+8apn4to_KCb`+2nHHH$79j4P@v<)m=?2@3yNtH$OF{;ifU_2)K&)}2C|af zJqCObLzUw%=AT5p#zH$5uX;|@=Q>21AWW0k1?MBbJc!|ts#zfE<+(!$6IUKC4E{?x zNWcgBRExzU2S}ttNGCPzkJ=FuP{__Rhp;pnQAbVx<0QfPYkya8)%*tW#4pGDP|2U&y41S8x6`q?0>LI{6NA$0!EvbOmOV2@QMs)R;L^!cdh>R=Y*0kMHcu-s46w7LGlrmYUckb_COmGoy3zvNtxpw7Kg0RkaY>= zvz5~<&GvC+ebu=aI~~&2mb4-%h=>ofQWKXDk!z@`;^iLY<-GN!>d%4gw&oMpL>rL< zF*oW?VejKSBmBc*9c$8-4hNtjP??|woh{DU7-r2T@aYV{;+K{ezohuvUU}`yFQ0vF z=BsDlc=hbduU(k#3=!_A5s}LNi-Hy*b)JA1k?8(QdiA%tD-P0`R$Pyi<`G^70x(Cp z@ixf|V?=&D1%MewN|VdUjoePY!<_rG8`;f6Sx#adu|X}AZ!EXuy{^hl6Es~G2-0VS z?;ziEe1qTT0vy;76tQ-~a7VB}l7otXFh-ICz-0XZEXWC6$$H~z2&)_{2~X)HGIk|E z1Y3#GA-b%1Y(3X57;LaN=dYbI>@p}zlQ1n5KWiW2g>!@K;2Yf-tP+55OZ(ZP9q=k^YKrOcfzmV>aa!`e6mejL%&a;slF-}Idi_o!sBmM_ z11wb$`Wl*<`v3_xgk3HMW@nROjeIW{L_${zE8nYkFcH|K)2u}2QX^`U{1V#Cn%R?m zGE<`Lh5bz0kO&y_^M0Gty`g@M?1L}>diUaT_iB&Na$Dr zDWJN{y+?hB1)7~vCuT-WL|lt$y>dn$p{zuw&E4;zbgDzwJcxzZA$*rW?rc41T6vKk z$sUZbRbM)?cLNHqy-zSp<78khR8s%}EA4&q>x_ye|K8~nw{bgvdg__mG?ydKHl>=3 zm$e>v_8KeJ!KQ6tytwNUs>bQMO>SV?asxjqNE@+QV=YtTYOy0 z8XW|@;y~ewgI$22b+GHV4R%3xYyi7lS_*>PaIg!DG!Bd+9D6cH!G*BvXS}Poo{VY= zyt3dBeBq$Sz!y+9bG7mLnC^KI!fV9Kv>y{x)v?EBKqudfsc&M{UxHRs*=PJa&jfEwRlu&;(!4hSxD7IPB)b5CJZN7_ z*aiumXBZ_|51ka;TlmQ2_Nyjp5z>__|ESW%2iQP_@El6sf zgjPXLCKDN)gdplNQp!YFVIU-yPMGQQl7kCrwYf8PRx^87{6WU%5e?!fH#*lLm)UDJ z9cAW#ee|mpZ9^&gIz5yck?~A>@|>gwdCusjzx;g3b(!<-EQ<}N3b%e#B?<_9<0f8hIlv=%}J%U+AHQU4vLQqSM3 zdfJKOdX3pnmxbW2eegU!x)G0s{k1_yU1mvIPyGQ|+@Xga=hcg!vI!oB8Y zvA?8Q+S}$QdG!UgSxRqjXh%kyYY{J&b9&|@!I|Ej;~V@mmjoG_LL8|00UV^8AO;?p zAn4?fkReN7d)|)E2zkb*P2T0a=o?!KBt=|BFG4`T!-`9_jaHePr;%q9{1cJ8SCf*P zAlvBxD#+&G1kFg-g1O~T$I{O;u1?WQAKQX8feunt9(!g;39=WvDK{(&D|OsPaCQR3 
ziS)GL$h>|hahTyYOK-BMbuyZ>QE`f6+U8lwaPR=SB8y(YGM-FurbI~D6>W4$in;x|Xs(lctLb|x*Xof%u5 zqBm#WJ~r|EwWr@H9AR3GNAvV@d)K8>tGt*Rqcakd@4CFQ#OY?@(3Eii)$l|GF^zyo zX8=)7d9~6R)R@w3rsL{leZ1(qGY#Dqhi+mNbYu2czf2CH5GSs3ag$q*)-t!1`=X+q zj4!79(we`bMPl0V?IJ6;LM_OGeLH03c2G_iT+b0--yqNBBhMT#oALP%MQ{{KmSrNP zw($N3$&7VV^lo!DoW<(w2b`leWkr&8&CM{AxB-H8wAHCX)f=p2*8H^>y8Hn+bbw&o z=|ow=w8cm6?dYUbc$XfxFTtCJoNP-I5bB31r;}*{m~E`A|E=qPj2FO!SV`)92Dm=3 zQQJtZ8zCKNEFqmZ%jn*6K0?kS{TyN*j!8eYUxRl}@4v<+Y*GZt%dQV$&~pI!m0m(8agDu9z1E08{mj7Q}D59UMUTHg!3SU zRGjF;Mj1g!3|7L!E~4#%`78cPWJQ0!(Ctn$N0SNB=NzX?ZXhy2_N}t=v3xsZY6Il?Y3HzZI4^kffEK9fM3^fcY}M@+%87Q zQCu@ke`e_h9ed;z)M3q5hH(I1*!Y`cl*Xl2E!VcQ}BU9n~_jMrV$Q*nczA;=}=j&z8IY zhf(gf$=co_d4{J#FpRd2b9Nv~q)q2sG)(MlV{oQ1V+wsXFS|^>5WTit%o(TbckjN# zQOM};kT1tsh2Gw~b41GRO0fOa84`2EPU)Z0>$avrvmw3?haDYTSt@LW%omK$tXjsR#=s=sq%Mb z-C9^^PpW$hbM50{XIrd7LMq%B5%X7uL#ElMTdmWa2urML6ObG#ZYN zf!LfCSF!2YXtHrf{PJ8?yKGRbNrQM-W9dkFoZ2jQQM1$t=&-IyuO3PIo4q07e6m5i z^F9I-Gc{M;&d;>0iOpnl2*i1%iC|`V;n1f}X*Cc&;ar_h;Ar;5#EJ%l5gl`jPo7pE z;InSxpD|8t@{{Gp6rTy2D}EUvS?HMV78_6t*l4UYY~xaOtui;c)PO9WOo`oM`OJOw z!P)Z{&PD$0&LeTdf!VWy8=tJs>ml8z>Vo_ReGuT@yo%;T?ko%C01MgzXI$Hv$l{nk zVP}J*nGJ|IMQwygJ?4hNw27Ud#L+#}y{x#60tTJ#ht89c$a98U`@MSBTgr(k&NQ|~ zL48q>Gg}2)fx5&TfOJ8fU`KOuvRC$EW_Ti_4wc|^pLQ^4rh?dk(|6-aie1-tC*R;I z7cv3i3&l z20@>%z0}^k`Fyy}1Q^rkP6pb-%eFmi^a71Pu}D!JR$IKIQhy zcb@eL(+~rObWohBbCRKh+L5AdVhTc70)#oXZ3Lfe?AKW2kVs!XC16pD{NScmd@3>P zwK=~@qdQyNLm@m{F-7y4?DK{?<`X9wv>}7Tg?BsCyZJ12$&9o;Ts9$^pJl@6j;pvrfxf;xKSjVk2zJaFY-;h_~a(laE zy&xMfd5ELhhYR_0AQ|oWgnQQ;oKl)Lwv?xbR)ebl`SwFs{9Bqayp7N@c+>#D3AEay zg1@c`!NHB9%T1>6Z2JpuHf}VSgdi=4v61C)aC0wyLV%vFXmin3R% zB`-SkIMC3t0zGps>HkF~Ska@&?}k*SX_^0(UYv|C#?8t*#9!5b>&81gx|PAkx?~%I ziig;?zYD%Vj&Ju{i1s}0%51}Su1xm!(XU{JINw073}X)A+cA(c4&jF)hFC^s2B@4A zW1#dW!$J71a!8GJiKe>>-mtW!8}3=KWp>XUlz7^`5$}q| zP4t{3bJe;=h*dNRR9-M|_ITO1(SsQA<;S~noJLIVWPOL&73*O z1lPPl^2HCz7lhTFL3u#otPXR6133F5()7&mPG70jto@0mxU$9^=%Ms@CE#1p;+iOG zj0z9%5DK%*ClLtFF^Xvsl@%LHq{>w0JuN|7Znn}Sk&;y0s=+P;^s33UNve_f9BTI; 
zd8hL9IC)l;(+`q7-NdK_%lv6xeyldC(%!DoTzlsRLeyAR1*e&)DSv_o5V{jcfzQ=8 zFN>M{+IzzC=dT5l%e(HE-4#RuL<;l>n`{u|IaJRJxwCK&#E0o!#+pQ>YAD5 zBrYR_4Q30=mHAnz;M@j&Z4-XCrc<(jwuP%0rGF+eP%8@Yv{TX}mlCPJfgt`3$>zJ5 zQzwbP3mCIk_tEgxsTmkd);-lQOr|=;f%HPy&@QnQaiyBSI>UP>ejl4;FOnZjlF_8z z3dukS-&-J`5|T4gUV9j{J~8nk*d4QFp8GTslC$Flh@)r;a;B29I=y20_2S@sZ2=cj zYqj5Hc}UGWMl4^udlFRXJP}D2RUEDOrPUcX0|H1y6Vv{(VlQZ&T;#5kx!MB;{G$Z- zY5B>j+uJT9@ST|>nei7ogENpm<(V1(1ztF`tzFY@)fsUn#OzRTzd34fz6C~-?WoSo zbU3K36?8an;0u2E!ik-Sl2`?I06-yfVPvk9%zFR=aN?W(fhs!xOuibc%4sL-jIV0@ zn{=7dWmK2Vx`-_oR@80=x0KA3JCugpWrDT7T$(%gxD}RoD)t?^rpndlrXF*VNDAw zFRX&tnHC!eBawd32y0d#!RLraf;nLoDe>oq*eD3=9Oe2PN|9`yvQrr84e_vi{yB;j z_sd39*!N9$M@8p)wrg*jVY{n>=whIrHnS109Eqq$^ZjtUX@|-;L(jk4o+f>G&ES*A zr;ShW2RuR{4uts3;DAAaCpi&Z6`>(MHN?S`@P8q~oYxs zEb4bq5LXUR9ej0i;v?f}PkfAyeOR70+i1>Dl}im((v&+o==n12nxVAa{vf1i-y8I$ z*O6-9?DZW5fpiDG{;+ELzB!bpYZWD4VOQC{bj)q#1QqG4aNxTXndA#7Wh|H5Ll*7W z&y*{9Y-C`6qN+t!^}zJap27$O{Bt>d@lUQ;Aa7=C!2+tqIttV^?9TlFYO}B_TKo@+vK5k2s+JXw&Za7x4Y(uHCL3hyUphs`YZO?O* z>87dgxt85FL)+bvdtHTI#GrU`5YRa3TqJrrPyeo107KQU#x_irier>d8rs%BPeNe{u=ql%0NZf#i6nYho_ z>S=GR-8G#+m2ak%pbqc*7?KhXgRWh*T9u`2gC)0^Gm$KQaR*51fvw6>nY6-TU#%_O z`mnZCuERN?s@S#;pOhRN-uf-wP(5^&EsU=W5V|E8w(H< z9P6sOQD~6WUWo3T3H<`P^km zsr0Buy2|C29uv$1FV&N1?kHtZ{aP(O&swdje6NFj2zz75i_z4REmLmav9VWT!BP3F z6)m8h?omBncD?~)@thv_%d^pxqul7idqJT5(DvN-ec$t=oZF8I*!0L;74T(wy=W%x zr6{lC^MPZxRe2^V=*mSi@e+uNqfN&@k7A-3o3;;FyE!!851Y2NzcHis+sf~UUkd*< zr1`tK@*%1ataMG!^4y)3J6?OxQ*IcnJn+3;)kdH{47;AYyyy8(g1*^SE1_}&&v(qB z@=pvWtF}M%r%=vCe1cc;5N};I3dU>3C1VNS3%N^%PZpJI5WgAAecy);?=EcD*%aP$|{YZXZ93aVgYAzVy!#d5U zogr4gXFHDGJ+7~=lAR7@DTa-p{Qram%2Bk0CSb;>N6vx;X-`c^oDP8QOWo-~=-bvL z;)n%7sQMLYsg5~txO%l0BEiEUM%g+t-aL}tAOso43chi2kl4q`?p35VOl*Ou7AN7u z5xzeYLZk!0K*626R{M{$#Bl>W;*3eMgdEO_2V_I}U7rjIiM~l?B`ET;vIIQNT1P#U zG@6+@CeWghH>6?=heA8-R=sq>3jWjrq zY{|`}#ET-qp(>}6EYL%T8K zHXYBj0{?QFLbKq;IZB=IU>@!Z5Z`$Xe?ylt%D?~-xBC>ocIMML5*b(YK{RZ4U23u!K8J$OezNg%V= 
zL3G-ALYS6MFirc2axta>{utc|(?E6XYthMfn7+Vn1Y4pKukE>^+K0;|NBE8HxJs+Yshq#MZ# z!)>AVtfL-rr=en!(*Ra_X80rN;y8^G;ciP(41xiwID`WSyK^7g1Ar1728UTdM@>G3 z@(&!ybc;}7m$Jjl#i#3mYol9vDCNC7f$r3JT>~ipckB34kUW}3$Ng-OZR606`8;ra z&v7uL?W(GdU{sjR86&wBPoNJ>JIFdtmbsLVOlP3j1wf?V(FN(H0%zE$i6&fmq36l2z0-XmxhF`z zX)_bsH0judP^zz%M~0ZFWm1yLwQQ<0Qgyuqm)tQ|pAJXHU@$yJeBS8&<@$yJiCsBPoPlE&v5;Qmu z7c|L%w=92#jk7{Ckfu8`0Z(>#Gx4)$?vNWjakce4%kF6ZeQFYL_-xVYz?_25jbW!W zwjlK4z`859*u!?V#HbS8LIeqA!WTUjQ9l~dfZMsHcRY=2nzhLJ_DN0ky>?fxuC+CZ z(~+gxzJj~_q;*%G&A=RSoPUKL#6Vgv0bW!bt>W_h5sZuG(lrpxQk%)tMRRFyMAHEJEtx9p}PIN+g$WNuFn9{!?YN8si)5KL60ZS&X)tJb5u9KgY=Qb{jwYduIo>~sN zHtuZrA_r3LK_A?NE1DsBo&P&P7W$^`>ZvvvyE0^U5_4t$FQGJ2c+@+Tj&n*~M1=G@ zMqPzL_S4^NBRd+KuB%$CIX6(3%@SJ%ApxpYm#8(VyCy8{k4v;i$u?qe!om z@1~t7QA0{4YwyMhHNnsw9DBF^4LrJEo{7C%eCZwK0g;WmMkWR&_J0wkl#3g)$yUqm zE&n@IuB69z>G1|VzDJKY>G6F$B4cSkgkwx!XK5U~^? zg8&VH7MYQ5@^RNbNc#Z2>3tudGrhrWukrxB=}f2npR)i6N|Y=&P1-^v2<+KC`@QG4 zXZ8BxVnJ^Ie*3exg!rq-TzNeI6yM+(ZWca=4_&Fz>oaj!8?I~ecQ^J}S){L<7 zVt-m};~A0kXI5Bq0ujDII3mmot3Z*zFvNCVSeGc(7f_053zVFm5#JCGON(D2Tk)V| zMEQYly89}+)U{oE&kS1~6+~A8^{kN%xOyU>7A^F`y@nks-wZwferuZa;hM#F2HzIG z!5?u8g*X!8bAulYdU%=>!3_}_;xj`WO$mQfggJ3AHHwKi$I~1*Ee>YH(KMwp22VwZ zT4_bB@hB&jt(;iG@8?tE{oPG*jM&ilTS{|;XM7<4GAFJd=Y{*K2xl|FpXUTZQMjYL zxPFAP;HRAvpBT>$#3Quo!_u_bLS25YT&ks#rrg($o-f0W8A{vj^+O8wy?#%6ZK(!k zx92Dbq}%WIh85HI&7m}1s~~ZO9cBB{G53@cl%=o2zVA|Ck}jZ_v0UyAS+s9ISFYra zk%9fGsuWn&ho*0K6`DWb*PH3ZuUxP|u4hZZ0xHE8GSnq+D-%UfD2sZo{Gs%{LusQB zNqH)qCN%wh6`rR}*N7TGXMDasu1kaJf&$uZI99N1MX9hsr{8X)Mc3t?=Q+xB)7YD? zWw*@GcK79BN1+wbDW2>H)K5AUiJtXUUzJle)Yz68nt=-Co@#r(in}3h)(>rM3y+q5RCrYSv5m*}%IytA2+p6;Hj^pxkfL(>gu94F%*1GbBIOh629%&W?N zMLw>|Vs&ke+HY+Q?fJNtyuY(nVl7XSfGWh4am#dN5Sp#0w5E&{%3>4axx%|%m=a-+-d1%dKI+jBqgeb0|_ZZFDX(IZ=x$B*T8quIEXq8S}N z6F7EDm1d*7E?hJlPl2c~T6FwHWE0KWw0ywS&7<;u*s!g^_N*GTl-~<~C;ZnC=kMj! 
zAh2qyk39cr&@)?V)oSgV?rNyq!1En*sQlGr0``Xf6p~!QH+Tg%@zxC^Z@gw)HCFJu zlzY|i$ug2T;p;N)ga9pkgBB|O0yCK-i*^mR4L{3EFmr>l0joJ?4QEnoNO9RB<_%c5 za7rA&974sEfPI6(gHeUugoz`|xCCOLusp1+aXfn}x-lU-!gTaGG1y|O3KML=1m73O z^JGYma7Q7Qjtk-lpDD3)e2GmhUM_LAM=-_n;x5_UQP>=ZJ^Pb*1n~h_h3)frafuEV z#L*nfk>F(cNWLhJ5M`073v>Q)Y|iJ+AD|P?Hs`THjt4NAGf+=oxvkhdK9WGw=h1UJy5WQ@j=x^>x zuO9;PVg=ti1V}8&WWvf)+ZZ+`REo1m-~^{%hz8OQU^d{!-KhS@vEZ~V9U;RcSpoqU zwe(~T`BR&0?-PBK@=B=UWn~FNT(pe3E2%d#WlZ=()5EV!7vS&7U%N^NP;cv{qv4oA z2u|Gc`zjp>*k?3ozy}c2($H^^RG)k;Qho9Gd~%9bN8>ZCEEsn3I30jHP(WUR($*kE zwcFWN9A{pVhuvO-QVuKF5OvI+Di&{IJf%67l{K9YC*DCQOERe~gaDP#4N-`LO$qeX zF(~zjV;W;#c@WJox_|XgTqVO_2CnceelE@Xa}+dBH)0yUfLk>4sq(!bn&H;+mnb|( z;j?kmmM;5ODeyaVBV5`pjO=BU*Im0k>oy$Ev;zNH8bf2<_9coP+u`jQxFG;`TBHc+n8cz_>RE z@8D;eCNGZvH$p!+nSfmI421waS~oUbxy8GQ`;K?G z63DO*XPRv~*t>UtRk6ECK>)u+1~i+p=V2oS?~mS3k=Qp6<%e58WA_I3*vQ^=pf!-G za{zlVa%5pk1(65m2$I2FlBjw)_Dq_QyeV84s?Qe65jPqNCOHmBrE7*ikuJ8y$PsR{ zBt}2zqliN|Vz4y#ur`1u0by`_1+-=4-6X$Y@1<*m0=twP-YVWD_gx#!%3Ucp-w$-7 z#*-Q}`M+Dnmm=iRFgooogKQ6*Z;a=@>wAua5p7pgWkj+Xob|%4$2#|x_80Qwho+5j z1PwHbxM1i6>Q=ZL%vJ~PH1wZu4|qdFmQn7Eil4ENyYT?}$h3p31LP*>@`>s66>sGU z^V>QjJyqb;8dcGROD^<0*|GOKFU0nQ$9HT7Vw(mXy8?=}wbIB66S7Q-QofaqbwQ-A z72#St1u1C-HtI1sC%&;$M3M+5Z~?cJw&zl=k#DU^w^}7*#NqToSd|1-2r`l#StC)6 zM78lWbrRG`P|v0r%dZZpyhXR$KBZ%{_(~ST~>m|C2 z3Zq$ES~$UQ(R?}wqB*KFnYw5`ZH;J}0s)k`*CNA?3Zp_s#vsZKY9~32yk)<^7so}o z>Nku9qmWxQUV}SL8gJ$<=N60wJQwlTBKI3zRQ8m`Ry2zpz{$i%Y#^@#f+OP?9yjJG z@)qF%9|LfMQzyg#k|Dvg09I)STbu@7w_73vEb%ec`@3%?=SN782ifosm?Y*aM9hPq zo`9bKayYAE&;wNgcBYu0e#a+Te7r^cbjZqQH9y&`9N@(tpc0fp!8QC)7WA0#6Ts?p zJdQMqORq_MARLm+$KJ6&z&kd>9QEf5bCj8yR;DQI#uR1+(MNIucDECTy9v< z{k=-d>y9~2vrg1K$J<-&Vs8tFYMg#GA)y8xGde*?n_Qg1HSC}f_Ho2k=?(8S8x1}F zV}_#%olescx$#8TP>`a>>BF@PQX4NU7sj^c$WWevY^?8_$2P!WgV&*Tm~+A+&KFBB zU(|33l)VhLjVbwB2sfsQ#?%PsMooCaKS)JSktIGGrLc4vIwL;h=OR;#=-(1DQHi%` z;41WhB@?G=jAK03$-~NH8z;WnR)uy~-3&T54qo^)22yT8AJBwzmLd6@|2s4m`ljvb zp*EShGFWyNaAp6qpg2-^(l38R&MZYk)E!**LL~UOo&+YpRH8aa3>>yAxk!@o 
z=6MhUi!NOa6P-`v=JprNU>z~-8iDg7dm}ozt+FOMh{A} zv0sZ%uA>3y={#Kw%3gYpG>k( v%O{}nNqZ(oI_mVv?G?@41@HD9b&vdCe2lqZT%DRS=x=^{xpw833!DD}6BnJ% literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/nlp.cpython-34.pyc b/tensorlayer/__pycache__/nlp.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27af78ff07be96331c71065552125c59e63ecb9c GIT binary patch literal 35747 zcmdsgTWlOzdfusSvU!y#QFm!ZQ{s%4>|L_On@CD4X;wqdXoQ{R%nmi;Y-{Ldb$5|$ zvDr;k6^CT6z`_xvy%J-|SBPxGH8LUBCRgbEz0hJ%XZ)Du5s>jsUag`s$Z)Dh|%E+omm61uG zD)+1M5mi2_$^)u=OqGwT@}Mf8Q00@Vd`gv1tMVCDKC8;-RQbFrUr_6(RUS`K&Pxip zeS!ueCffM2Dqm#gpP-pYh>%xQ*<@J^Ij!!LFR|25kg`YFXVg=4!C8b3u_6rZ4MS1X ztE&8(Dqm&=nHnXdA)jMqS>Od#9#-WMR-8Xk5&T4fS+;}mQJ2q4$QRfwhM;PrY#$2B zqh4cdDDP20B+RMu6;&Qr-H)2e)x{lp?( zQ+8TCM4e~UQ#_hw=Y4|eq1yO)C44$3PZ4sBUCXji`RnXsW{XBikMgx}7!r&h@oQ)&A5# z1pMkQRO{>OmD;+0cHOSoj^)_}ceAoxSa+bh1UVYm?wp3x* z{d&nN?o=&j-|tP_AKP~7CEIliu3hu&TFLhNOODOFKV|~|xGSj(2rCzM>rUDAdlUC3 zuLqTVWXL| zWS|RVWtxMIZPi^gZ)>OORSLzby;JJsxZ&?8J{f%G@o|3xWA)fjk5lS2Mt3u^Gp(Mg)RRuRj~;cYHQXm7 zfP(0JvDVK#=~7;YdI*x$tsZwF1LH}z+PkK_jQljz!paxalOFZBTRrSjk5R1-bq;kj z@Xhodk}1~e^pg(t=$F;<-XBU@Y1?^(FRc8odeX~wJn7?g-B3^Z)!t)?CCyz}si`MN z)ZXXRW5noJn;GTI8eWfj2>OP6K<>KK*{3OH@MzFjMif@1kEI%@7cxEf82x}kQtIU6 zBkIfzhV~&8jfg{$7rNrcqaI}OLaq7FYMJ>WlMc0(#v1IgE@CwA1dGbFOwX|}VQTB9 ztSWP@?wH<&Z7yN8jora&Gq>txyPE6mHSxD(+ve&rR>tIgyJXsXvI5N;8=klAUcYi> zz2a@`6mzBe))lNTx9(J}ecNfeFIMZtD_fT9Av{Qb#oO`f4rZ)O_{op$5>_~-d2W0E z)@q>;6#brs%GgNeN=?S1Kl#a7Y+g6371uNCYbJk6I4gaxZf-j^r@vXMW8v?4qvnpg zV^ypBXn&Zbf+y?9Jtyg;GYT%&5OT$%D6=<6drW!Y)lJ9tcAT0{Qrp=Q8s)a6c)Px9 zZdtW`)2nRRt|_T@T-&s2$as6lV@OEkSRbp%?q;_X&yW#rIYizrW_Aj|JqboNNYt^gfDi|lw)z#Ht@ZOhxY8tt?n^C855SqsAE_){!59t;5 zY}=Ucu+MbvaUDNXQ}y(IQ%~rodX7m&mu%1DB8dk1=HzH{d~Qx~PTA6r((y6`G8QsK zdK~W7Bmx+gZSGq8qlxUMo3ooVw`{BCMtRn3%PH=g(`JyHRO;5PTQbwlo92Uw(aF)N z(dp5vqcfwkqjRIzMkmHcCni>+h%%!`_4IN_C0#*JiJK-ytXAF5g|o1g)sC=JZ}Kv# zse~+yn{&zfAMB2JByNspqiSqBSWP2R86B9*<;(|@=A=1=f7AGP)w~+lT1VHTpNRip 
zN=K2zK_lgUIuSmZxDT?FILWlh_%kajdKx&CLK;^SG{D`c?_h!#v1-8(Ir(v0u6&FY zhtU0a9CM#S1XqohqIqK8xQVuXJ~>zXPItrFw*Ahsx4&&WWUhW@%i62jwQQf?^X6X3 z-sbxAQ?+fst5&ug$J+Nh(Qi|eeqX6xt=b}2xX8hNw1bR9i1IVm_O@Ls`)Oq4XAoAd zZ~0yPM$+t2zk?&<_r}f2_e9mq_a%sp-y4se-zo9^j2;8O!`-%!dvRxN4dRDU@QsR} z@hoTE_VUN{x5zHm$Ms6tHy(Zuj1Ud{{>qgPTu|358&Y5{ zXpnnld*9or*TyizH&IB*ZsHq<@e|J5?R}>UWn9L`&EcY^ju`_6KNpNk#!+L?IAI(& z&KhTojM0Pp0pq-J#5ir361vV5O8vLjOB_1>b>P#7kBB*EDCR({fr!(soC~cYj+SAx z{O}~D_Wza$GikM%QqG5gNW(ZtR@i&TP07oI42 zF47AU@^N=l2m-@MX?f7Lyye;*)d3l8vsXET2L8$&>IveDoB|odgFuo}2D5L;MFhE^ z)?cd`>O<6fDcfneX{DC56d?SJEN0EAZ-J3P*e+VdO10vF&2kFA5F<}=@wIB*iaGB) z0dJ4nZ=lyEa)apRBIdwsRmBpmrlxc6C+L+r_$PCwZ=P`WR9}Zt_qAb9ck?Oo$?^X>tfC z=4cs;Vu5anyR$`!1kzn%24bc}b-<*^nQwlKk*TaDQfnq^PK=|gC&$NEUL-fy3&xsx z2DN?*6Azl_x^0S5Dax0~i=$>~qh2Y2g+%i(F>R4*m68+bS**~mil8O5)U+L^?gVRS zv2pysLJ>yNsI@C!`1P~P*Ym?9YtzL+$qkA$-vJ3~?}G5`T9DAmPV{Atcip%=!pFUci#pj4>ONqkjeg@5e2%0}7?+J#jU(Ne)UouZ zy@(M}dGcLS`gwd@;AIIae@cNVfaXGl1+{z#1rARQ)d%9pFVMbkrqnr*dxrB)lu<@S zkdXEZB&+xhFs4J);NXD)6%(r1Qa0WA{eW%?xy0Dlw+ovX?-ueVv+CKp&w-f`5v^yD zOcvK7A}{W26|vf|dP)=;AOvWA1;i{=FYIzS1ASvV+o{LBA0xhPIUZn#dJSqmMnwic zqbEF~y@4>lQ<~_fE9E`Elg)IUw;8u(Kzk7Oi}<)i8&qGv(UZDp^kN{-8YPB=f8^F* z!be!X4cH3I6jyL6AWpzFg=G=Wf|if}or?DW2{P#YE{9X0@rN2NO0LvhhffYod^Um15w+uwn3SoP;=qkUC=~ir?v@fre$#Mf%e^9 z5dC}@5o-0_+OVVys#Qv(Mf~G8`$pz9p_Ui}(WdLI1b5qL+ch#VX~0jYuwGFO?cF`cjPx2U-%xUCd_K0X1lNg@WH(C;&a# zsq((RP@pV1#{^(B1V zw{SUZ4d~gAc~4**;4Bt{VDG_t5Coq0h|rI+vhB%P7=GY}XSHrNxLutw%uZD6r`|mCcVUR?Lf&6}huc(1&O*W3COF2olPr4Y<~XXlPr&j;fWKZI09-%Rmz46P-t8y#|rA zoRz+ypu_CZlW_^?5x`j6b?S5LIveP3pxA*F>_YwW1Vd}-fu(J`1pcdYEZeoZKp7z( zjLt$jO?jX`{uwers+e#|wVOwvefa?qlUkt#GG4bmEk`$rgBt0R=Q3(P5N!>y`7oyt zPXg75X3TAkGQ|WMbQk2AEz|APJ$-;gQ}mKY5hZ7TwvC1dQ6x=9TgW~03VSHz3)?VD zw9V~3t1NSZ5v3Jvfh_4qdCVoDJ3tmjwO-$J(Pp4`Yd{z3yO7yyrRq)@?0+?$RjcNx zxf;%pRTEf>E{rggWnP=PHZeXY*)!nA(k=7q+|1NWBIwG&u$(zPJw82qElP(~G6|h% zVloa{gFHStd2Kuj;p&*UHZwU9C#%%ViMh$E*W%K4fkm4WS0}H{O-Bi-FHB6Ko{5C6 zd2M!Ta#A;?kw(269XC+ZM(2$W7Qf7|_iiuTDhd4uU8Hod{ 
zGDsmEL2Cy@lOxQGH%Ix9qBVaE$Pb!G@Z*^LfCYy?j?0fu{bMjJ8TJy02Lnw7qM8Uh z))a;sAnbTk7;1p9K^=w;U&$-Uv3J#4ZzTuQGVdRfu4j8N==XXt9pm3nzmDB2pOd7~J7)bRb@8>DKm!ff9fM1d{~>rstiX@yiTlD%>_i1t_ATeu0+ zM3qb^S|u@fNkyMCZ!dzFgLx?jI2Q>x1#|ASOtBI3EsPir#9cf}$`R=gh`HZfw^B!X5GrCgQ9xc!IN>6F? zbZ^`xes|a<`R=ew^4(0z&hd>k2a`^h+d5nnQu{buGq_Q}Q_mPDQx~9+ok;fsXH9%k zucTf!&ZK*cp7ar5to(OU{xwd-LmbV%A6)VuYbW`O2m(ggR{UqbO1A~#zw)R)Y;GVcO_VCq_PrrgGi=@K`l^I^a$6VMwJOLtga^YixD8AKbR^cl1^03W-0u3RR5$9D>F$VwnfZ^ zd}ztni~!wQUAIiaBV1Kmp9m0<%7$$Pp+vv5%^OkL-bM>x{#Yc-mottuTdq`7Sp z(`eW}2R@0DOL~!JAXx?2vXl;l-+@810yQ|7?eaTV1A&+Q44gJFG5l^(nBkR?Kf*E7 zeoXl3cS36cKu&&|?ex3DCEyU(b+&lf=7o}bf>a5uXV{lW3W=2|GYy88hGDSZ=uc$? zL_TIjgo<%V)j$`NyMja!mD&RBNyBuOBIc{9ltCyM^s7VdT_E9KSUI8K+C*9d;-sbN zM`LO^NYkM_c?jAhBJsDCS_6F(asOLjBtoK)p9uXRR!|PattbL>v@a$6rwlj)f)*mi z87dOGN=GF7OeAr`>j{bi{F6vTkJ9e*v9M=LLm20NY?q3-Jmxb7;oF3Kl1&=YUC(2CSuZ%shJ82?`5S*d~?_2s>Scw9p@x zRf5zHmlQb0*a5ysNJl2L)CI|8!4Ymkp2WhMQJe8GxHh320f97n6V&=}cuOzua7rhg zVA@hVEc!U^8ejxrF*(J@!)auObm_irB1k2sR?H*NMexrB!co4 zAi!HN#OW4Nc&V34JAkjiJpmu0QW}>H+TM<)OQ&8Jyz%;KE8@B;Pf6f860W#yCYol} zi*zZ_AbyxfEWWKu4QK&=7R{F;-gE4+fRx9*8p9qXOFT7P8V`qH1T?+fI#3dC!?Eq5 zQF91dqJtR(lFZAHHj$Ev1bN!!0<)0x@k1FjPQfty>rxk{;~yE&pLtrbY zM#zit8(d^4W<<=br6xJe#t*WqmUxjf&VvUb^FsT{sn7-d8kKu?M9jWM%_uT^h7Z^; z$#2c)0!F7-eqvzbo)0>pNqlOlqK2RK{B)C(`{uB$f}FSKq3Gs`d~+gdnJL9{;j}Iq z@@!1KIykJ*AosedfhYZ7nGzt!FHYAI!GXm)@l=S+bsfN3>1N)sy<{9cr{f-w%;TwW zKLY7Tnwt0A=@2W(m>-0^f(gR5++BR@-Rs)*23{4r*IxkbkZMUIIJ-o;x%bXW(uN?= zNE8i}gIFQQ-IU^Gt}~rP(ezn^dE>gXUtHmjvSL+qM^wt!O^q$;vC@(%L#LWzYu*34 zsgc4ku2!_Ko8x*ILP(}-o_xV}8YkOLnZ^kg-!xk;mrD>H(^0oJIte>z zy*a?JoUbDz-zc>-$svmMJ8^d|HeQzcwO`zq-KcKikW1apBa>jrk0pjYs*lVu-LZ*b zZdaAxjjIwB#cm)zB}EOzMST_s&-pQ=Yd5CS0lycQih0;zRD`JPG3PSxcX%oCg6;-P zwzEyQ0s$@H(g+_9fTM5}_ncM6VO9N}x8NSG0kh6s2XXw4hxJO$*A`|~}K zoRdFuh#cd5%=oN?-z9i zxv$|y%^U;F)q~IR)S2`v#!GNJIg>tXWR2tL6XJ+6kZK&MRcx=}mNsnC#-{P%N}9*V z9mPdMMp#u8$j^jmFn&Uj5V&<|8K?!!04d1F>Bhf$pB~tu=XL}-LwSZTC49^2cMj+G 
zfHQ)ZJx|&cB1qhtlAc!HC5SWowibrBEm&4Y#9YL-NMKeI#6tzSq)tweEv&J~*%~+r zYUX?my+HSajyk;O{0@Sp@I2@a>m-BacXIgL)>Uv6N&kSu0<8hNRR+=dB`!ApacT|X zBR&1=xS`$?I(4Eqp|hThz@vi(u;#G_b6-{e;P<&{AmvTjg6Vh{7UK-xQhK*&^5u;6S%WjL)Mh092e~5|j zkBhoe2H%p=2)gnu3jaH<2k)x}bPo2Ui0m$$%P~D*pVt^vWOt7oz^VDXknR0h(?!<> zn1i>WRR+w%D*SpetJ*0p;Ztk{!Oky8R(e{{y;}Pf2LVCj+DtX6OuMWP%s7cOB8C*W zwNb}27IL{XyjJty6&#t?-WIhT>ixw@X6sMCA)mLy6(C<^>auv(XQzJBrWQ|7W zj<+^82j&dmo*OEXEW33Uldb#-XEDox-R5FEa7iW^nrpJ%G3|70Byqll3z?_%wu613 zN~0+7-my2c9S)EM<@_aHNci&o!D1}1S6VAGfEfRVl{twUbsX+>sk6XGUJ+>j1YD>a zC*Xn(M@Z~H5vLDV=v%nzi0Vdw$uRPPC=;x@Qwp^FC&kGQ{fE_sMX0^zxazRRuFRHDIw`c*JYm>0)gFPErv@|FFK$fXe&_^S$yahU?Q^kuq?6u zVBpLs_m>5x5F;kA6mDeuZJJH7Lv$APK?z*PVvvP3RvSApwgw(7>(v?T^AfY8HtXt* zUwx9XfXZl0xE%~uM`@e^3okQ7!-35R>H_00KocM)usT|AKz!k1C-C9ach)yR%<-SQ z5BJTm&t-nB-vb5T) zS`BXiDcIhJYcTx8C8FjxtE&%cg9(7s^%nui*<_RetNTX%OM zbTDb}7$GG_O^!Ev2%G~PM|WtfdR?bxUV-*E1|#bh_AH2K;lRsSa-yPs8})%_&mHK$ zpm=Xs>lbs8h=+G#&_09Sw01c~Bg|*DBxOXyl=B9No4J*--9TeW({ybn1D zlj@MFzioYtK{V%gik9wfD#0Au5cn-07-ZSX6pxJv23jrV1a@=SL{Ki!ksKH7FR}Lv z*dP^c-sSRY-OwBB-EB7Zjz}Fw6AEnLfPK%|vE$-`*$TJ7 zfPTiA3dPsY!=1(>(NeuR9HodUE`W?+f!>U39+#!}Q5`5T#yTE6FsH_6=1g;9avIm! 
ziJ1x9U!6;Cu1dz3oST`OGR>@-roKM%ebvi0FMgWQh4e@an9JNKV@6XZHt0G!@*l4uZTV*uY5jC00CXj(d=@wz&P%{s%D2TJc%N(`BtmJkp-{F<4ap5fZenNKt&J=7W@1&F1uy%bL_ z+P-l*<3k02m}*H8Y)9;XNzz8ECN^wg#?W)}htbIQZR}wblY|r*?I=$;oWjf*!=E|X z3h(BeS}nKj6?0s`aN<3D`#UkyU>Bplg)|Uuevh-gqO-k%Y_A|&m{@gJwUt$*rewk# zgMz5yH_f_>y}Mp%w>*+HzhDku(FT#RP;k1!xWiFO+3}x9Dhe7A7TM=34`8}r10^Qq z`Gn1YL!n(?z^*j&6QFiP$Qj~l6>}smB^!VP&MuZ>DO{xd#!Asj6v;28et?O6{DyN3 zKsf*$0j|i%A%!5ysmW^WX|ji~4@g__tr{;Us+FXu8dijx6bZcsg)+pR?6UJ07_x?o zpKesPHPkfBFcQXW#vwH0fJ6W|68QkV1b=U#XwDR$E%721ifeToVKb~1@hbK5KcPR9MXt)9g&^E+UtNgzVQoV>A6o7PI@I~=xcC`x zO>juR17NlbM$NJE2M-E$t>Z_y=*KvIy;Wj`l~oX1Rlk*i|QO=+(4b6@gfH}51=sKVBaw6uNfh1_t^V<$AE+H zLzw&#IrKpY+NdK3P6I(KI5WT{K=V^Hn;6_7sSs%s1G{Tru>ep*Ck1ECpvK}>0nY(A z8}p=-Hy{LPPE3}|IqD`S5M8oD(8+_Dt(z-8wVeN^9KaLq}a;;Xj zun+Knw}Ctr$BEz!j1q)qFYJFf{yc90M1zsj7p|MddcBI{F(N?$+qcn7Zl?j*jhafM z$&V5um{fqC2<#BOb4g}|ZZOS#pbjl4$ol}!^}#VrggD%Fr>99UzK2sUuw3L8qLHoL zml)C5fiT?XAsi-=*xSLGRuKE?HsZ$HRNey%3g#@I3CKaQDf}DLs_cXD6)iPNU9L;} zW50~J-=p4S`Y`Rp^%cFtiB;fKWknwZQEC~z%L#-%q8vAl8#O{3#(?pv90hX{*9;H^9u<%=dVwVz#gQ=?<8we4 zF3B+hk?$vkJPLjD__%)=muCrjk!^8?LOrCWNKT=xBdzw{)`HoWXy70i0oh*mUeP!L ze|CrX0<-`;Lb#k(_h>#51_g=YA;Ojsk+4G#>Nc;p*@9Mue23T+cyTb5IP5VN{#kW= zX7u9HiL9NN-X2Cl52wOEbe0_UNs<@G{^5ZV0f8{V@&h&~5{SbuX7Z8x|4hN|1-l>y_@;juqyiu?6Ko)#@ zNM%o9+t{htB?FNJ0!riCJ%dYNtQAQ1TS8!ItOX2Y{Wv)6`~m8Ql`QHO$eJc}Zb<1~ zNd0Q*q9k3o2j&>gOPJDxbR;p_SmJ9hZziS1grs>uBs^4q2#FQiB{Pmfc}%PVvK~-l zlJ(C_x3t-nYaws~(Ps~FXrset;ZyI1hcuNaYm3XFqw$=8kjp+R=8<3 zX2hX+WE%0Yl{MZn7MVtRF$+S|$mMW?MHC+<}MkZLUI3tbCH!+ zCmTH?b~Je?03%Rz=MuL5k8l&S^#jKMJq7QEfk6C%Ffr+^fd4@00OW|t0Br{f0{9Zw z?|fC97w!m7BLcv}%C&F<2>cI_GTMTHQn85{I1ITuDj>fQLeE5m7>^!Az6O1=F#}Kz zFeAi;_hLHOe!-Q}Wlf_Wmt+$Vrf@7}agGEIIj)xd1a$@qAZK4VHqE5b=|0aE4MP1i z7BddfP$lAv>YwphYdYDVP4$d(`4MZKvMSq31o6s}#iY67|7R*)t4I)V=J+UTC@f+o zr+-M`Pp|FjTwKF3W0n~hKapOeoe5Y#fCNNAR7e6SAPNJjps|1`giy!y1i^qPh#N07 z8W076W>3vdPL5ARA(lBsD_HF1jRi3^F*7wgqpdBA;^o`Oi1%-=SdF#O_59^PYm2uC 
zAr1<->}HSnsk-ZDwDCpn#Qi2nh~Gh*iUV??94trWe1n%&T>K2T1Iq#wSkoT)s7&0mzGj>fd*1+T7%kh!nkS3=~*KW za-)}C9~!>?#q7vRwj@LlQAxcc$eQL9>Lj3RVkM>0Ay_orWcQ|cuWb;ZacIxN$}1#$ zAUhy>o;*^s0;U08%9$WstABxPGVAWuRDn{;)TJq^)Q z@!LGSvL-e+!A50Doufptbq(ZYXm`uQ+1u;Z7|uh(V#8(28m4{o6Nn{xd}85Wh8UbX7-Dz|*AiYY6l6?5 z&~m(gj;AAx=nP_-p`+M)6)$kk5oeL&@H`+4z7#;Tk#}HAcjfM}T$>&))k|mWb1rQp0^Wsx(Z?0LwTeNW*bj@Hgo)^Hzdb)MKbP%dA~@1Y z!my?az6B#e5&M7T;r)u^0SA)%ui#+Eoh`W)@D&{m_l&Y7A-bh#-+S0M9Bx343QC^Z zNvpXOIA+fC$Z(n0tE))_E@9}#1g^PQk62b>o-W|<)T%2!$9hRM$(tmi)IZ#pG_?X% zS{V$yZJV~m#56LLIO&iRhsMW`d`QYkiU<$eS6ilRJ82WgAgnU0U&h7n2KA zT6wjANShU~1DF>@u0=4cY_MALV4^#8PlaDBEsMWJKh=emUkOczz1nn$SGx4!g%CY* z0P-(|n~&l1+M}gZa5HcV=O2pvDi+2Xwgy9Zmis4tYVSVx{w}O6@*NAnzMTIedl#jr$*|`lcUN*0&ImAv3e6l+XjB3N5RPCz|W&$9|E%z>mxQovtP36 z$$;9!=76K9I5usd;@Ce8LnyXT52`aF!GB@|-on~vxOOJ&n-eX)kMfqXC#^pM$CTY< zO;S9?NbG;Z!L^=QuEQe3vxq<`0rc+MUREzU(Q6qq(1I6ei;|S|9&&N^sx}u=!G-rM ztc2J@8=h@}(=LS6cngZe zLD6{UN#J3BKiaG)T6T0QAK_&(F*y0qilaY6emws1^x8$oIZHPokghUJ)p@CW6hY{FTgIXQPGMqUMGFscI$r1L# zFUan1M?oAzo-~ZFFoiZO%B&8jBetf7$4j@+?x=UzLfx1HbMc@B8M%3I&czU7`+Q-<`kx26AqP01=kL9@J5Dy^d|&b*wzhH~{6ktKhVpEa^dUsDQKU zAll*P&b#{i&3Bjd%?FEL9)df4FjN0;aP&p-qhXa?@^e&42cSy&232ajDm0H!Wn#iZ z-x6s;rod$v2kV0o{}e9-r9@4u{KB^k)i@dLXJ-zwYA=`RM5@Nc`^?$L?L9xWTXY%N z+qiT;rbI2JXtL_UVnuz)v3M!+Qs%|xg?!rid${mG>!S0s{3*DpSjE19pV?!6($qz@ z(C>+sYW_r{bk^WO1~wN72XCz^cu{z78H=#7e^D}KUie8gI1BqvuiTfu7c!PZ93 zEeCEi2PC7j2zx-uLri6_^aHXTkoP8>6*E~c9Boh1?G{>QKejs#Vj~dWu5cD1ucac9 zjU7hOWgZ{*ui>IqOYlsvLMX%3H{tjM0Yj8ctuT_2o~IrPk%JCVP%%a2B==kfT83S4 z;W5sLA-i~xha8Ur5eJQ?Ka_Y7@frS8PsM+cYnI>`1v!Q_NYP;k`Ef(t_t4a6PkS?? zuZFVMs?xOc2Pp`z4qp^bpvc zI2;*D=wO2uYgr;eW|O=v05)UWDzf44=*!dVz_3L6evs+&T65rZXF<~@ch zx-V7&-x;C?(oC^>1$B+AYdBox;0xYHhFDm_QCFL|B$rsV{(2E^;T}A`A*~dLQ4ic= z(iDeTG~L3VNM&LZS=3c=9xw_rAyYB0D0OgX$NN?VuaA>#0*`9BzfT{`H~9_&bUFk? 
zjyXKl^ec4Y!Pg>8vuGPkh?wjzmPoz%M+J`5MwCNZt3XDkF7VK_Llgb0QTzq~h_KvdmG;v)*Q8J58k()i@N zOfef>#Q1U!KYx~0p`NN<<))3Z&|^=4XY{5%Wj92p>UQA=1Nd=y+yJP*bBb5ZGpN1x zlVLIH!E=dE40$NWals^$cnEiiYba_yGAIW2G?0i;>H;~C z*TDe>3>*T4P*Eenf#x9^`4(ogtzQ}javLj4LwEH-Y@kP=%g|i_d_rZwj{1thIn!tzb#&v44!vjD1zO^>%;syV$1T?39w`}4uy2rP1w5wPB1(41WW7y z?RVrwYl?+1DO zG~)10X@4gyAK)C|LzEh90RVzV2oGXBWPjOoKu`~-713XxETpCrBQ8~ir0re zJL155FcR3Cyh1aQziDL09t7fXR8Gf=`vDk5Z6i_(;i?N5LT@dwig3@D)!BBsYayRz zSsgvl4@gA$4k}N?shqh4GD@#aplXsA?Ky4p3jv+74P+2iAf^-YyntB0m@*JG1`H$V z!y>2_s|Ttg{h)D40U`0q3~*f-P$K;&Dw3or(V}*l#!#7LDSR`odx(2AVP-LlXos97 zs~X3Y3GwHRK^)?^Epz3EW6o)u3YHZ)t_(b6fF>wM3^kPNoADeO!b*e50z40N9;j79Dun+r~(916=CR3a_c4g*hdmp$tI2EP2h#OxOl9GkchuZ2&D z7(Rsuc?~jgCE!hJf}_hZAF_v0z@fZ`naDF+TYUi;{3mh~Rz;l~fLHV&ME4{Fbji4$ zdaHyu@<-;vjppTvG4}( zjL@KwoC))k3`^d(p`+m`!eQ=`ZQpVXlQ*ayo=_MJzG?$TBOGFj3Lv61@8K!hnRID1 zLGf|$UT^$3&2&bb1Na-9zM>qYj?Fpf$3YJG5#(@2iatv=hpK(&f65EZ(~27j!4T2E z-!#AtVIV`uEVuPx#^V7pI2#7-<9(#()I+>S3lU-ef~jsmLN1{CDb14~z7(_uKfRaL z!;3rv7%UlOK72)qxr=<-)3*RIFc8J9k9TS{cXO732=p|T2AwRaLwGA>?`jsy}4up zc&m&tha8MZRb;~*Y|rI}8Wc*DbX}idqtCj@Y0XmzEqUDSrsHkEef4vkVlG0m5+h|%Kj7E92%>ZhVXt? 
z3y_}{0p#_N(y2jzY6ast@LHlbdGM}f61`%FtX22PD1vQ|?&f~t$lXM@FcDAcWioBO zJeZK0c$9CDAzok@?jZDn6MlInQ^kR25iEhIK%(T)6&Y1=0FoIO;diid+HOIbUf_Hh ziQ7ZGFJ#8iwyHyLP`EZFUXObZ^Yo2(-uysMZ{3aWM|o*Z9^i42pT_E)dL7f}ifccb z#ODf*g>x#UE3!)3oocY&6g}P|C1x1#Tm9E4YVx>%_-PI=fw>b7Pboct!IwP_QPod4EHW9IUt7rbW6&)^FO z9yN|gci%Kea$k5oJ1SDkVq>y>EikMcMPU3j92bQnm+BM;xSJSf!CeM(1dH#W71i&O zVV#l-iC>bW<%%P&gx%QX zRTaw<-b_e|0B;KLn~IYj4O^$e&;$4G~4z&rK$cP zXmr&szo*}rAP}*i(Pxq493A~mShTi^DU&^#?~NBrzBgVheosV-@&obG61iSZXvp{J z+3mt1!O3@F3dqesm|ew{zR4er^3=cOZ9N5GfaQEQ7e=A972cta`CbVVmg09}R^T0d zEifo$(E*5j?%TLgFP}1wq|=b4PQqk%45qYU<2+uX!}IidVN#Rt^eNeuc1+$xa1jQ$ z3%H{p?ifzG8AMS8oj4$#n61+{zwK zG=b(QQ%xLI{qu3GRMqS_N}SPSa}W zMU^hFf=rE)(U4bJSr&L!m4;Mlm=)(wR0Ka!V3zG*eAMN067qRAiy^4m2-}B(@~GD+ z8_IiB5D9atbWxSYRB4>8{{#bqx*{C^5jesA_ypBORVUdoOo$rd>69v6Vn4BnSCpMr z4^Zc6^#qS**m<9zdZ;#jUJRek%2R|~X4kSTRQ?M4nAxIH61ntijq$H?1Q;1@Y#HGi z$A=Nn@awAd8pn#IqX!JNkbV7APTKiw%Fk>&l}dTV&+Ix@bvt#akIKDO zwNSUR?G{*Jzh`B;Tqy+^oYFrW`y<i+p>}3~1s6(ydJ{bYDLuZOr ze)4gL^4in`5UWo0r~?@ok2}@wW#wh$r=e~xeO^88Qja>-gAVlw^=(t9Q9A?QOz$C? 
zVwFxjZc`7xuNHTIB59@aXApjK>Bs7EHyiP|hu1YjJ?>Szk0h2f^yX4kJ?>Mxuc}9g z(W^Ex${9DjF7*JE4f%l3b*NKMQq16Cud#?ItV$0{HBc{PdioLi0fnU0u}6LC)R{2 zVr}yxR+d|HD%PIuG~BOLYAY8vE!RVMkp7~#?bRI2RhjA&AK68$ZA|do*538yLLn&n zJqwkwk<68vj7ER*lk?ZSVphtoXVz9t{*-W5`fknKa%@g~vslBD-}Oe!ZFk$MRQAyR zFi9Ct){uKn(n)6&T&y1CibqjqZ;D=Qwex|1C>HVgj&<*t*m5MIdp2tNJ4f3^#k>vPX8|R#|r5~l^We8*}WQg=Q+^$Ll zuqxZ!vGzt1*-bTOH(_qtR@II2tlE~dvS&`2L2gp1>({T#OgFEY_s2&jMkYt5MlOv^ zkIanBj$9rY9~&7TUy35ij2_X`%N>z)1wAFMnHaHZWh)oX!e%Br%uc<=%ZR2DvM{dA zChNbyJK~YJHkOU5vE^Vj4NGNoU@n(4?@yQ$<|O`2;ol|mQe0~tU5|bu{{2ZEMG^;% zl>4bf_(bA9$Wr1Y(b5;86akUrNvbcfGca3BH0=3s%U(-X-x0k%VE!!bm^)s8+ZpE%EG*PoxN zZuuS6lI1wop5Kmso1E}_inU6`7Ky?|4)()sWF+E~pRu;K>}ttRBO^b9uu^T)@8CC* zW)Jyo91*`eZdSf4s%E|?L2UBf@!0w865r40G4R{mEepA?Y_G0D@GuI#QT8*Q<*eCW z{;>WQsm1!JRxbI*gFjHtG#dD`;ft-?}<>7RvRhhydQ`rjH3k&2p`A?!V3OFrltI% zGpzTQBkeGTs71OsP{s)-Ez}PEog2oe2J`L)v0ZQAwgVTu~y2J zvIlm_srXzBImwMzD>W^3fv1w~N-`7j$b7A83jq;s4oN9uBl?s_vUDyjzoIfbOv$p+lsZVJ+KoQU*~fS>JdG#THAz}4hBEb`JFqK zLk+_9+j!@9Eb8CcHs7%FJ&A$#jTOHGLa=K=(k5Hbml@u5;_?t5_Z%+jST9I^zmdlE zC4Bl)M~w@{%SK;kCUrRdX*Xg-B%b_}WPT1Gw~Px(v!=!a3LJqnSE&ck!bDT;0c8X! 
z?m?7qq||B9dWQ2(G)hKAkkIwBq^bA~ETxT6pw!9(19~F#sfBF1{wD#^6k3UauWc1J zFxXABO{Ub-6_|a~Afi}Lqn9kMNzR?$-dw>_!_p~|^MjLXr2~{K)Di4bIQ@KME8DII zy%)p1WjP*DgId+OhJMr|pV6}&(QY8jZ2hh;Z)Y=I=N98Ojb<0ZzK@Sfut4?n z8ky8NqZ^}m$|y1<{3D-!9Uo!#Hc%&UQe4s30E_@j3cn&61yvpY+ZFGj>1WXU9S-L~ z{U;jjNiNeJhoX=JksA7H5*r!LK8tycYI$GQJbE?;Z=&(Ikq87O14Y}kwm_Dk6?4_y zzNsn5c69^LO4H!n1>w7WQM5xwwszZ`8s8%tJ7V(eY>>HWXCvbx)i+Ng*smdFX zI2N+0`kC7i&?9s;rdBz@*aA?hqo6C4O+$pUE31CHM!PaPxM}STPQpPa1cD{C=^PyVxi!?hisiYyu`~u?u4>&mi{R| zQhaZBCe_P5$XF;3=R$7Kg)Or}yj?$X0Q_1|O zY{iT?S<&U?ESd!IQ7h|gq6@iJ|5`0O0FZXpfnSK0GV0o(i6Eg|+CW=P$c45A0H{)~ z+U9T#f(*n^KGAuU*Q$_5OIhg)N;%9PJsFpP9sw4$Rije2rn7+(28tbsz%FzsPk^wd z9$472i(tMw$C6#G2_6x`x#%pU)077~;Lni(QpJQzs@*sOt;-LHnA8d_kny_VX(75n z7SwQ^JeN`Xg=lLCt%o^`s>I9a)Bj?Ye9x*V6b13=jv zzcg`qb}C9pWnp|0^-Lsm&C4^B6BD{A^)%{TZ@Y?`*4wYXKmP@Oy?5j0^`g*!&_zlo zefS7>{kK2`w17y#&!9r@z8cGY;7$lUflwom1tES)QF`QXS9SZLcYzI|FfhPeW2 zeufJE6Vd-+rdTW~^`$!>K#Tz)2epP>;3hK06G)f%vC*NN@ABpWtQBHdFd%RS5B@lO zAhIcq7r}!+3m=GG#US3Vv-(frgLXE`@W3Qsh>((j5FiplSw5=(7y@FhwxOWKBI(j* z&=xdZKkQOoFYPEuA+4v80>VfiGvm!6K1AZv{LwEzXxYGz!}0@$7ydXRKic(=fv{wl zIUo@9Hx!6!BJ6NO7;1p9BMo7w0m24!7K=KX<{KAW6C@K$V;gj?ejyT? 
zNQGq;X$4Q@c&?`_<^||8w0-!7Q^XXJKMFQrscHcq*Te<-1qxm|p&rXLfn!UE-lpLJg)9fR>?>-qo+bXi!Jst-vkq=Mq(WF|L{xC(z66IQ_r=7M0ON=gMlnP}0o89FE9SOr1u|kTd~KhX74#ovjaO zr8XqkD4`UF1+>@w&<}Uir^*cgKbNebl(u&_!uCCVn z_FHe!x{J|rJ->^FUyhsW3^QpPbpyZ42KyFchm%F9GtWnT4Gb#Z8P+_X(e-uSMXY>A zS1Q}3CD?B1DQ!>gj=RL~47()X8Fop&lS$b*zOm|1=aq7shl^rq4~L7gr~-C+%s7@h zYn(QYrlC?(tqwn_mr^epC(~Us92UufEfuH>< z-4+P^&X*Vvi`z0kDT0r)!3jbCc%lI;L8}7|P2hr=!^fS#1;ie7p43DL<3_u3UJiaT z5VS4OTSSEkbeCMRJ<<-Xt4LdD=hNCu&_ZZ1X+2wH=r|dIR^>aSisk*9Oj#<7DaR(6089j$M8(qqIjonRidYh_d z1gsHcMI$f)?t)GS{}L98JPeSoAn{{@Fof3wPDw z9yt_1#r8ECj zNtsoqkwk2Xj!O%)ple3pZMCLbCgBl=sx3yugGge-wt`BcUs~pkC@pdhLtEr48MiS1 z1~^M7xuZOZ$!Y|Wdo()K^?Lg;xYl<^%`exs&2_l; zP&c7Lop?7zV=cpT#==^*0lZWX(Wtq6TgT#~`FHMqL3>}o`C?~h2dJ~Rk*hgt7lW*F z>)vLCP88DI7Kv%-Yn=n1#K|SSh%ykX0vuUN2b%A|2w8^Ko6C0iZLEPn$$kco7MK`* zr|8G%(*z2T&Cvc<8 zG%SQ^SO5VUr!pEaHXg3#R4oQ++LR{`L6}4y{+3d!AZ{Yxe-m6p=o9i2DIb&y>VUWcMc9q@ ze1y-F0oOecLc};p)j?OOPqNQM;x;^$pc%lYh!pe?ZG7MUoM;cw7~ntIuO7gK5;RyV z7nxAFC)r$tmmyS-Amf2Zr=fpkGCCCjG)Y8!_)K|l$b?g+))_>)M`oeU(5*bk_^3@6 ztMVAKdJjnO3G2IHzjJ~*?SpBssD*k3nm9D!UCYAx~mojVR67DEyI7j(|QItqE#< zFr1~=cQ~h$o+~X$9#(xEcNMUK@EERfRxOsHH5HI`q?=W!ZF_K&c`9Z`xKbzFELTNr zEJ$L|`6OcUMIgSLu)66MQgo>mi`#&!z&-&NqEZ@%4cgv{s7t3_5q$B=ax=oZEKf<{ zITEgTStgoh)>a(sGsbVQk-?Y~F}J3gL$V5T-mZtD8z=I$ z@u+2{6wigzylBX@G4*O-u|k8~E2ajT^n*o;fE>R#T~h=H=Iz8&Au?BV0Bfb2dCT^a zarB&ydq6Uer^3Amlpk(r-ZQ5|tR7>25aJ3Z2-|Xd{;hYfX#Y#NQ0!cJ4y;3}C5_G*Av=gB*8LikG?0bP`3=XARzs>CS$BbwA3ARnZ+$E?qG-mZ--{ zi>eHrY>2Hj|0||O2E({o&Awue>0t-~nT|Q~1>30~Yc*x+M_GKsY`IV>LVA$D5C=L+ znc`Jk0vzat00$xh^y{#AL3sq%H$s>NXb^QKk!GOr5omF~f(ziGLya?93p^t05Onz? 
z$~pla)L?dL1qAu&4SO%DDyz(t0$EX`gH!_v=rnAH^NHFRQjsP(%kR8eu{Kvq*7YB- z0!Xat=aQ>8S-g%2hcoT>e)ne@bgiFdN48vD7XV-E_(peB*E9-X2T5HUx-~+s(6i`I zsC)pgPDCJEy`7+w)|vwZ%c&zH-zYXU$zgZ?DDKYr`ioM(){Fas8`Uiwa;e)HWD*Sd z;lz+f^^rNYZAj+|L zDdK{C8e4QD5Xb_4h;ZltGz#x-&w)K!X&|lZE%ljha3TkgE%yBdL?=my8$SV{$Tm%J`gdBz+W$#7jnhs@_*E+g{x*uG^%I4dWBx z9;3)h14dX?l*mtpSTKG5wAJ7%=eH3ng{whlSSJ}Rzn$afHZOxiNcyK77pM)`T{3{)FLJfUT4TU0tiW9tbE=iz5t*#SeJU30I?Y!r$Xt=R_h{Su-EQ1%9M>FH7}nhP1UmVfL_TgV z{WXD3+PN^rE=mCZ;S6CEtW&PXa8>H{2HXR!Bz_|64m|=iMAjWvB$ghGni=JOU+@XB zT>?enhO-~gD-Sz4XHXetk0mU|S6E)PjT574;K5?BFoSJbVs6wnT)j`LM{*W08BGay zZ^7CqO*3H5WsYbzU>N~iVCe;10;~kqM(Yg-E?nn?Je=D0+B&E>{&Sn)o*8zxtn`s! z+mIj)uy<&h@LVL`{s_CINK>0=JQs@u$ErR_?F4df>BD9{+lNh7Rf`3yfel~;TYK;T zhC8@K)Qo0%`F^z!?CJuXKDD&GOf+FDa2yx17wk6kP~+y_84A4UZr==DNm{!=NQqIC zofvYo2A009B} zJ)ctEoK<#e=9_DKTiO$5e@1gw72ZaUW!?anaj-%3plZEsRcmGQ9wZ=4sspP2w)GJP z(VW{}v2=G+{pHYxm~Uy{Aj{69cx*%>&o!BdovCW3tWrt%R zga|d9p*fb=&lK&l=x=3fGzO>2cg&noaJ7 zO2(L&ot~XE%}di$$z4&&80O^c401B3W~Vi%MfDjUZTRAk`zUOD8ZjovF2i-_@+@|3 zUAi=uh~5yxoE*PAWty|om&W1@8)KN`Q)4J{a(p7(dNnUx+fO+w{~e4+U6HG~kBcA0 zGncALz{Ss!i-S$U{{j*oEw^GXOnNs!Hcdkyfgc7|{#MA#sU(8Ki`f;_QghNs?fx8C zV#v{lS`2b4*8V9)R_()o{7sPSXaR6~(>bEv_oZ+KiPu68x7u#m05hP#QZj)r{x|T2 zE$svaT`C36Z!=Se7ZDAzyc_0a5Es8)2yv{jIj9B*Yb?oG zLh%tXG-TOidzwc!;q+ME>*eA)?FkLM76~H*2yGwCz|O7Z%#VkP*nWj=td${PU6*FD zCuhj=Ku%1eR&!h4N{$N{ z?z#u>Rwr5->`2tNkOsoN>T$Lgb+#9g?L}k@>#FXmma>Y}luVdoP!JvbnptzP)z&NS zl!mkB=gpyu+7vPx3QiXpcPL6J8}t)NMK>e1BHME10gM)Gpv0sopRg8iD74E9*p+5} z0?`f&IYV5nT@J^kWCK*d`3Os~7%ozNW2IGDV+8J)X6S6UuC8?E`GXR-qIk`5W`3qvl(ZE z_fK%iW4|oq1N0I+yM>}Tbne7Q5WEm;1P597Ul!Y z0lqEh0IkmOMu=2EV(^F%fML;PPScVGTCjh;gnmLHWe8RuNJt>kB*dJ=a9+yKcEPCcFmPbM zP}e-Gh!cH<<3|px`p+BX_x`0)cl|W%BSi^AFueiV@sUCNDh5W+T*Cef%}UA6aBd9&HY$3W)vjCG4Z+=na}8v78oh(fMbEuj zk_LPTko1jO+4CBACAa3IIdSpXv~Wm(9r!B9c_cBo%S|Ow(}C}8LvBEo#XAO?t7}uE z*{s1Hy=h_hyb0E!TN~TjL}7udpET!7Us$_h(vM>XTUaaQO>Fer=UyOB#Zw|UsiFv3 z*$ew0u0GEi08w1zjD#!ZO08DGAr_H*fX&%xCilw#+(u2M(PTpj$xA9g*8_Hl-kKyc 
zLN}Q9JrIQ^@Z%1Ea(%oD6Cn<_)#+&xjPJoj3!E0Yh4^E0_a#O&b{PyeatLEdB=)v( z<`krT`iZ#lK9u*sd4f61XM$}|S_=P0wFY~CY)K1?QkN^z{@4*C?)RuSnLbQAeq~8- zVPX|HRoM}7>e!8gJuCZ{*r`ofi5LpU0In%uH{-L|V}ae%!z)9`4I$%W)7W+~joniS zs;wgA(&fqNnHe2`wn?h#>50oh6!9#;2J@NesjSujW1$gsjDJKGxh=cHmt*#{jFud< z!E!%CK&D9s+njCe9GlEGxh+UiJY9~{^htaI7HSXw`G$A(fKkAjhzR1}W#PeN@s$=048yqcyO-?AW$)}S_#Y2j3 zW4KYnv|98VFUu(}$8gO6NZ>&L8KWBz!XcanlQCWelyDy4LRakVN!gCF-W)#eH*h&n zzKaZrV+0%^ENKY^awzNwyKie@>~-2Rh(17ck?mD9gurpVLO_AmMhTbGBl_;)P8`u!0#Ec6aYc{) z6S)aFazPYQNmxK803L-Hc)W;XFgh+S@ZSp)g&DZR$e|VN9_OI}ZA42z*wa&!IQ49D z90&DGO!Kgo;E)^gLLEOSIg#es?Ui}`|14A#U`6mc+7HSl)^Gjvfh!a4P3J+@oNx0& z2pA`G2;4z6+4(*$`Oa{FT5gh}QTRusv%FC+^Pm;{>mq$Uj{RZBW9JJ39_irYp2W?! zw0TxASej?GV>@ski<*EQ%J$nkIB^Na>zS@oMU#Bwe@*wiwP!*v^D>Bth9& z)@vqhn#= zKJSKyFBLHY^2bpl2NWAgv!fc=I#^b?J2hs(p*>_02?6WJd&6cBATDFwmy8J>2JrIS-e-OJC^vD(rfHy#i5E*WY>0nPXp}-Dn3ca`>+kG&BW9f=B zByhcPwah0VGqis>)WWf86^)MVdAeW_>ZP%eae!tj5uaD@j89P0$$mbSGtT8#tZmAg zY$*@K>q?fA=86BGsdCK%LBN;eBch?Gh*g~KAc5n&HluTC4aIC(W?=k8dW~i!U;qIE z5Cu^Y2_S$d3}}MJ0HP2=98=>20iq!8u+S(#6aw#N_z& zGwbNsYA00YvTz!Q+=m_ds?)@qo)}h#Is=!@b7C&*KOR;ohHsjHSf6 zirhPgGVj^Wo{4=<tl3^Py>G zgU=!y8)2fWSRPq;PR@YOi7&DFuIvetX;N5x_s-42!h7%Bz6+l2*lXNnfHSD654b$A zF|w^nr6Em0$dHsR>3}>5*=y3LT{bU7PsKy?;L4iV#RQX-DRqt##l|#{m%*J)4<}=< zS)(`*sR}D3KvCD8hnWWe{fz!NaLxa}gUZd($xO z8=pWZ(c=?~{xZbi+rbdSBe|xGBHp!&>(v^4 z>Ll={6O|(+5T{%jdb_rX8yOAOjoAgYs9`G`c5XFfh`oR~eJK&NfU}m1>xqDO;ZF2X z*>0|4kSBJ&V#{D6`rB{M&B@P2dnFMZ=_FxTLj~W0abN{IedXc3vf}~&k^3*=xW?^G zxfReA9Sv`ck|iO!rD)%K*dZKlHI52Op2bNcxfD2R&hbERnb*t9N#reI*Tw{{x>%1` zR$`4V;7HVpE8fL=Ni|5DB%%wXRxfC31*)_(5ct(L?0<=AWGHXa|0WKNPZ{}ul#>(@ zzO*klP1#nmCQdsbsdRoH7rztKo7NduyUmEl3?VYO71^*fy132vB*=+?UHMbuPK>ug zZ0Qrf3agH_wu$2dn(IeRXqxpCJMlUCxSTd#G^Dy8mWmyC%sTH_KP)$@01_0!wu)qdSbWz0bNy-DMfq@r} zc}UOcZ&`wA1#eI5F=?6r})*7}%q+~})R3FqQDInfK;457lhB^WcPwA;bpx#Hn)!%QvyP$90pZ~%j+~$Ls z`hSC!&x^kdtHeX#6{@5IP$hkXD%D>Wnn#2(G2x+Ti5MYM;DYNcBDwQhyby~L{jB`E 
z-!xSHShQoEImoKLP!iYPG4V2Ub`k%spW0b*nX!t<$DoZsP3flI{<_BH&>9`=*AE$V}QSF}{~M_a5jzbl;gj(@m$>iS*L z7df{adcZb|9H$U=AETpfV=whWSZrBG!9^|&wx1D_8G6HB3X?c#*IC5__*wqq|5<#IVzg)aRMt9 zEmLcXWTj_h8>9H(QnwUcll*fLI2m`kg(ovZA_?}%L2}?O9yza>zE9#U#Ao8&P?IL|M9qmWc3aBwJ%CNjJ)83ws6{nGduh`Z*h5Mu)IgDaI` zap4h7;yaHTbqnc8i3tiBcfSOah8_U76M-XR30-Q?Vl7N0=xma?1(0TJliY{bw^a3G z$nS)s5sv z)IwURKwGF5o(n0AhO&gDJQ1CzVXsD8tj#(95UE9x3w1`>oY3Sv@F61o35Z6hGiFC_}14^+!H0Ex?)2P2XtGu8m^^SVH z)i4@6sI~tX;dvZV87v`=luKQf z0sOf124N{&_)n0Op=qD(K#xF| zp}YX{gwB9H^C5HRi%7hhtc?4Ew;_`a@^m(STf8tCeC@zc1htFThxsvL%ktF;uw*p? zg>+X;*tr8vFf;fVOKczQcjN_We2o{A@3sGdLkPu7`gt{$u(b(gWnWX4mw$B%%hVJW{NvNZyv_R!(LpaKH+d#{> zPYzTU3sN@BLF#HCyoLzMA*Cfc$M>|pKmUco?VG+nVngt8O;cv@T#Unij#V3x-mp?M z4oj73rhY2vCAT1{k?M})W=$x{(h#}DuS4RLjTVqfQFTy5N|R}B_ls)}t1?fAfOCWh z`V{`1A2J{~Wk*_)c7Dw8AMrvY{Kvfer@X|upbk106dZ?2 z-qJT`@cdsf+g{wLQyCm80z;5-CKaKChZv5}96s&~!mpgS#QH1E>S`C7>b&_8$@lWKIYddk;i(hJS~UAf$5Qd0!*&cXMe^kTIMd zm;p*CNBBV=KY=)W^I3!v0-OVOh*Ih94+M?)9b|Y2{<6D(h#rn9Y)cXP>y{%lU*Ph)R@{T6}pL4OFosz<`s;;`^w%K;)RWRFJ zQyA4Ysd#zl<01CF*&>0x$(t=B@tf9lY&swwN9A;MWiJ4usBA=PA$)TIL+HH()(ZUb zWpTEg?OMjCJyu5#)C1B`ITk~tshqh0DoQs^plXsA4LL3I3jv*y4P+2iAf^+tynt3e zpDqwJ2K*xFx+16+s|TVI_pb&a@e&L$U06;c^(QKllqu1oR++|7nGBh*&A9F%?$v;q z#T=p)a+a)W98+|{ziRy908ee1E595&PW^bWtjKU>;4$V6P!1_)3p_`Luv}-d0MCgD z;LnWxJGK`{-(J8aAnjrwqTncmu8{`;w(*>Ra{FnN)GWb%hOWuHba8#k=`1%akjBrdXDu9U6ya&f=X40k6 z0>#I{>$dUZ1k)LD4q$I^_lk0mIyUB@9|t+$N07rwDf$!{9jf;2|0(YoPb=;j1WQE! 
ze%$~+gnbMlGYrCiq&^k?-upm#r_}?z77MXp`+|+GUqa5J?kUZWAG{DW1wXwP)dN@$ z!5~o;2u3d{v38M%d!`uSP;xI$UN2*8knwpLp7&D*7_(7-i+uROAnjjtVxLL{L-w-5 zF~I1na0p)x;z^Lef%KZwIe!_6@gMnpwNHJ(SBNg+C%8B}`4#C`wou=3!a-L==V=yVHqN`;hW`67w7H5rUUM`zPH7fDgmevCCh2t@~!zglYQ~sg*O*WzMFoO zIpknJsvsNgR(mER)NoLuq$~Q68hxBiPV1aPW69obG@M`q#;c#>s2Z^W$!;ngr1x*} zWGblhf%d66Qrrx1$i&e(zlySC+aPxhqNwH_crd5XlY9im3fXI7n}eg3;vn9%Y60xi z++SYwD4iPgr&cYV0jwo@lc(xhCUGlv$ZBPeJR;cQ=x*#K&eKhF3ls4eUM9oPb2jWt zNF6-NH^>lgBMf&7dcpC%Ja(zQ&b>P^2m~msyO|~jEj&v7&vXWAWbiDJ`Kn1 zA<`G};b;rh0SGAEnG!F$y^DGJ#yfAmucx=}#ye47nu!NUT;!Xve5+Q&^ttHT4=3=s zh(q6;a`B?9l2!*7tTjZBw?hfX2CLP7^}@R6ZMirk-m!Lau=}oWufPuH5bA|&N6QyZ zH9!}Py6{z!yAog#oR$zCf1sC^dr?lCLxtj5B1^CQpP{+br{1rIug6}+Ce?MbDONC{ z_@;D`8(`z9SqJC3$ysdLIwYsN)zg@{Jj?~}%)+q%9_s4U z`=ql1JrzdqIE4WtQg;SMklmxhQ7$?150AB=mZ=(U#?~q}g zoN&NF<%~8t1;y{#b}GmN$4CJm`TKm-UR;L{jdsr$Zxjom+Aj5blC)HI#C@<6JGv@j zbi!)}DGcD%f*sZh98dzkL>!urcgpgKKQIr6!_r;+tYH`E-dhOMV#seuk*k$$cU{hl zasH42-LQBCTZ{c(cs3O`fJ(TZ`7fZf7UM%^ph!o}f>$v#sLjCpZ>W%?B?S0zimUnSz36w^e zYT}&gpNV5NdrFGL>hp+UWY4+gc zu{iJ=&Fhc>lv~2(6=RHBPvktH!x)0+{nypj|IWbJ!1)uWPuK&w{=5B02hI)5^$zzQ ZJ$3W=joy<3*;5zJ|5|VE{NVZQ{{cWRYOMeO literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/prepro.cpython-34.pyc b/tensorlayer/__pycache__/prepro.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9570152d7ea39864095025c67836bd500c36fcc GIT binary patch literal 92489 zcmeFa3!Gfpeb;%b`XP0z^_tPsnn%sUGu^hj^_rK*(mXu!I1}5lJuQ3emX%hitGc_S z?yAQT2c<2ae|E5XDGArCtVaRLDcLI?{a&I5MI2MCaSmWAaLQhfN302`QvfMMAM z0=w+@_y3>!sOoB|rI9>#n3nqXz2}~L&bjBF^FP1;^ZeV9t;1h^>?>1W{xgN>pGUc$ zr#XL$-{!)mD5~(k5G_Q}RC->BrV2@VF`6nS=Y7#sUvl0bO${XHgVBXe(bQ(UEJh2( zXmKc-8j1=kx-VK7juy8>Q(OGHKUx@x7Pm%ITm5<11 zPDGU_qRNv|<*BIhbX0j)RCy+PrrTG{-@98TlEq3{HuIk`t~Z>UeMQ3Hu|4zJ&XFNKA#%T z2U;)B$Mk)bhcEb3>y$I40iZm5;y<}go--OD%EK7o6VL-64}(|dXYKsk>{*&Rt+Dhf zLzdBU&CKUC>XiAOq#Q~pYaFV#8EIZPiRU#kmB`rpQ|q*|s-dfPGB|&FojkJ|+seb} 
zrSbkX=QP??IO91@oY(kQA|t$({HG-sG$~$X1~cq0YIeNJ>}UKhXqxE!ZBb=0s?Z_c zCw0vo<$t?%NW+dxnoTO4`7#q#2ciq((H8$RTePNM(qvN(<`Vr##r$V5#ZjfHN%-nO zG{qd_7XL|XX<8~jvu;`QQ%zy|y{L(*^P@=#mo;xScA1u5=8C4VQkbQyQRRxJ^s9{K zhoj23Yi{dtrfz;HzoZGTw_-kf8|XN`7SqpP`(=@XSD|do_CIZjpAdCWElmFJh$^47 z|KBNkp=Lu4DCf`E2ly^g5G6t&UWqENikhfB5DP9pCCZ}96L$G&Q5ju6VV9o~#nB}M z#T)in5g^qIeesmfi5@BCX-oNT(I!3eF6)r*5uMWIGg0M_h+ygUp+}z=<{sco57;FsJ^+49bX>0toh6Z0{kX`wx*a0>C!)$v zh{Wr`=k1a_Ulzqz2FNRyKPl?343O7CG&%M&ud(b#r^X8H!G&7AyfDYv_PO$Mvso+G zr)O&mt!k{K#q!ehOrsH3$keJfNg9aD^||U;vAwyvvealU$JO>wi?rr!BVKH8Yt6^i zGP&obE9F+Xy*X~Q%B@dwFSTVKvvGcP+(@ z3)Puc?Zs-AI(>e1dZ|&PHSLjd9G7{Z;q98O)hjf%-fAqz&Gz6@>nOvoz5P5L8Lv)P zY75?&e|6>1lGQGuiU0RrV>C>$O&U ztTTsRIBwJ`%CyX2_DWNqSalcaBh4zKlDU$}!`jQjRay z`4oK0>1Hcd5ev0ut9{Sf+e)`)EHu?e?Y+sn*?ZVaOl$tNcO*B`!8wwo*yP=sT=~dJ zZ`y3xx>T-AH>;PHtM!>`v%M>>E;bkjovAw+Alz-{GVV#AnJ!l-p;eAs)3r)_S7&;a zp+f3iy;7&kvkXU7)xIseuNksfZeE<}6Y1k;3%}dK-{xN~MA2uWXx7PkG5T~Az1$zY z1c2y^E^dxusCHj8SBP3s^imYPQiyiFJP^&1X)s#ZQ)m@J>dCY3k3L?EUfvYFyg6zW zqYHger9Y|+M3uqlB~CU)FAYR5aoile)ECY6N3TSEFAqg84Mi7p9u;1Khatz1swiAK z8NE>d#i(U`O@#uJ%GIu+Xzwe1(aXbjo8pG`$c3S(WhbP2gUOm}>cyzFMQ=sclV{JW zomXol)eWn)`~8oq-2bT}mm6sMzl&a2*%$FTC(rJP!Yhv0D_(k`@Pb~zJN8c&R(IMf zY*p9LuE`fJ{d}RnwJo|h9K}CT;D_|dkCFJ%`c})iJ;_NClVii>#o;LWpbw-{xpcnV znwc*(W=mRtO6ONgbC92UX?D2|A!^i>k}gsc!^0(hKHOX`FD$H<&R0vz&1$8@Qsz&W zm**Cl>PB}$-Vc`J@?w?o)wD;)vmd**N*^!Pmlu~-Cycz6D6G+%PaaTxr{}9)eygE6 z!z8FyCaef=9i(fy-YPv_f>K&?@*qW@uTtb}c#UR>QfRh6JX`0H}Cq~DQ94s9> z8h*#dtd6WWFJ&pN@`4lTx3)4yy}~nxA3t{V>_Jr)vWaFLEZGWg0O!P+!xIl5EFC#J z=0oHeR_ewg3v|WX%tur4B3|@g+T*!bt5g|+klKdm*W%ph%Gl%9&WVW$eag~GM>A8v zGrDrHBn))o@WE1zm8)E@G!{>sj+d+cnj3wb9@_ZhBIm03$YE-=!_}URtALMsY2~`R z{b0UBLdSLvjS{b05|!4dhGH{qN~67|r+vp#vzpD%v0UGO^kC^@rSb76N@t83Obb~Z zEX9q>2TL=J1%1}E?KWX;*!3PTXQpk!OfwHlS87Y6Dtc_q&~cp9a{#UX9jxj#tz=mD zZ0JXsBOjY)(CD!VgJR(C>gec^gNF|uJ$UTkLt!FlgoMm%3h!Amv*5*AbD~*heYSob z)#DHPRGC<+&pnvUu(f$Vro8saLRRbAYkyYg@UqRFbLa9D@7%dt^BwtaowGgpWRLyf 
z`f7%KHullZS0V1z{8!6SxsK%`N9lZ2^q8l)&(=-)G-A7{-@(#q7|Weh(bKa}l@dn( zUZZDL$ie*E5RJU9!mdRzy9>)$*R)aBM?WbrB|{=BSEE>gD|`KPE4L(NvwB0c#oGG3 z(f$tXW9eclgmet}81Av;1hj+!#Oetv2LGR6faj~T)z~-$%btLS@o~Q_om*OM%>&wC zBbKYBmR8%Fol&2ye_8D=a)=67GE{6xY2x@o1D?70#6*($RJ*?p^qtz3 zK`K^jm}sGa_8^QJ3wFC-uM@*fMX%{Fwxiv*P_4Izs*H?kj1UQNQ6eL151zJD1T+uo0OELnqTBr!5F@lVm76x++C%SNnW-+B z$Yl#sgc<6R&}wW;{62EWr#MWFTG{Yw9!4&Tb5y~%Gj@4KyUq;Pv-uy23iZH3{&uHxN=eTA)sLDHV$dS`KKVOw#e z&{x<=>PYdmT|Dz_e468H>iqVmIufjEvjsotKQlw&MEohbvFI6ooBxypqbq)!7{Lq1 zsUFaov6&3DE&RSBc5f)E!&9YsNLHE>`t6u7m`Z^!HD+^5r5Mfi+3h4rx09@Xm!tl} z3EO|THTX>UVbpGn`TgOnKiXe>c}w*2NYpaE^d(qMSU;Fm_&1nc-oltYm_uiUUm1*E zCQ%cmzdXjbeO^*S7?64Z+m3?a&_rPv_{nuxbFRUYt-#OZ7&4o}B1}&w#T!wm)R<%v zBZ;*-lL4iK@|H$f*2>T>fTo^~=bSlHn`U8FGA`|8buNZSf-N(dC&+U-Fqnr+pC}!9 z?64P*LTt)`(kxWV^|Wcv&6WiDfr5trd2=snqIA+50k|(c%k->zTN;g)ov z@0_~(+~v5uM4~v+b4`SttED=h58kp=o<)k7lQw2+Mx`+W##Lq`@mwCVtMS7VM;@9u ze25BsDLOP)tsAAPc!r5k_5_5d8ZR|m3^~zVldV1Jax;Sfm(J)M7$1>PHxn_q6tvV> zZZ&IkomlSkB6a1sR&6?iUxn>hT5gqUvvf_RdLrwRj*)P9;;{8Of_fOH&hLDhVm+O2 z-T4Y8i12&!Ws@TyjeyTlOBi@`=i%{(CrXp*=5rY&>k{*xJ7**38}LI|pR9*idQyZ; z7UPsB5SX5l4aX=FJFGZzOD0kWf7!|ACrS`8+wt6qhdP*io6i|2Tgz(k5L{Uvu)p?~ zSQBiuwdG`NI6gx{ySP+q4?5{+Z$f}=x|soACi`X@&2}H07ptzMDJ&-x@pcYl+uDVd zb}^o752duJJ;Yl%MQIPE#3fT27-eY>r33)I4kN!kV56!%z(@k*Y)@W0=ndM1`F7!Q zdq=lE!PZ$n*ea`4@Yncl?%)uW1`GEWcNRWcJW#l;aG;Q`k-DjzwnlCPbbMp3ku041 zvvo4Ly4qSP@$$B4<*-)9lV?Yxml1q!3nJc^IA61fvK|*M{ln-5E!?cR+aifmSyG zIp0`bs7UsSB57t`l24nf8(7Gl(U|SIpuf+U5U8Aq8U5Wlm-V^}IGTAG%X$mE3vHpM zLbU0U)=dnPp`FStu8-a%uahxILG0ok(v$F5Ta50AB{dmOp7fMTz>if^Vs+-sOSzI} zcX8Fwq<8e9NmfvNfmxO#HX}xC;)_bfZYzoxb*Ssm&|yi3OF9^d!LG~cOkCz{Y)2d` zp{WA{Hj0;Zcu|MTI;`lhs>2l>zFh|{s!Fqi+1-{vk3 z(Vm2Q+z0g-1cyIRm>JX!en$8eM`09XzbFV3h$E9ToDB%lf>Ob`fYxGku|JBB8G0?~ zy!m8=3JA?Q6Yjg<{Q9E!XQf$b^$BtVqM7dg;64C4_<0{0T|?B7h2jg`%d$9H21WDy z=4da<7tlI+IO+lem;IBW9@=G1+lRvG0L6FI;7FUYb2ll-3f?OWZ>8=9we!MC9*#m? 
zTXbUepv9UT8!Z1W{quZ+N+AxWwKVmK1R^PBzt)Vl5p`>j`Q-$fTOA@-lgNJss4mwk zt$9b^^Xq1suS$=*E=E6?mbw{FmPFax`BmMzq=U6%{`n%`wkTDha}urxUYC`@y^1XuM}F5@hK?B!m=( zqm_3!QaBQ69uFID&&gG(2UMbP=_h~;P$~*G-SK3OkU*qDg`ylt5w2AgFR(e<7l;QX z4%vkWh*h__ftBoyJBk{ep=J=E>5By_a)QP*6Txs zt$o`H+roKj#UT-_er@gko16&!uLS8AqA}TCm{{!PKCR0v#9F&ASq4(fmV-5#HI-Y; zI~PQ;MC<=x^eHpo(D3yeWYt>kB^pqeq>Rb>w{EP!O&ckYnkUCLm;Vzz@ho;YMq#Dy z5GhVXVhBH$f!1*CQjnBnx?|Yd%y$qtX(6tZDvita^`ZR1Yk=-sxJ29QbS`Eu7_VSA zBroEveC^Zvt)KA{z|>wg_j2i4>^YE1_2`bSOvtS)Q^5UpH|2MM``4DmS>x77;%?&d zYs|b&=~EN0{daz16bq3Z#s<2UOGDG`&CSc@rSb|Un*F|P_NymtHns<6%QKiEbT5Ki zDYB~BwvUr&_h4anVFbQXX!i>R!%YU7>{Y5{rPLnvmdwhqCZ+G`_lBW z2(u|j1a?tw?b9*!5H>SgdrUpM(J}Q%F<|Yb6fQmFTrb69M+Mg#>agiOS-*V4&FGl0 z*KWoycQqqS<bhhq#Chg64Ylhv=NwmXe*noIXpIc~yiaQ4L*Z=a1 zKcb(nJ~=b2C-{+YD@C7wgNwkBcCn_z2Kit+KPrqFG(tr*Lbj#>>vl;Slio>nMW|+W zp_QrcHV%_);w?!ll3m&wo?yYVwGkB#S|1iaE)hM2bGyv6LE9vCLQd6Us?b)1RL*&V zBGEEsYM#FGYb@H6GU`s3v`p8rM&x8PdIQ3tUO4adX?dKN>=fgFuo{c$mAZiy*^FN$ zHMgY;5pAOEL9uZ3VH7_4<4c+IoEtA}^=gG(-;!mmye1#|ruyg)xBBS8M5Fd#$523h zO$QUiqU$nLy=0>)lhrP^P~fyy+qao& z#Wj^d>EhDTE*a@|n(Meyki@OWQii6UzB+HO)r#hDZ$OY6r5qtPj zlw6q*BwdcqLlK%Py#M4-`Y)^({R zZgYrtWAiWmylVWTI()AVS65I7+x;_5K9fT3ysu;O1r>!RoqjqDfqsDmv0ry*>=(E# znE%N7hf$4w!A*X^Z05EiPAhNe!$c921W>b7pErVcU69TVd?+y(_;8j}TAIwT!@viq zN9)$;CS80o$sAgRjhvLv-ZwV#lc@yuK&@3+q^m!XEEf`g>_3 z{>MXsl9hb}+OJ5cb&Cv%-b8}35!kRv*G)r$>l!?#7SB%`3&MmCKGo)YmYK0FU!1^Z zV%|)D%>WvM?ioBQWEYyvA%JPYMDCa2tgnd4)zwx- z0ni?%JSg{2ysvA@^Rq5ZWCaeUJP82R;`t^3O}x2Yp8=1piMI%VrlLlS(0gIeUX!g0 zj|P753k>p&PPq8{lsRPzuYp3v+D&N2?cyZbnasGHnM{H~r7%ckk^g}U@S`+g#&=yT zN^J+f#B?A<3T_Z8{>EmV9>n}i7@<_8;`>px`PuP~984n1!QYUhKROO!QYLf*uBhX2 zih#{i3FPG0uQ}>}e~txYet~k+$*MD(sK%nUOhJu!6a*{#bhlXNtwyAYNM)Bf!R0{t z=r;#^wP6h?Y$;}d3o=DktXS?rCiG3Sw6fmjv2eJ$+7c@)yfrV3MQOM-y5Lxt^EGR) z31j~iSQrezQoYrMu><9wF=6bRSor1{v-=dfMPS${z|u{j0B*I8!gyUe;5j;}!IlkE zgDwod{q@s@b!`NMx>rOYTnL;|hzx+gRv}M&bE3ODJtI3g{YZeU$Bb&6VK&IX4TJPP zgW?d81dTf&cc9ILJpcsWrSL{uqWIUN7EYmtuEEAUEx5eNlsf=$9uz3##f5 
zglb+Wh$-JjCv8_XTuhGbDF2Ji2(A@E?%IOu%s263tu=0}ddZ^Cm7f0Kb1uxm;~f9P z^mgKu*gRCJzIdp-R6EqXXwP}54|#BzDCfSl$yRXcd1$iCs>kQqNKMOx3JckBEMI7} zrmehGZgeeU+su&Y=38IL%)%TR15SubN9DpOjBtvGb|#gRkn|bPmqjWxhNn}4HtKGr z<%Ph#;HO%x&k+rvTf8sJ_wu4eUy^x;KDcb(<@H!gVl>6XI9yhoq)yF%9<8pRH=v1% z&x8rRjTfv#%;&W^VJ??M8i1XGtCATQ-ko@})+!c%YPEqR+6#A$eB!6GrmCkam6|v* zjPBDsp^1V`s@sN7rkOZ{{p7Jn4xeSYAkZPv9F@;|IB5TuTlkt12N18oJm=WE&nGQg zcun5I+*Hj6H}#(_jXEcJ&>b(vCQ9#GO&yD^ck*QfPgg4fV^_iz;3@E8(&IVP1Tdm` z2$vVq0icfe97(_PvFdVW&AIGm3>(EC@lW9oUCaA*?VKZO!;(kR!ojh&;2qe*1Ns9r zf+l5tyP1|0r{-#9t}5r|=ISCb#p2q`xtyc3O=R!*L;>@C5nn!JJO(%=L>QF=dFo9?Vv8DVry1Q^Ee7Yo_q{-3^lrH zRldZPWwIr8|(VV-Yjvx>3XS&(g)NF3KHtQ+BL!W z6sQdp2W8Fpuv>MJ7wa0rmra+O1cgL;5QtuKxlZmt%gKABI(~%wp0Hl-5YSc) z98V~(5V%JPJnJ^~H^XSCr|;)goz8nXe$vj&bKh1Y`JN|Ta^j)nbx#nxD1>BlUy@zV z)fu?8WI6It>@J|E!JKoy{Hwq9+yC^Xe|(}n=xYcj=wU^b>$#T4SR8G8#Alu_E9T4D z?$@L@-%i}cnV{`$odX}Ql27tzd|n6R+b`jwPLrcXLR* zOMgHKe_V$j)Ikl3e@KTbI{dH>=3@FII{TsyFy6x21}LWI)3eL%0cx6lk-D^^SJ$|d zsn#D6j(>!c=<&OV^&(#XUTo@j^KXA~J7-%9L!_?#H)QuxTrXk4UVaVNf8oY({om_> z+RaAmn%F+cbqn!*P>21D(EQ1>KWP@j@B*Or?Z$HKkjYiR;#&-h?*Q@}7Dr(gApdp? 
zs``2$zgdBWYT|+n!FB-ocN*l!(H>@DrpP)de%O>u!L{ zEq_eK{=Q-U;}jB5goj_=>sii2paKgd!E2PXR&-5)opc zaD=i;T0w^GrUXorD4PE`p=Kuo(JZn_#0JO-D6IZyukwB)DQ}0DheO40L?{W7Do`mp zP01$m&DgAFG=E=#u8=l(9&eJo`;k8ox*7Sy=ID=5@#bh3#Acu56Qm)ZKqeufM-jP& z1?al2`~-gH-M82gN<{-Oy`=?8^=lDux@trMk5`I?k0V|2s9fZLluE7#pQ`= zW%JZ zl9=VoAtv5}Mr|@aeLBl2zxSDXdL}4^v&zUm1 zof^Td;IS?df!p|LA`4KMf%pf!y;d6 z-keByz9VPfc1*lBgFK&xyUL@1RaxJhF14q75Au!W^VwLKBS_H1oF$t5+{}EF2$J=S zhaRdtHv7n9Gv(RCM`jN{bfo;qLk}OTKJw`7`D2G?ADex+>?A~^a=dizz2!NL=`+TmKbio`iIn_Em9$#w}2g zQ?!gBVnY&z2V~;5i?s{REu}8V?kibz=gHG*hi;AK62`V?>elv_=OHYOMQ9PULp)r& zzeTJ?SH_qI*7&?nsvx~pyB|SDrM-Ev+MJ(@+2mo6ko$;3EPAR%ER(-KNGn$60VBY~ zGSeu@UcmXd5tYQIEzmXsj}9;`j#Zp*ELL!osjRk(E7R@b>U4V(?^`39dp9@|8+M3R zxeMn#K%(MZSR{@X?uL~c1s2^?94a0z94dI(6r3zdwnvIUtqa?tmT8;B+k&I$YsXPuarkm1JC35_!lmyMM!U*`WD5B# z=o_^e3KhoUyIvDx*^>Q6fvk=Aou`q^oInLsurW4r%>r|4SjuNlxxvYond?T7Z#FHC ze^Mp?lny4fx*j^bGo!=tpHvAZL;F)YGsd?iWDmn$> zB0{L;9ZrF_5}IOfVLOyqVV3jM_`$-1qQ%^KM{%t1FMsnl^z(0?44(D>{>e0Kq)cv7 zCiN{$7FrG?rul9v^!WauKUZQgFn_XX{oY`>DwapIcQ$8+>eo;~FxK??$;l&PgK&EJynN;D`b~qiOp0M3dMgcUpYce({rWgtzJzdE_s@k z1p1FNDoMQh;WNoV74U^vy+W5Dm%Kgnthd$crt+`Du}!f@iqJc2s;MmjsK9*(_d(a_ z>+LsIpyq5-7uYl>I@pfMEH=f&_GSh$dfLA_iWl7^kdp+7V-8Qt%OpDl)xMsqo@PEFa|q> zINF;Q8gqn>oNM>j>TtgsFw%8nys#Um(d-S_>b8ql;PQ!~<}SK6|WK9;XS#dmBq0{mlUP=gNzjIweK;n2VO` z)j4z^1XW;*>V=?=$UvwoYe3ycy7Bg;!W}y>KT=v`{}Z;ov^bm9xH~II_gspE&k4gj zKKDk6chWjZ-_n_*N5(CgU=DA$lVF%SS0-OC%hJREyc8@BYSf>q9lD#JwqiqgTrRY< zD*M`5!MsBV5@DTrGc23dm)ctuh}RQ?%Vdx7iLh6;_oGSOH*7zhmZ8nTQZs2PnVH8# z6cyBYFi5ITlpuemt|WAc>@QZS*6XVe9X@(!>CnSRjy}X$s`GJKyaAiFQ8^ValMSwY zi*NtDAu{KHUA6o5ot>U^0gnSb`UgzIro_k50a1b7g>=s`6|CQ9_-)Q|*w~!cN=zKi zw`rcAJbSO=a3T^wG@+H2bN(m9z}Dbn?t6}$0(~!vt=c*(?Zr2<1uKVN%t2BDGtD91K_ZOraQ7s85!8et)d7STQoM2oCU zgKU0{hsPHRtIuiQ1V$snPABGt(%*+@tubi{eY%q*fI_)bFGj^ev!cO_9E{u8YP?Y2 z6|s9X_8Mu@$LfXXqrROsI98325Z<% z^8Id>j||GnM%f`B7AfoiPV-H!CT|pOqs5>Sgm(Cv7y1-f9XTK{c(dAzuu!TRAg>AV zwIu`U7&H{7zfoIIw$K*+@p`g{0MY&sg>C742(h3Z;!pk|6^bUpAKDz%377r48|t4a 
zyvCXq(E@L8SJk?N4X`el92+V3F=FsahZ98bwZNtZ>?&Y>=Xr&w+X;LVNFIH)>R!qN z3C$*cX*6YB+!#N%*H)J6+&SMGO4L(7%WQb*qjH`1$a37oH(9Q4u-NPWE;ob~^V9Q|qQ$FNl{{}4!*Q5z#Fn=($+u9( zB{m&TxFGxCiNp94Bm+)~SehKM#H^c@{AiM#c2d}k-JmLLMc9mTqm3{z`r(O3hy^$C zSblGC8`pUY{IOzt;5kVM+avbp-E~0vOw}3cBl)R_jeq#)aBN8Cd=D?vMPySj{xS=2 zx-`2uu5+n>c5G+6?}N|1FaB%fjQ?jHOi=epotb_xWt*|ZpzCpYi5GQKh?Py?;sz!d zad~xW^D|+lPi?AttJ?$5G!}?Kk4Zv68@|0utR_s!x78>^dpPTThaYKMrncGloNiWI zEd++mslM9cG&UXTp{eaj0n?7Sr*>xLgrsi0;+<6du;9xhoJ0@o0lf4Tp3^3+#ocVy zdIy5OJ4m~$Z#z~V4`9Ck037*Uob!(>{hI+0+s_t`DJK6-QUTc)4CQFmGe=xva+9b_ zbFq~Xh3V|V?n(j(ok_>Zj+ubece23H9}Fk@iZ6p#VAyfVg~bORk~*SvU_y7$5JQ9_Fnm& z=gf5K`S-lb`5XL-K5_KnhtDEP(C){jZ<+i^cH_v|Yg(GRby_@ko~7{GZnMVQBXp>? zE?28{f=Dv=N>qOnP4{fmCA__%>tS{VU|GuTLr*r0uO*hWwbG!gN6PD~uxNNE?Q#&`+JCkgx zXLDS$b!W#^chtX2+u1m`U;`dZzl*WHPcxYL&nRaO?b6Imu9&~ONHOid-`9}|q*Tse zjfSH=5(Pkuw%GI`$>$L#gGmaT<}jN`8A(pI>SPXUFE>MH(lOT{G&Y}^mdr0M$sjj- zvl!Qex{c{-)*K->59FQ+xnWCuQbLb9awnzwGa)y|WBDnZb`d5}SAN%LZP(;X=Iolu zeC=85)0S~|L(_J`CoU+%!NThlrHKjXJzvxL*dADBjeaa#f9=WFv`B?3HXmoiN3V%T zCkjMv=5@{4bVhck_lL<@U0iCd`sB1J`*&5?miRYS@NekA3dnxy&356%u(kSZyRg(Q zT*7pQ#ebN!+kM&BSYO`F{XeaFc?Tz&mztHgvkBBTlyQ5Qo5u?i_HXX)PyOo;{o-B! 
zM<#OE!~=Rio0y;CgrO8>CBq#-!`s7(r8P7Qo^0}Qrd#|j8}EJ5fGl(#?K@s)d^4|^ z0|caFx?pk{)>fM_`&op2B|@66T!S9A&K6gZ0!{{zqHQYQL(85vv0!PQ-Jm5^ub0xX z7{*}Ydt4SoeX&}vRpoDtkGO58Jzs8ldfdW-FZZGBySSnZ0<7&I}Suj=N8m=};8jLO}_tg2aVh}g1omzZgSnH)EQQT{~1E9>8ORd$V zsj~mXIYzdPtR?~6J0Jb68*#Jx0SQ8$B4Nr$%V#${=`gxZ8+B&P0x@839 z#;9BNe22?PT7K4}H&%Z3d`I~>Xv|#v0N+;h_wka+7>)*5^{p9cKj=5 zE-DDMk#4EYE-hD~P{fZo^ytw?A3A#U$RQi}l;_cq#u+B#DsSA&8(*rKZZ3SvjC<(d z2|yqn*PCJ?_io*WocY^**b}iq0!U;NE-Z^v{h}OG+SuMl+|(vcMGaickb#vhEk}tW zeq9h`7bnr%M=&>5Na~%%VUG79C}?jwUz=IxH`UMyb_jkL^!SII2>QDF>w(~r3LomK zcp!-c1w$~N?$Y|%94z66Jt;@l^u+uu&1~420DuKNcd-b_6628OOHN$BC;trXRH@2_ zsmyg4ztmBgn%P>2C8j z(jLNEpdGw~@B4x~Y|5(*;y0T`gkgmu?$1ma-zvchC&z|$Q+H>^Gui-<*nI2oy3!gKimMG3&u8Xw~2a z;*E-T7gIVU&Q8BZhh(25wq;zCS8TP*=TgpRui^ocMUerCtp~SbbW0bh5*8ju8q%6Q z@7bYDaxpLhwcibi@LkC3Tq$RAY@qy)p%s&w*EsviVc2IjmM)s+9vn@)kgjYrZfv4o z6L4e~a63Bu?4>23U<8b9PfuPv5B+#&K?T5*qJ95tt< zl)K>AnIq%8f$W6%bJndLyGsthHdV(RP1JB{y?SB9VYS8OMJLFNI-T2>NCEP-p{%iT z=XmM{4HkOuINuwvy1?am){n+wreWc$pYU#-w#HA}cLZKaslpz>$zqWQl|ghLU}ll3QCxdsN)~|@CjkTT?{MDk3&|4d&xtf1Wq(exVbN6ho*d6au3mm-3#G4HT84FIraEWEAGdVN02K5 zNqEpKH_T6}m@E2kC;6=Cu6)Q@_U5XR@ds!qZGQq*HbH&4!=@e`&wD1g`Fp|MY5aSY= zHG8aurmrDx&l`qP^WC{~8VAS`GOar8q6DE;VZle2DIKVqya8)<+m;M;?X4K0*#+HkcyaDekET;YY2> zj`8f2sEzg9OiRW_=lsK!zxyjI8c8PO87~QeBLi6V#ofRJSk`0~0cYrm=JI(o&NCMN zkQwgd!Gb($fa}eC=C=?s%@VMr0Y?&mi7())CKcPh?g|VzSo2D?2}DD^8c`0gg!!6i z61+Qo@ew37ODT)8zU@Bx)|VjP=Xz>WATQFU zCzLUKmZ>{(No&4|*~Pavg(=j%Xx&FcXSHaFd5`YJrXBP;h&*4v{a{!H&KpG5-=ASw z@zj0%8kU{lB)@Vc>jlfy+eH6auYkJwrD3d4{;fiu{&$RrK-GAoB(6>V`{UPX;c7H$ z!rsm{UJ-M^;9_ot*@KCssvdM1he*5shVZD?@NSazkus87vzkjrJ#@>d9G`9th0C&Q zR%kW)2B(NNzyj+O|DOzw_+RVrZ*=J5obD&>$29haIf-_=|2Uz`7##K^bd>6I+gv!S zq(KF)a5g8Lu?bi>g9F+XPJvCkFph9LP#7nSFAUQ}1r>yJ1q1~~F_&_kdx-}y>Ij)F zyjg*mV{)`I+lr54jbR2KGUxCTEn-F}F_eoTHx*!GiX{dorj13cv=@QR0e)!^Q$oSh z%+%I&QN!g;ytcFMMsTK{d0Jx~EsPd1(R;XLSSO8iutS~Y(LLv0-w1zKxdj6VRW*7V zytg`$Pdy1quE?ra13@;^Zie(B^JR2Hg)j5%uk5=rqsUx7(`{k5SX~nnSMKb-^WlS~ 
zNn0^KQsVZ5-M4#o=ETGtwea(`C+9x@`+xZ-{?p(53r{3Fg?)u&r_ia_QoFCxXtfKc zV_0eUgXW8sc7LT-#&@!LDQ+R>EPQloV3yF{AGM$i?ZSuKg~@hdy1gr{eLDM;-818@ z6#7dkE3xf3krhh(?&J5Jg&9?;AJJiP5k`l#c4)Afy@O8x4KEKT2ViH+*~nw~hHQr7 zH78`1%#)mjX~Y@RNTE+C!Did!SbzB<1N3R3%2Map1a_^JIHqrTvj^;1MnqAw=r&dpU@(+r{MbT&@+DrK6$%r}(@@0tXOSBs6s z-{WJ)zpsOELqcu;AI^QytxOGCYgf7l)&peu`x;oHtI>|#1Gg0p7uE==w755ahJ%>R zo*Nr~4))YYCnp+CHkKf2j3pO5Dbb`-&^w(&ZH*f(ey$i{n)8_``V2=R8}bZvw=ep% zjkA+vmpL6zP(0(%hNmp!a`jM&s)FIndCLu!?~qh$zBY^c9Ml-NG>e;;4A1o)VS-ZJ znZmfSuplX5rc>!5&a|vO#hIOjq>{|nN2g!ku$Q#7_x{O}dHWsK0Nei?(s~^NC>K#d z6YSYuYE(_A@G+(>kQwuh_N0^TIR!V<3KTMRlyUSX@@h7rW*}nrBO)r|sa>I(V;ms= z4AHZ9o39Atk3m+5K1=ZD&laQ4+OYn!f(_t!pfb>5YKCaLwBL7d^P8*tP%TRRXo}`<+FOM%YkkK}un8&lYFZNOP=n(KIs6xq zjoUbhcDdSZH*weQRygg9fNbKfI@JisZ*j6A0z%Kr9qvcf|0mD>fSiy5O@V5pG{qC4 zDHgsb&=fo02u&GGXbMWSl%}Nhn655IQ>fRY{De?JN8JI>Z1@!jq%D2R#)E_f^@H~j@o6ZFwduPEs|gZBIRZSSx*oL zB2AWkxi)wy2jc|^(3}TuoG(k!-dg>-Vku;csq`P42T@Sl@12FAZtRPPuqnR1@LoGROqiboq#Q2pE6fOE=trCkk2n|n z7dg?iPv-mqhVou#d4VEM2RzMvc8v7^5HKwj$j@dzGc}ysCFlI-QNa?I5?M|j*^+-I zx#)Tb;K4&M-F5`LFbhNhT>66^G@Oo1n7}!#9&PscVb5b9UIg@O#S@MlP;d8v&A+4E&e!+H&-|@)MK05gWh`{(zviX)z_F_ z(P4FJtAB}U&oDKDNFLch$o?HFtZO{KjWk$>;FiB@S7BG-Sm9wjS5hso5VK~GA?6?F zM8MW%IE+nWJ0yW&^|7HU>$Btp(dK@*>5h8@|5a*V#$bPjbCw>nm1imYw5^H8W;-Od zp{T`3Qzl`vtpQ@QscTaQo4vXGy)?Ibq@s5Ws295JCTD!4WSoT_I(IISsC3^6W`IGR zk>aqckMUch#O-d{2Io*|v}2OWofV-2WIeTV(u%gWPrBdSrd}G}J%-d{{^3DC4xjpHae%Nry*Ndq5n6=ractLz( zMpdP-6F{jvJBv-+blRg=6{pyvm^vSMXyR~Bsfrtkg|a6k(1@2B$XBHi>8>f2xp;T7 z(()NQEsk8TV*BesYD+hD_jIe(H=^qlb%+*vx(I5=E7)F?esBE^5FfXa^_BHAj^^={ zTO$(Dp6&0>H!@pB&v2yluG+X;CT7-%U4*g`tG|R=V8xMu2;tG#X-0ItT`txepHcq1ZcH^zSr});Q$;F11 z#E(A2PJW-^w`o@B^tfB0?~ZDSzxr%9BU|VJir~Zek-&{Qlnt@s$Y8E*%J4#x!Eh~H zAs9ydU?Oz~CtSp}Zq=Ag_L&AoD2K(neuXO0N)puu0BWOv5*3}tnKo`XCKm6@sDRf5 zKadre027JVIUc1rwo`;0L^rX`0Ksl1$F`K;KSa!!xN(8yLBZWJ+MnWrY+qo4k^~ig z=^?a45I+T&Q4C|7lV;ioy-0Uu_VwfHp~gBkP&PxV8CxR4Zpw32SWWHinywI`vUQ<` z8$I3{tkH~TSeKG0*ap>8D(g!{jc}}{^G!QvojwynAH7_ww1|A|7Vp8(ykZev)~_Si 
z?=09Bd?tZy)^X0Z(l)jWG9i8Ton)&fp4Q}>rj6r;9E2>Em!^?;YEx!*^3WPx#!HEH zR%UPMJHEHNdYKo?MaftyH?=n{LV%j~L&f%wlDtTFVOVEF`RCU zP8@FxR_bQLVO;O{d6`8y{>5ldTfd-)#Ey?t4=&y$+@Zgg82zQ{Ol>yZz1r9OH*o@h zr3Y&Lz;$lf+!LMp01_g)>036xxf}DFyD?|GF&i1WwOI1$W|N3QrH`7SaAFE!R&UQd z_w3Wp+n&j0wCFOu6DLZe$8hen-(Yet`%x`DY?_R5K6}t}Ja&{nWYQIXxIC-*Qo65n z28*Bhv-W0sQ0s{vWN+pp>pq(1Ug>cCL_-pQL5AC=&#z89^ELWWxX5@0UbNY|=8j@k zgJP5B@xkM8+hKjrkcTh-w*)(^(}=|M@Ve_Xem_Mf!8+FjRthpQSs-!ik72 zeSDgDMDX{AR7fXN;QkR`ROZ4W_9r~Nc4y{;-2P>OWmD}uGEx{KeA8|EMY*#T56&Hg zkw`_m5{&8BU~`dr!OXx~1kegq*+-B{gk=t2xi)Z&wJV93=DDjS=SXh1@Eid5QL-r6 zj8!Di1u7fQqwTLQ=s7Mk9|n{_IX4WTuiP+v@2#IQ13myT^@D@WP7r)d9p+;Z@|rp& z{#RLEU^D+E!%I&JrWMKx8MZ>S%NCfL(!D4OmZ#^C=})F%fu8aG;sF^h;*~ofyW9hu zG8e+edCj)csD&L7T;Z#N3T@P`i+Anr4Zz)F4tLx&!i;W?2=5*9wHGt3X#+YEIX#|i zYcN|PvOzCPki?EFUuGtw8kO}r9;gV}B)S2DmT{pNRT8%C0r$GW`Bi)wf*&P8I25!@ zJ(qYY1ta1*FfIuXZMqteI{xlw##PbT4l5P6c(<-wkNWdjJl=-o5@}5=dcqgEhEMxB zeb?t~kBT(3TS{Q?*H)MgqDR62RLxka);LtHA38R1WMU;JPV)j3d-=(E1V#(|woatw z$phVohbgVc3LboJizfU$)6n8k#bRIEeOTKYWjfU^yzj~#d1X?NUIv%9$4g_w@&BTS z|C*i~GL8^;4UZk=WXTlCreFFsW&AcB&T|Orqp6Yn@L@hBg$=AASzcq}HqQM%#Ol6P z#GV}k+p%B2lc-T6eOu8X-(K9$@6qDi7k@-QuRYoBqwi7^IN4N6CT}F?ZcbbtFDhf) zIg~^~aW19EEl!U0m4A(Ao`rU^Bsb(6xD3>1bhR4GF{4Xi#*dy1^HbwBQEM*1MJ%|( z3N|68`e~;7e2msLz{lmd!n}6(3}=Wh+IW?_M{_x@P9QU>V9OY4yS`xDNf{as_a#xj zlf;gKHxzs&W0iT|&~mYimB#YI1mT7BJ8?Wv4(F;iKSLf&y`bZnHGIQ{yuh_Cd ze~#;&^aYaHl=O^w&iO~8+Il)#T#0_!2hg(Q>fcE##n`Vymp)W^kBBx)iW}`tfgXr3 z^YjpegxCi8=mAk+=q-3n7Ap{B)4>O^C^#=cbHFuu$UY^?Jh?KdB2J!tuUQtpOTg~r z*~cx=zL9QVpvpnKiC`1697^Z^{*Gkeo+lr_bQ+VK_e3wKWOVf(;?0bR34SHt(3zNo zk~`6NQhg1?C9gXJ1u7}3>C72o=dPZ`2ipElOq?|gWR>T?vJ*WSTjb$Z zXIZ1-?0g%#Tzn9JlHtySrZUrH%%iktxz3FqU%}QN>Go>9%n-Wpa1uF?USF+48Y z!Ul#M+5;Ag(j((eEA=dgvB9Y=xuVSACH|M(AWH4@BD)N)xP4mJ*gQmz=QK9W#`QMh zIPmW_eBeM0#p4A7KQlIN$tbQwP~jPVn~!nm;7p1ZL|^;~gUv>kSPdJf3^vB1YuXv1 z@~ik`3@3+;5O>&XENdh!MgO{lr(GZ+t z#0FVGo$w$_@2X%!7{Nw^*oy=bS%O~U>Eyk0uNIdpwPgvBdyT(vlm1Xd(Hj|nc!M;E 
za3YCw==1!9bqrjTTTk<1e3);H-@oWT+OOMEw3K#7e3$5*vL)u$KgfBO8k^Bm*O*iG zIgL3PF-1G~5a0B+%)jSm4zlmg23h8hd+i~1(7w-lL+sX#t)%4FJE-d2-p5YfzJ3EM z>+Bw5%SXo%jg!0iJ}j*z4I&#!N?kjWEN`!oq#K|3x{Rdn$wpF7N~P$)D17pS>guB? z(2?Z1TSccr`LB0Gb&(2@uCL!{TAxh$pgG!eC@FRAP_n$ehLUdl@vqBJ`gAswvU$=; zl#)@ATlTI#nvw@@`Y^g#<0pI5H)N$-Q@v66u-N%1I@WU(DRu2Avb?=Uk#79Cbw`m5 zBLw;lKm8sj8Q|Y7OBFX(06PUP;BYW|%wqL%3xKX11KLa*M^%s&l9@qbW*Nk{Gx!4} z!0uo4va+UCH8ejj(+O-FoXZeXpu%M};kFOdLE#5H5mlD`iLcfsFwsqMu8u|~K`800xTDVG9+BS&e1kATBI@&v6|+OJxzW?G%UL#Q#6tI-C?Rj3=0Su^ zFx$=P5U*p@FHf8~bBvGj5Wf%GuQ~%IT1mf0?e{E6)(&*GJ2GH7pbBf_&AEU!&$>S8h_UjO|jAMnI>E~-t#=pg%D+ja3_}q=< z1-OqY7EmT((lj*IKFpC!?v99Mo4}z3DElM@nZwoCaC^|lgn5D!X?1a4hs$jux!W*g zT(<}8+_~NX74iu^4~<E9m>38KWYu zIMvgAD;L*G%_ZV7yR|u^=i-^8A_RQDg0w$N@i~gAz*$)r60lSuUv2`e6ghaVB{`ds zS_P%S{wT1BBD&|!boxR&ZK2NMAp1mohqFwbO zi9XFk7}0w)1@6`15@Xe&Pa)RO$6)n=G`)*gf0+CKSyRJ|I`b#w<9vwRi5D4G{Z31d!v(PiWX|4dj z<)iSrCQ|s+8<;(|gjtoj5&lq@X@BRe(ag;zO+s5Y@{&ZXoJhj!IkIA-A)zW!iUhFa zVtn?(NpI{NlZ^Nv-;zH^uQJt4SKdL$Mwvqf8w^|6TKQ^` z_M?Zx(i-L*vnnW&PwVW<9MCUIW;?byH|Ort?7Lrwm|MtdkH%WC+QrqQ-MR#A6}E>- z=W%zLkK2oT@UQZJ>8#XbO_!#hM@`QRU7HQV3f2j${Q5_?iMzZsZn`OH8qYUcEp3x0 z#eXmf3FLZRi;(=1)iLW}4i0rX)pX80pD3)4WaPo_@Q;KTWT8cukj1nPY6y0a;F0Q_ z_Pn&oc4P4D#@D3Il}LV1=EI{4z%5bm3^aE(HjmL*`1!Ab*$*HvjQc7ZR)fKy_4IUVLoVVHEwTC#zqe6X-3&>iXYSj8`Ys}jeC?V&uC8F z#!0mME~L?ddIEogh0)>+kJ!%;$8@b*;$lOyiUlp1SW<=**e%Qa3z%Xyw=k{nxwThw z%Vv<~T{6Fn+h)SpQr2I7;%alGYZl15C+V9t^X3Lttg8%(4Xk3$JFZ>Eu05e{?riYE zcsRF&ufvaY_GUUDK}+d)92@eXSX?;*u|AR;VkxgN)yF58>SJVf4?8K)Y8rOiG`hBH z*lEaV=%qsry4-z7(vOn$=0j?fk#lb=w%})8&zE+=y68S4_##h2}q`3 z^3QPrT)Y6+ZW$*u3O zddt@z@7nB9m;>;V_sh2i)g z@tIvjFo^ztiRd%F=zT=#W^?klbT-9dY@od*_gd{fF4~2%@3UB(ezAMmdneEQtcKej zPNHqHQ{8G70Q))aCC16FZ21$~N{62OPNAEOH64r5$|FAFCQJ#!_h)39gQ4P(yiyPkt&A%=~QCY`-M2L6zsMQ(*F1`ADW#$ z_1)Mhw8>6f=cTvMX)M^&Jx-4^-2S%4soa>O+Xro&?j#b=j7Fq>Ht~yI%cGb;6mzmK zSLd1K-e)Z6C!-De$3XlvW7qJ1d{JQTg4y*U!GZ_I-@!NOl)nW|%xr2wPAisM^09!T zY1r1ccHV&$OUyNiZt;CEY6+vu)?y~rb;+qaW@Z+V8pW`Da+bhQmOmC{`|2dl?p|}p 
z8{Z!0dV|y&AnzWi8hs`ucAk5UtoWIniD%bDNrYamg7aykA+OE#!v0=s+4D({N9DWW z)WT*!w()kxFUWout)!P>Zo5J8*kd=vP@(guJ;VXM>Puo0@#@``N=Bamozkc&K@+Ow zOUPJ@@cxn4=gB~`8FjL9VGmj;zJ3V=ycI!;c34O4-VAIsACrpy( zT7nag9nO^~dt+&r5Gp3J5|fLVP-I$tW*Y!`CRCq6nrNyw6Db~p;wMT<6EC{7Q7X`E zRAw7d`l!jS{qy)HksDEwn)-1~?QA6!bf!t|H}@5<{xUWs^SdI2EsXSCuCDOS)R>_! zGu5uPUAn$cBQaGXxjlw*w&Me2ZG3%SK!Yw{$?NFn>uYcJ0@pWWq~Pvou(kM9DpuCQ zr$z5}ew$z8;L-?|A79(Fh8468B*J9}|0{o8Yu3rLza)$%8E~I*0_IQHe4ju*OCN9z zBX>fu#NVaR!|d*+s2kkc9PQtSJo?hF<`Z-SQxx5(zti&ngmEN@t5GZE9;@HkBR%)D z5TdQbN(+y=jf;$15QrEnF$!K};SG3k%c(3Fv2EM=?2XGdQH=vTa!zI#d2dxhm&V)F zb&j;--_d51;o}CvT+IOv+a*Ls=og)B(zqr+LskVQV-)Gx zD}_Erz2vwf!n9{v&=dsMS`4<-!f72pOqnmE1 zX7SFfcS4{KMToKcTJq$!b}kp#G1*g8vMb%sCUOtl)(bq=HV9TN!VT)}5PXvlhMU@& zxI2FsF(CS>vt#Pp8-szah?`z zAq|!Bh>AFqRIED>HyErf0+83h%HH3)2h>mdtoPRy$JP7eG6)urP8dg$Q>^ylCfJNIDV%pm_7?SB zgINk4S|BXb4`@Rc$Jvg)H-cPz#s4zpu(lg6xkqJo0Tz=F8erL$0|1@1$F|hpM=Ra% zp%c!y!Zwp#cS)?Dpwg#>V;dV$c%E%leLEKTrW&KmO`um-jOR(JT@#{%?vt4NBUk;OT-(+mn+W(aP^eEp*>O zN$?L$Nw70|`8FlJv@_cO^6e4vLSDLE*s0_3NP)j3(BQ(>MUHU7B78&p?7JdU;Wwd zFYxROR7SC6B}V?_*l_uWk#|3z$Y3O7mJJ5mEU%Vk;>J=cvbbKbrN!<8Ch0?(yWEmf zzWcILDA8(MFoPV#L$b%!MI7-7C@Ii~A{*3>a6W0+ycEUt{n z3hc#{wHKtkCGyDDJOb2tb{C^fo#s@h?M`E%BJGHVqK#EV^(Ex7+8;Od2QhKoXbAi$ zg|Z8}4R^E4yTvi^CrYget_0F5j~q$s%swE{WF%uC)OVux}qu+F}D zLe#uMB@>&Po+IpVo6Ljc?+uAtPi0D#EANyl)4EwQYEcoXRf8Fg?M9;*635dQ}Zgvzxn>s7pQOK^zu#eX1 z)oFk~_8zGOyA5$I|Ef-z5OyW8^;{Dyl1g$5o@|J1}leUU@Z5 z(6!3Y-lLTN;*$Fd~=NsetmnOD<2z zqUb&lHlDAsD(cZ}K4k7Hr&6$u)uJqO93(%^8BT<$w5|)@BKLch9PL5kg&{UEQ5Z3! 
z5(?QKPEU`rt)qRCs zMg5Kz`-_k8ds`n*7O$k*4Ru9M=9gA=12WICt=<)%qekL!k* zj`o&NoOS(sb?^Vy!9JEcI((h@Q9WVBXC>R$FnQ&Z62hFnuo5Vm3gp)>7zI!AD~|GmPwwkgG+gnEzy_I&*|%9if)K4`6lH{Qpgof za5lcfMly_IeGUYfdK+5^)v-)#Im3!>KWjM<*n|yoApJL+17Rt+NgRlmbWIMVqe3{4 z^!n-?NJr^#AnA2)4#Z(ZCkNt?BjZ5Qnw`xsbP_m_wDjDq^*Ine-HA*;O*y` z9k}&z5D@l_RRx@@!+BPt?{%;m_av4EHe+uEuhGS3z)s^^p~KaAjkg%D5x-NQ-|Q_i zK4V!=zo{jQeYk!B=1oN*t= z;Xd9{xRX!@$Jk5uK;JzNUz6+Dt|Iu^&u_|gU@nnJRMxYs$f-pnx-pcFAVpEePZ{$7 z-ZxGF*}Q}rBT@WSV>Xc2NW6j602cyqKg>#cjcvroi{cMHhqJe_8_a*h!G9yfI@iv# zJmjw8_ADxczU-;GDMYeDY=>*O3ZCG<_zdJ5@D|Cr(v9Z`e8zSI=P(x3AR1@>cnJ?B z4a9bn;qO$XFeT_4gso2a|@6Jq7(n!Dhv#%6**7P9og}a<187cog zIFhLbwyL;D7+_sCUxhL<$NC&GgiMf#csOt+Z*nQNub#T0W2d37JD1{eHSI$xwa6Ss z(zk|DNp?CFGXBHDrx3&XdhEGVk>KmqV7#e6$vLFlQvJz13+k13z~@ zM=n987;8?Hp0CbS*;vl@40}8$c)oI1pn=mWm63q!S;sA>ME9%`CjxVoC=_t#g`{QKmczz`rMa7?DA*;jm1O|x+|O)zKf zg~z|jG>d;phfYrEKX!6T2MY(~-`}N%(f&V~iso8M8O`sT#V8eB7;>71kL`31&+~eT zzoNr0=+KLOGQ+R8>24Pz^+~05DQ)(5Gg7~(OgG3w{j#3T)HxgFp*Hm4jFEbp3Vu%C z>?9}AM8ZhjhH7T8P(m@Yqc~O^DIP-M181Q`2?X z`isGOhTrCYg;YbXV3<$^LR_=G%DDh$%OIWGNKD|rPKck<<}AhwNqLQl7`zpn0|w_$ z+4E-~GlO#qbAxlYMZ0$|1T(yvoGdvh+1VYO+bsZ%Xdyy zdxoQxCkwLBvgVyUd!zu5h25L1-1C*f*8NU&=~s;9gS8qoMi2K~iy5?q7g8`V49V(; zM4I4*#0cK4``qJ|liNF_c&Qj4wuky*Y{Hw2$mGlTqk@dzMiM=emrUEmfquH6r?Ca= zzKzirR_=XKC{v6nETOmJ=L=+)ZQK^MMQ=5tPvJIn$9X4Z^%wdlS=KaPcscCtlvnke zC0(eAq!%dr0&JQB;*XU7Elke`%XpVnwE;%6dTH7AN0I08JW|Xu``;~8>vOGn-w#`f zek~tKyJCk6wlPZ|bu^B1AKL|y6QnF>yce`+IbX$`ye3zP=%~(6{qHZY)E1W)!`JbQmDZ}oCv+c4zk*%MhX*)WZUF<8 zjZZ$KEfLgBSV_@yW?s`#bJ8!JAqK*ZJ`d7U4~iXpP<{B|!4kQXPZO*oxak|;c8oS} zZ20J^bYsN_JFO7Q(?U}w{lw-@7{(>$K%u|FC<*aX4wK~k{yW@*&72%c`tPN!8K z4D&6RWA}7Z4b{luF?|lNQZ|@882+&%<RL^Dz({JsElHOD57h1g?*AmZrvgIt<5W?4+aXjUfp9`9Jm=$`x5Wk9wf zO}+#*r-tk2r~RFqQzL%jyH>~cZRDd!gx<;ujnm4}{juJ~{U`LXgdU>@Z#N^D+xlh4 ze;ZrNZ!SK7sf;k#2;o6S*vf84pSGF@ixuwXx>O`+h`zIagsBGkHC*+BoH(w+)JUd+ z2$79@6^7}HR$3A(Cm3c{Vq=(60yM0!(LAd=%RXR<5s37cCyQYx zmvTFMwV(Qw&cl0!UOe^q2QQt9`2WHsd6dfDyTAO8>E7qv#x?ZuwN^9ti%UNglrUMw 
zlE1HKInxZOX_vTsA*Z3I+$cSF^4fGWX+-jOO*!e-GwCaYLa&cZvRCiHhYs2{zNIq{ zLo?F^?d4lK^U%SExqH;}kmO`2^Dp-WYg+G{HY86!y(3N>&IkI09+&Vue@NxyDnxX+ zTmoM9@=v$4hZ6c`7J{dY#Ue;f|4d_PwY|AgtuE;-)=Z2gm~l$BDSo#m+n`q+zfTui z)5c9B&B@e5?ZSob38caPM>K)VXiRZI3;PF3_{-kO^hhI?jF3xJ+SC}J3Wll%`=2r@ z8<6}D*(?|o%KJhpI8KR}gF_LVXZMck-U}AEhlia5`dj8~+t22ulDg*v3~%7t;R*x_t9FVITkR| zyHB0;RACI^3qF#eQX_(H6h(4fwgz#FzoU&MLE|x*tcFLRsD=uYPnv*t13WAQ=gX~` z`8Rg@1G;)o|0au44dWr}84G_zk2qG;aHSWzXvXu^r3LNSsJ=Sq#hx=wjuLR#2>qJg zvOb~{tUtYAK^rZ6{T31=L!{(dJ^>VE~oxWUmMAr8*A!%FI z&J2QflxU6;Uw;y40Lp|I8$c1Wawvqc9X&9(GkXvm2d8#Cqe6W%K0NHlIvKr*v0 zZJ|!`K9Bh~G{WvL8=BieXP-T7gjNBcL|NIv+v%xSXdp%vPAN7*7+6zVJH|$Pm^nJ_ zfW~Zq+5WqFX=;!MC% z7R6Y5*CAx~Dnc@v2mu9P%h@YZ^YfYWk>xPjB#}kOyFzt7+*~#vJ@&I$)~VH>8hmT7!c{0I86O>OmpnuJ3vhDu%u3wGdG{1JSoMUW*MALx*PnI%Z9_a z`kK#h;%I(GLtyfgx`S*9VRcOK96eh1U9Pt=iC5XQKI$5R)4O^9e|Kll+eQ(FVK;8# zST)^ZbrZKvlh`MqEZlY@@71<@$Qc2`{w=L*_okwlCN#(g0}UY@Af`v>T2A) zNj#-ni$R{cTU-1^ zu-a?33mk|LBat?a$41&SN(>_H++bP7XpHRw?FsXz4s&xNVm^-udqR|Q7-3&azOv*{ zdyh}mxFY-i1uWRP?kVTGr-wJ+Iv252^FXvUq{49?d{3YF8-5t7n_673ez>gN&kv4u z-$9JX8Kt*6B8-yow@z1UO*W!Vw`85JT`M0)%YrS|>(0VjS*Nt75Rc_0)7_tSwNYn^ zFBo&BWX2uKQ=O=_h@i%waU`fKO7@`t0bk~XaYun{if0*qfSOO9zx6iM%s)T?J|Q>BPag%=ySgCvU@H={{wMuvTxpJFuI zGJtcObXtkj3`TP)Bq&+s1q6j5H)ku4rKi|O`a(VBZ0fijwUY)3H z?WQfr*eFp3#Pp<$eqH>y2}4nwILMZ8vYFD46HZvBJl(x2t>^z}Rj?8#0K@{SJ<$ji zoHmV!o8CZyar%oT=mRO{9~olDjzFNogsnUX7dAxlT~)+n7-yRl4^BAHVVtD~mK#%H zWFAN==54?R2-%2v?2v2A!`|f=wRH223R>yTXJh-Yys6SECS&!MOVV317|q2q=hL~# zpqqi)2T~+Y3yac+=Av$;8#R~Ruv2@}J&0^OlGd)gEsj0x{xuUx&O$<3F!N zsvxORD*Oeh@>wbJ=4(Cxh^2oZ|3{Y}rqi)`)cU7yIS^1fJn*4;pZxEE-+}K0{Yg8b zYVWl5+|i?h!_Vv5%#uyITA4w@OmPd04z&D{BkGzPr)^`7483=&I9ev@x@Qibxc%o1 zTu}og2gh|*q&RX_hh-Z^8~T|}Mm!N0=pG8K?qRffXQp zh^NNeRiF+ufa}0ZKohtDybRm~)_`?@jZEBZ_g6^W0&W9$0H!3E*MQf7P2eu@2CxO( z1GWL&s-byBnw)KXJe|{@zO6O%K7HJKl`rh*P+#kHsVd&Y1vNfiGgIj^r~AsD^*^rE zDog&VU&$|djp|CZRC%$opx<)Rm3cdfxi|ba&v`x>3+WM7yNl|*1hsK^Yh-Sx2&UP^qR{R3+>`=^_!&g4D-A%hpF+u R-dDvPuTp5{SG>7=<1d5eLZ$!! 
literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/prepro.cpython-35.pyc b/tensorlayer/__pycache__/prepro.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e73f9526cb2ade97541bad511360a3567e7a0896 GIT binary patch literal 92205 zcmeFa3v^xAdEa^N#e*dHrueiTzLIPUunCX^sfT4zvMfrD{Ge@twq(#Oc!7HXToJez zJoiEb7|C%gIkuBHk2Yx%$4TureI(PQX;;(4Nz-&Dt7+0U(@ti!ncS>3P11B)b=s+$ z)iv#8X0`MCf8RcjdvQU46lvLsK;qz>efHUVpMCcJ_WyhCgFAN)l_q~`@+*I>5dEtt z_wzXC@8-9;uqBEr{4YccQ8byJ7oy2Rl3t7^i^+LkG})J&_eYZh$@yUP{FZ2Pt6dhO zg<`b0Et=dG6;yOzv@jGcZjUCn`*nY`up?UB8BOl=>w#!tSG2f0n%wQzgV91?w74gl z+!HPCjVAY!x+SVm_O_@p6jioIl^s!KXH?l0Rdz>}JyB(ERM{6*Zi_1WqsoD(axkhK ziYkYr%8{t@rl@jzRJkLn+!&X%895l9#u|8l~YmW{;2XmRCzF}JQP*l8C4#RD({La zk3^NzQRUI7@>o=PcT{;is=OzvJP}pSM3pC_%2QG0z0us`(IkzeQ~m$9M7-21bSKSz zI;yL?XXSEPy0sOywS z=TJE56Y9xV_`LKLm*>>0ukhvRw~yNY^x1r?KBk_3h0jahUL)JH`Z~%+|C6m}QP1h~ zsquWE_40gN-&c9~f`4h9awatZl!s6J6YJzTuMwg=i~&9YJ@APzcy)fk&i{-(OH-#b zmR@1VGFmpwd{LuLneR@@p@g!=p?aH<=7p0ut&yoj#@=6Er=3*|UA2?J`B&D-Gpn(! zJd9o%?_YCHqg{nFp3}s6jejLF!fVNYTJpRm#VgEUhW#bYj#rrdjQ<5q6P>?3sw_qo zI>h^=uDPT9@30PO*l|g-Nrf|CW}@mq^!#YF-T%xMt?8FF*_4C1L_bn7{~1hiRB37w zzA_L^GRL^Ze-c}omdekpTh{zkQ<#1)Xrk(TJSpLd=B>sq)6&aa(KJ>Hvvf78T-B6* zh0%O}RQXoTZ9UG^%@5@lHR1JE%x7-{9mm&V`uS_WBy#Wyl#SW`S1s|Aq7JHs$^UIp zpDq(|Ok9r9hGQ@VU2s(iNymR=uv^aW8aoj(~> zzDFcX=TAkI?-ec6`Fo?v_lcdp4#$|0CkTJ>y@FiB+{xM7g<-gLuCI# zRQa++-{Mt7u&m9 z^KrFI?zyQ-xm9j&jT^0UtJbKucO_?2i^~hGTDyNHZY;HjbPOrmW*2HpQ{|Ofv%Nb# z4fh6`^R?Mld%OL~(yHZHY5WOkgLX0A-jPcjZx1xD)Zztl>QBhIx4KYnwrVp|S>q3_ zNeX%UuQnQs?I9gQ%FuLNo11UdtIcM6Pj(*e?x-y+EHA3d^yWkH-1JndF*VcZ3u<%Q+yRCA`hP;KvSmKT>6s#j{w z>Qr65yl1vrZqf5u%G>9vt*NUs<*6(4wN|yR{$Gk~^_kjId4UhOxYVdu>#gQgZE>!> zvm;-7=gfS$UZ+a$;@dhdlMblpW8IZ@)zo6S71vg#8nd%aDxRV2YTVvkURqjM&2{Kq zOL61*>P)NlLN!aBnqHk+YSd^=dq+8r%RJEVcFor66&hP_HJ0OMdvK{W&hTsRo~9$? z)u~Et!TWP-dbTUvs>Y3`dU{K8as}NGpT=-!0iDLB9mguFe zQL7j|-xpQ-qsl;38H`@!WJ~noK=dNVtIJ0aa0Ox9deFGa2GdMmP? zxo|=4yjCNrZb+>?;(t`-kdGdLnVGb+FUL#EUcEMtEJ^;wNhd! 
z^QX(pbBj!MqdOt*hs$w!vC8;r+M}b{k6l}(50~o8i%Y9xM&3#k)@aQq52(I#^Hndu z)li*b5>zW=R)n_>(zRS~l^!lZDJ?m9kfNun6geASqgkR9n(YtI)_Ej(*N*3s#O&=t zy(|{~>|!IXmdf#5lOkxapQqG{epL@>2Em@aT!7 zrIX{~cVxus$cpn)mf|WeIF^2ED`VIzJb(P*lj9eTsnE z#Dx(bB2TbVHx^l-E8b>4nu-_kqW{t!&%Iiu$`FLqHblP`=Z05C9=3Lljg9G3mR35N znF5~Sm7^tLpwq{XmTIhA<$9&Dc=}wtT=m!7?Bn#%<{uY1SH&leQ>z`W^=w=PeAG)T z*WK+$^Bod8wsUBdc-@kyv_>@)n`u)T?lnE_JC>T&Y<`a9`u@42rH_|JM;|SnH)=2? zWOcL@H?ABl%`_JDS<|-LgtcMUd%&EZvI#TAJS<(UEe)&aku^idaZ=9#wElOns@JrV zVcoN#&oM_nKE^605B5i~+V<~4=)ESXvGLajO0EVDja zzYgp1`+Ta5E!F4l%VyZxydPIy`(z=jb?wHV6*|0ZbLZm4JjJ_s@z#7tzFX&PPd?d0 zf3m)sVV})?wCO6uy_)}OIV#t&T;wR7kBT1iH22xMNuNe+H}yMOS`A~llPY?8_Nh|B z=-+Gj{0cdkf19F_*Hzd?6tlaqjCD;Lb$#@c0#h<1vT`ko6}Yn3PPcMPQZ}nMMO&<` z&lv6Rz&@5Prb0-^fREuGOHM#b7(lF^uwwB42?ltkI$Mp6L$K@#Xc!&!%hJWA)z&SCjHWempl*!Z!J=-x4S$Lh-1d~0#x-o-{`d7(OL>5pEV zno7!wm+Pe~wbp!Tp|%K@T*;S8q1D;h=|*d;5zif4s7=T4oYRfQrDImL3bvxGteJlC z5B%t_*ooWtZ64zAIUsXU(Eba?5xiK49zpyvF;Y1DlhH_jdrNrxb^!s1W<`7O(iI^t z1S`eadi*p+{P8=EHJg?3W6w6?OUt3st+XifH4YA-aci$@ApMEoJ;-^$^m{5Fqsh(55d zZ%5&_!eHN#!rKc&h5f}h7Y-M876wUsH`jZMI}5vtI|_Y;y`=6azPg`ho{G{_G>H#-9 zpoQ5Iv1rsslk2d2Y`|Wvz?$TEE1Qj?F^?z3+x)K7;Myb+5|?y7!#oKQEe*3ml_5i5 zMm-(RIiICA&BAwN4A+_JTnxJdhh^eRK;&}Z6_1xbSvv91aW5c6%ajArSg4lkY15vb zEeXAY>I};B=3dlT>5Ml5oL+j0=~?x*G_Fh;EfZ1=X9C5ESkCWtvV~8J_UT=dSGDCi zJOStq^$ZK#A1#fK95qM_xGvWlCGcYj%y~TY157Fqb1`e8#alXrHjmB2t&Qajf-)0b^)p2MRoVZD{*;=L@|{Yo5(9y zOLaaUY+|WAi$E|ZILyd@r7;6sRc52sTpqKl(c@z$PK_NuMg_jA9Gk1wjZ#%S!&oSL z48l{5ml`fn9P6&hmXUPjm_ZgxXLJsXk4Zk8Nef({S!yh|nl-vk-1D?ZT{*5*o6f6O z;UJclTcz48T~n!^&bp*yBpe?*Zhel-9p0z&JD;OiPv={AzJkdV{N8-oL;?sFU|G}> z1|HpceDwaY(uBJCVur4|B(@hX+Q|6^{1DbB>w%A+6d@C{AK0|zSJa_t32fuFfIm1_Lxh5Wh!OCOrSN}F^f~~f;oQw>`=SgT6mul@n zCmroA$f8ZnGT_T(-%O*~?t_V9)s>b@Mt*z1Mpb)&kpy1ZoxFBX0kjMA?ZTDzo^I8Et+RfxRaUFuukzd6 z!yzgS7T!|aTX?Q`dtqPU_CmTw>ZWqq8o3M5@r}7gvTz>B*2(1RT5F{w#k-=F<60Td zTo{gCLWa33NNZo@e9a=tdR(~t&!cCxaI@y_iX<6jNqqv8@r*C2rbk2h5QJzBZ8hIf z7n$%5l!lfixA3f4a<%wvPP$S;v4mK4<@9-RD)3*=8KwAmsX5WVaP*Wk=bV=(bhRM)QMG)6V 
zZ<5!^c%2|Y@eb)pc&sf(cf?(qxF&m>6BkM);K!=zqdIfu1y)J3yBK6>(mQ+6Br7O> zo>`V7Ham>i#Fvzc304#@>QL9Ap~I36mvt}_gNc^YnYhf^$euV>LQ@9@Y!ol+@PZCk zbXd`0RfnrOe5(#jgeV3oM)4y|qJs(bxEJa%2o8T+ zVP;S__}Rg)I0~a6M?^uGKpdHz;cP&V7L*Fk1+*5UOZ`!N($H%`=gr3=v^glrnQ)(m ziR+8vpO%uO)hEaeh-SL`gNFg=;OE0+bY)LR7K$%$FU#U+85GU)TcbnhQ9$eD;iwC2 zRgO%AdT5t5?J&Bd+bO=I21nYIox4dvR`4NVcq{b~sGS#9@=z4&+O89;2Zhwc$YA;R z>7Qp3R0?r01*B<0B-=_pYbRoJwC>2?aj zx98-V)B`F}xcp@x1C)w_O$|JeBP0;1P@yOXQiP4F;sv%whXe7T#BFvV0%BEdFLYKF zh|1|}6O}I1vT53qtggMV{_6o2G?8u%P_Q}o+Gg!JO~rW*XGgaBvi&Kg^q8TsE#+nI zhDjQKS~*|Vp=&8mCu#hO?tVsxbysYy!;foP-o;6@=iX#m+IoGgu(NMhVOKa$4KgH6 z)vvAHzs8Bs|4NX4AsUkzg^9&p>eIT+Laep>vSlE}%raP`SyQ>iymLVmOSJwEMxQZT z3=LnuK~}BhUZMeo3CftLzi)E|ZrMzM)I2e=wftY`iKj5BF$ycqg-CHC5<~d0477%8 zm#(8ED;&etW{HEqNegkMRB2qPuMg#qZUDM(;Sz1H)47;EYrKNpki3Yu^0iOvw|+)T z08@L}+{>kFvFAW4)uTJQG9kCJOab@X-IU)2?r$uMv&OBF#NEW@*O+;m(r3qB{qOw5 z=m;V^j0|)wmxiX>TbozPOXU@;E&F}h>{n0PY-|tCmS?ai=w1YOQe;)L?Jy_Nfx*Io z!VdWABjDry;ug4Uou%_kSUJVWBRr;g_A?vJvmdwhqCZ+G`_lCB2n#4k1SU>yxYIH9 z5H>SgdrUpM(J}Q%F<|Yb6fU1~u9spliGu45b=dTts9(9^W^_#0jhpdHUCjtn`E%V< z+2wm(%UWu^@b#G7F1-=}|3+IRHM`8B_ZF>`w$AZEqO)V!wDP$%*lTXZpC_*^c_V|) zvwo)%Y@z!uovpi`Nqh61nqhC@B-&#$Y(TuH&n>h;#XW=h>wojbAJNZOADbD{6Z}ZH zm7>poz(rt4yI9j9gM6@^&k17&jZhJdkgaLJy8Y6|q<2zX5vrN}Xl3dLjKd_GcuUfX zWS6#vCs^=oZA68G)`!K9OGHoM+%7Y1&@M@xkW;moDzp_Lm2;k;NVH6uny0V)DvS1n z?6cD)Ez@wWG-~&C32h>2x9Hro z<;a&HUygif^5w`Ruq;q(mm>)+_T)7wd!rZkNG^+&{EM8gS*J`^Auz`Rh!A6sVak!d z@=wxNzG~%l3~%U9a0R;1Z6L8Z{fRA1w+QJ83S(}w~jEBgktUy)Gj z78w$~fdpkEuxXR7n}!6}wOvk)mmfD4gb5$qqs@;jGhJI)?=#K40n=SBSKOA;#col5FJ!uxl`8eCHuv9d z^?es0)dPK)tGLO}n+FR=0HRM_)8yy>_`0u#ERjcKIO{87a&@g$Q2?}uDG$m$6z}Vr z^8B<*6Ip?SDNh1GwRpY(KoiHT*Ji+DYvL^epsA=4BlKR_v)5$n!lQv7{5}TxW+z*jS)?p$z>m^|8Q*oOD778@64QYcDY!wX z^TAa(i20c?La9i_kDzMvvq#*SIOJdwSq}b&9Q~2yKr!H2I-ccucsxB$Hh%q@qyG2j z_&+8W5I3EvIk^90P{&W()oX@KmggH>}rhz@@1?C%wvKIAn*go2y{u0M|tnH;>`_AGV9w|nEI$1>Zq zk!`U&wBk?c&};GS!nZqJ@FM`anT5938ry|s<>2>0t*j{%cS$E^e&TJ6#Mr 
zQ2r?s!@hxnZ=M;u&!1ZagKhdR-IVF?_UfpL*Chg;qmvYD-!v)cLf^YzJ5gBIIzV`P zMdZQ7zZrSRp!cg4^0c=mTDx;IvXIk{M8|r}NX9eF2AQ^Dir!}+914=)aEIa!v6-;9 zgMarcM$z^t{s&PDcTi>)(GLers}EM*Vrm_rHxCLC_srqoTsaW)GHG|pK>Q`7%=yCH znhqgznh2=wGZQ=9Op`>~vz=cmq*Y2a@v(|FQv%Su%=P}A$&`0tfXb^8_6XZnvs71r zB;FJtkI#A@3<1OfhJif17!BmDXz5mk|3unNbWbyM9dd`7P{-MV81Y?n4}W%J zfw!mphg*?UEBxEWlIqME@lvfdYP@;LBFB{;|M1f;y}?@?hr{%C;)U2eR;j*lth`h^ z*1TlTd6*8lY?^bzoW$Z3D$669!DJDMQvSJ){ssi+Abp=HMO;l_pZ02peU=CtVuFWy? zu_Q_WEEJrS?7nd4#C^3^vDi_o4MfjgxU1k3$DB1)JzlBQ#D!sHpXLco6jV~(HheNw z#CZ%RPd;$`0;>c;3W>z1eBQ%B-M`$zxs(`wxB=$5#om2BVcEiK@)qWoYEHMQ!)$5T zxyPgKX)!WZde3U=No>87FC)0NS`k>c5}yFqfESV;&)Fh?3(Y0CypRq6b-d?D`kjwe zm-lMUM>k{ADE@?h3V&!Lch|LZj))6O9t8`J#@d2+U=Nq)577vklsW2VYEitHtChK` zyqTM;i^KzqYcm&fp3OFq&9&J}*FgWU1wGu9k1!zw%*~n@73G;3-cy4(j0_KFrsnF? zFb8t&v*=}V|D{WV*Y{ZIgJf;Gcb?stuA+{r5o1v==kuOWdEU;RDvFOPCK}U#wd?sL zikj7IeU?>EgdOU&$dw&$=_EAshYnTA*mln^11%@-iR$PH@_WL1xkE5oHE=YcyaL`H;qQVQ(ccU! zp`Om3*Yq;)<>^T~Ggp0EjpS^ebjj&c$?Kjb?obG)=FTJgpRO}-Ysqrtqu6~u&jB^( ze(9Hg`*;53i@$riJ?Lu)Hs@hQm9M#$$5>o!dxy_FUslX{v)!*rZ_b?fhciLjyE+Fx zZX}=L(Rf-1t>X-A*4@o-6~iglwUs~ z5P#;vm&`uck^XLDGxo^9DxmNIL*aWs`WO+XNPoA5LVYbr-weJ&9kqgNzV?9h`GZ3} z48mSDq;R?Iq6T<{pm5t${#z0?7^dsC*06P2BKLck;n;< z4NzG9RIl=WBPnl>mxtrTZ$#(=kt9$ly339>Y*mHLf2U|xh!;GAw?{7g2pkBtjKE=Q z^xf3DHQG-L4@)3H8UhJ~5Rz{c5mZay=CO%NiBm{Aov z@`ajP3#m2l4I49*E>&a9bDjHDIIYgSDJzubCzg+7Fk&Y|6l}d)S>C2{LgkB<)7G2D z##8t-=$c^-2{cXkTB0VJL+4Bx9Zro1R`6AqiI{CXG~oj%OhKIm<&7ik3>;=rRylb3_6fo3lhSlAD=t5+<^K>DZ~tL$eP&G*g~Eeq#3csT1V~PTha9`oM#;(zZxE(>Tk43Mzw3i8CJ$N=3)p8*cg;*ILVHhLfl z^Z_0bv$ZW+`KJYnI&o3Uth%cp)Re-i{4)-#9xH&LQdq@46oMK8Q3LCODAZD$$+oi- zJJDynL57{Y54*9Gd#^fBGL3X6b~01V9Cm_rMU!^C2*`SVSJX1al9*W#6dmmd$}0}v zjATbpR9v|HJwj*Kc#upXpT%^eHp8DnSA5hB(UmRMuNS=9j9YmcsLV-Izyq7(9@i`_ zw}z8^>a5Y;_$$nEBgHqH3des@CI2NIOh$D*6nJk&f#aW02_`Z7%Q`cfYX&fXMVFn_ zH~yXjhq%Q(=V*6p9s z;cw{hvpW1u9sZUMU0kVxPY?|vL{i@4w09?wC-xV1LxUAXIZuc0E8Hh4%$;`@M+*Pu zH~)!#{{3UYasEF&mPUn?QH>bxC-`k1VzSV3STN0Z)02xsd+`>>Z}pbFSQj9MW|&`Mq%V%FQJbxZlzVbmts 
z%0!@@rPMT(;8IY&WBQ<0^kw!ND+qJ8_X^yZQyfggWc8XtVS6*f7Cp^d9lQ(h>1VWr zE+Yljr_DPGEyO%pvk~KnB=4KW0JLJ;zLY$&iWN*SH|sr*#U@^i^<89ULE%PZ;Ch&5uxny~6RzDfg9N+D7LPiXw14*0nk<`CRyZ&_%}5f*Z;-CwK2^lrii*Ujz1rkO^wH(`p~E?$Mr zySm!mZg1_oOm@4!bfJzTwC_to9dF_!x?>-5@4-R|PH#_fq`0GavfD&geBL&G4NW}t zCk{;jF^*)2BDuO2st}vGJ8#MhZ?aoraBUUH`5L%ke9TUwKLU3^C#LzE7}-+R9*UD| z!G~dvrmtgf=-6&Nyn?30tznE_Z%VT>qcri~VQR)dufw&8jPW`0X6@i}enAPa2OEW; z{;IP4q7J{L!)6`qL}LE2XpKd++zYLNd&$!p?0x$}xCilI`q5wg8ohah6Tp6auL(3v zeFjSb4{_N49Fp}cIw1=-Ev#?8cS8T1qrDy}mPe`o3DD;p{uChCZ1Z6WpuZV_{#}tZkloma+8h2*} z<6cZL@I|3_N9NuraZOqw>03H;_{69s6QbcAb`q>l=gQ>kWj>l1cb9^FL5=!TJwtc% z(^hN>iOVsTR%Ks1E0}i(p&zU>Z-)8O`eJ&k0(p93Rheu8J{C5!_I@-m`=*Vg(=xPs zS85AQB{TEbfueJo27ROoL`m*v>PiYu^VNEN_0;k4V@t>GKQVravsB6B!gqr-Yh!H= zG8Nf@+V|l0&l}=x4%k(@U*Fk@Nf+d}ok#zeY1ov!IJ$k1IMW9T>84;RSiev3+nnLB zi3zVYn9!PU*Cao4;U0z7L=u2RLTfDN{7)!>t-ibovo2m^98 z2BQVi`umWmH6|>fPj`|8@F#cb#i&?lRy3Fq ze{mZT-Lv)m5qmphoslMeyk3Z&^Ubb7uxf+^@1__*@Q$btsohs?2k0%@I)yfncmye) zFPil+Q>tvSe7~RNBZIQCjdjR}bqf1)(|i-B$=QV4XfCJ(Ssfnag+9efNBjo>-m3N@ zBb06iuxmnl?XrNj1+9cBY}6LCE3`#_yq@eK;IltOVcR<&LK>)t_>+G~g`$1%hqgv_ zB4EGfhWcj;ud${@w7}cjRkbc*0IUlpMs}3@7%}*o!vG@hT40|7HVQCr^1S-ftpdIY z1dhH`b+6-rgk}@JG@LRmZf>93J}b+0@uKhcBxLq6^D6`z@Mf_OSF(xz2lJId0;cEY~+#?Dc<_bQE0=P|Q!yTM8AgVMFqa zVGIXhzPDQ5o+RHwnUdIaJni!9`^S#sF^~*6C1O`{!VMa@t8@t8~Mu@DyQx z$;~#xr02)S9w5Bi*hBf9yKP+OZR|&i?SZEy7;NvbKTp;{=rdJkrBCFiB3At2qrK^@%XkC%06+)sq8HG!_UeUsrZR8@^*ptR_s#Y1Jr0dnoID zhaYKMCU@EQoN88EE#!pF$-dg+6c!xnp~>Az0aFgQC--LMgrsg=;+<4{zu=3RE8^1*nYNiOab|Ck_yOv z-cXKKJ@c?7%r=R-1ZH!Kb=*{SVZ$T=gwCW#lxvr1?PPMHD=a1YiZ6jz;MQ?~g}DbF zd07_MCX@xG1${(C^HT2pi*2i4rVG7a8WqSSa^)R(=Y!YT4&1sVcn->eB^w%& z-lbITs$MIcD!^|b4L_OVoE&T^BR606!{X{NiH^WXKSUziXBE`-`)LDz4gmIcM7Lud zdr+4LEp=k#(AD32+6<+hdGC9izrl0pljHZ_e*rmyHZ(4M%f!dB8z(MoXjkgCXz|=M z6nA6CSYzywI8<9#s?|DiA(?w6s=tYbdbW=e{#!;Rf)#@}2ctIecXAd;NA&LYmf+|X zQue$%oE%P0zV_Hiam;iA`fCgbf9F+PYOtU?O3CNcbV{!W<3FPZL~@YvSK~%=YM}-u z`B5eGfh^jC&8l{J9ogRQqbJ$+srf>?5Vs3+?ZPx(MCFwkj=19Kym^VeDrefoIew>S 
zx@m!gZQm|h@D@&@y_(@~L+bWc{tZC~NdDm3P4&(so9fvd*KFO}G1VRQ?`OI(w^gtWcrg8b#`UBSM<$R`IfFGCiVmtYXwi0?J|y`(;$$#MVILf36Dd2Albt%5!@SFl&Y5(~H3*H( zXQn0di%T-di{9+RHKFz~UCn|cgGnoXMPBGnqG@wLWbbXE!u$$9&>~G8`posaE-W!C72!u8i4dsT~6 zxMDwXMttf%ys)hDM-*?*+Mw#WZO1^;6mSOM80z1c3j z5OzzSZ5Ni>h0F1*ZVa(@ySMrpTgx|d|F3FZ-oc6HrDo-A?CZ1(o1cTs%|{Aj_HXXZ zpZPyO@(XwU>rCXZg$MM0HZi}A6NXZll?-u}?$(9phoDJj3 zkr!uWi;~1-I9e)KP~2+o@>$SS;`XgJNeGPoDEHKKSpkI`)=n)uu&VV^k|@44-Of*D z%B9xoQq`HBr*H%~_Y}t3+7P-mJ7z#F5#p+@4ohi;Pj1A#!4(U2LCAwCndl!|TII#- z$6B+;?D=Csn#%KLe61E`g}cac>V^x?p5MH4NNw?kXhF7wYZ|k|?OLYz9G&Z?*e=*d zD8h$(ez__5s}a=AT%vh^NeLe{k=g45ux$$33w29C66WVl4!ax!P;eesEAN6QIeAhS zNCm2OT|V%DE~nXwe52Z&1JTH8$u-{F93lm*JJ7t)tkvuLv|m&onL33kuscARBf3rn zp>TQ!TZ0ZxR82~2Fkk<*<QZaF$36!XlDZ~X>(0@00ulAc8TyTWAXFM_{OTLl)7aEK5_IyYAIAY9P`Vik%^!MSCN#jIg?5o-5Ho2tJ;t~>qtakh;W-ckB zw2|(a#^SNd%T*{8u_2BPfjT=bLtaAY!)zFvu2MRTd}vML8z5OTCY{$t|3U8n}QVgDGA5jS@xt zy1>VNPNH}0!01Opg1yBdjwM6{?Jd)_nPq-c#hd_#fQNyO-{M4|*L_?M8-EXGhJ82w zK=OfY@SE_N1Y3z;Nb@BpuGW(uhW4CPWkXcv zs*9iRs7!6_EOP?zBBg~tw>>&?5FCR>g<2$}{~Z)K&s1%{U&y2n62hOj{~vvAE=Af6 zSgW%mm+*0)b+1ghu0i=`vrsQAPXzRtDdRvTOySg+Nv;4L;Y|j{V%m-^I+OUf!Y?JuwK~dpJxAS!fpfr?U*h7eA~5Kfs~dni(4rliXU zWDm4XW)t4J@I7852NSFb%UOP7B({NVIm`iW1WU>uAOE$3{ zusS7)z;pR-m2D!8EjUBSMb-qX={}qqYw;f{lX+2eQ6xRR3x3Vx-M?u^VMp<{;=^Kz zif=BaL`V#sevJsNaw3LhRFhZSw2S3ZhUbvt`;kSF{-Q{Hju9_Pl2| zQKcy(Q7^jj5S|KoRV(F8j0}{27g{l)d5uS}yn=mZW8I=@?orRg2U3^jB1Oj=<5=V4F7nh3 z+9~wjJH9tsbves3tRIcVOtHdOKkeN*WsRS*?+CV(E`{BElf@#BCe_73sgi-oO&0mi z-bLoIq0HT1J+kiiz`8>7KDUjV=~jeToCFNSf53UWk5`K=mD!3jtG#2lyz(Ay zJ(v=LlJ;TS2`t$sSW=w(mcQ|LzxYSLef81Q@l;Zu6qfuB7cBWPYIi$%ry~du0z^RW zN1E#&m$W{9L|KAd-vpY6l^1IA(c}i=4!faPo0{?bCIvas_E3_*UQk6qVB{K2={-jkU=wu5NXKVSl0byscA0RrFL68if1cU)wS6;;F zN1@L`JTa}e0xLalK&&i=+vZ4lT5_*IvhwFN#z_d@Y_$t@?A8tm((FXf#f zkYze)x3T-V=DtRde@eGHE z3k;#9F`0x6ub?mIoToYU4?H$Dh6(EfjI$FD!cU(d2ay;|VeTaMY=c0e)?^2Dc1rxk zdTypAW21Ba;h^9Bl@((oljw|=1i9h(SA8`%^Z{1` zpr(;)Z&j2>NJ;bD)k8fnX%kR-7PD0I?lsWaulH^D(YL@jn+J&jN!Ai 
z@5m*s`6lrd+S1+P3;|aMk>BgLAJ95T%!I+hy@mb^yo!VF z2rMHOqvp}&FrHjHyq5Rt{!5QW67zu%%@h(Z6n*5i?Vbj9TXuO0C znQf#Z&VZN2pbEnV6FpTua5A2bcK>x@N3G%A#Oh;ZB(!E1myCBPmeV&r+8WxHW!HSr zI`j=r3vFlxE-C)^42$?b=3Q$W&w%pdY@6ozA>f(k;A0*3`U!F-hgYTOo>>d5VwZMe?` z&;@BqfdFE1qB84=Phf9hCLJ=^Fvko8HI~Ur4CP{~%_UZ(*jjL6`c@=Jhsbz4zp9RB z-BvZ|oQBFyLm;&F|2v@Ke`MCoCO;gU4cK@D{lMt6yOZ6o|WwE^4&msJ{v{E!~HCg3VFsvRNrCa z>0f@x1d_!&nymJGCbFJbqUIEX9wfP z!h!^TneL>As(K8KM{7 zY~CP@KL%ML`WykAKUa)CXT$pYi_vB5Lmc50T}UMP59YtcvE%J+D^uYWM*8CKBHcqx zukfc8-ko%l_vIrC4WNGW(XlMIF%xd4*X<|N z-kJ|CqIF5l6VCG%Ql0p}F~j2j&LI$-+_ZgKw`}ef znZ*z1sLXvGFYViEL1L~&Yu0Qj-6zVBl9{+)m*%HfR%5pGA~(5E)&t6}2Ci+kwRo6Y zCAC;V#KP9KNFa7XYZ8jHH*sD3y~k2G41LOidHeu%KqG`}q1~nsm$dvXil0b|>GNHH z{>vegpt1%25)gJ&0LV{leZc6IUokbB6@TW!`wX{G0JsHmLf%1( zV4a*mP^99A$N;LZk>ioe3(>Qezb|PgDt+*gn!YV0g?3ss>oyo>HR^NpPFWrTTTE=y z9rOrR0`-N?K1xD2Qe(VwgVog!hlty2tD|-qCCu|_Twx^GfH1k)K-Lokfyj|%L9R_& z%E3-SqBG}#8`EX!)LW}xS1g58F_rsc-XIEUd$x2-cFsZTd$cuduPaN6Y6+_yJoqf$ zvk9riaHD~oW|eRQHog4K#5`YNQW#)p7wYlJXHW5?S6I3 zR*jJM9?g7{L+&|%^l=+j!?zXQZ)eAe-*Y=D$BTyxGr}195ho+e@dUrkzr{h*KAH2k zGn5ZG%L^27I^b#Um1C?2fPiVK2z@s5nW^F2E;;8vj|!H!l*n@O$oBj*$wk*g01qC5 z>9!-_g;^j9;L;xq<)sT7D1S_o!aYs(j)KBSWna}T@OObX8@q?=3RLpLxoH7mN`C=|24+GMn3k-~G{waLCb7iIgBOSj!8 zyAwO2RvOMdKJyFLc|j%lJsiI14>=@nRxT>An-p{emJR#bMVTV*wbicVxy9A(ZhFi||!h==T(nPTPg{dBvWFHKE z2{<2U#lgl&VL*M^%?im`ve_c#8}v1E`^qu>FxQed7g6r9V6% zi}~AB+N0MLr`V&IG@m#%cD$!l#qGgD*<%uD#7hlCUDAkj*Oba!ygONG`HY>$MXpz| zJ@O#6rJK6zxz*~M$#sf4L<>D#1hwN8Y(GiAxBe!E4_nFl%K8~a=Xll)4~bCE7IxE( z%xuv!oG87gHtI%*nc-nqA>+!!77i78%FlG&JgJ*zq}X-iRCp?0`^^1*i}zbo+{JIa z5n}IkGn5uLXEm$Mwz^ldO)PO*&(7LHH=|SiNT#%dUz2Y=%n3^%(Bs1KOui+bU_@Mq zw*WLz^f{0Vm$uX(_joL7A|_EuieK?Z13zC!|grY^N`}O<*9W0Y^tMA~!FQ{b-n-kr80Dts@#kUqsE@p~NQuQJB@@p9N zH#m{3$*s-zMK#1<;6q#GL5YCF_;_GN9mgs>4R_6RO?S^GcMZ`7elQWX0}n3YGq-3w zCObn*aBjh?U#1!~k;JM2blR7vEXCY$MvWVegT?PM65ubv3}giwz&PSdj-M#*>J%Zj z&n@g0KnR=wTfjADh|Kj4cac7v#Ar?4|Z(O;?3b*}Cw;Z5nS4zG%iXtVKykYlG)0 
zW%cEuh69G_dDG5Wr_Y4Shp*HsEy7s4L3^+xuUOoc_3Oy>JBzf1lu4kPb)2)Rw2du) zOfa8)Ct0S6k2RU5Y2bJv2Of*%r75JG+Dw_k{9V73hQiSoEEu>4UBJa!(y6_O$p(>3CfL;O5MyhjME*RmJyU= zUQG0~eGAG*toBItNa0N)82W38xnHWz)MnFNr+uA&0~-KRdLY#gUFTNIJrSu7Afcd} zzSZ&@yC}c0i*mM$vXP-%iyt3vHi;8ddd|#(6Y~eNbbI3IryhUC_Bu8bMVI59K3y6< ziQ}gI28()Gi)!g%vtw-U**l)&$#MRWNmu;g@`C0|>E6LhD6Nq+>?@Xq_x|54uJch@UKNr^<+6w8K0wFtmgC_m%&{Dtxw`& z0qB*Bg>RnqV}k2K&D0D6Hmg98F{PUqMaXN)6FfuGyZ~nYOJbLv6if${6*6RnXs0aD zG^Kk{6f76cZKf@mg5i0>cX$V0xIkBae(XyRWXgO48`GL+rC|#rBA~+e1P|JHT^H%v z-5UV9Cmrax3xt{2oDkAG>8mbASyKjaBw%_t*_~jvM7)Atb{~n=R=&(kA~h^4bzDmk zsYy@+_$=cpF{~tP*aPBq!}4qRECdHhLTe}pnR+g9K?)|rbpTuv>)A9jV066L&yT92 z3mrBoZrN^Kw;tW+vv?E@%O#?k*zbfdvVo`iMSa(&ZL5hiR9i|~@WNK`4WdB;{i|l0 zRBIfo){mVWJ2AGB6P|ei3Z(qlJmR4Rep{#0^5j?U!^2e6BLxp8cTiJ(o_QC4MTdXE zq1}g7z0stz?ZOAH-jP=w1zBbAWqY_ZG8F%hdhS>B+&1F{am(;nP)?A<{<<;cuj=g2 z=rGM8D2pa{2=M(S65yuv34JQmm0juf>IKABQ|g5#HH_|FUF2TLnH#{Qfk}c#7JNHS9#_ss5Xmn zLr#CofP6+*tFat2x)d{f{7jgi8n1~Ga{>Ng!5veu05L^RGu^IZxUK;{DhCzjwYySJ4Um142wU+sfIW~X`$t5f-+d4PIpWpY#b$J;VWhZ`g=eF8B#B> z*X5`<?NpgLILTLR*`Pm1^-lT%$!tn`#ysTwBT;QV9WAah zzv2UESyJ@T8O&EMnU`RwA}J&H$z{Y=kk5-I(b$FVaelW?X@}3ZFmdrLfkU~7LVCL5$R0**S(ol>?I5)W_5C~~# z2K9hZN;kh#x9u9%`$N>6NIY>2;XJUcF^ZY;3 zkqp-JiZveOCT4#trB?T6UshE^I z-hq$KpC=IR>IFQT?eEyw1p`1pW=MG0nt0 zM0<+s+|cpmYyDwvuhz@rpbPw*y?XmKIv}%q#}Qk=z+gjrzyeEpnA<6(Uf?h?IJrGn zl-ah#uX2Nsvr~&~5WM21Xk7#I7&)HSz}&}4w2$Zv0ET@yy6wa{`;menpBWpp+!iw; zobUv{&4)O2FeF8bh%dGTkYFQA9EJ^41{-71mFkSp30x@t9K*>mBa|Jk8Y3DBNl~>f z;m+8YWBFM?dg^LTkr$N6VPuI!bYg8}|e!U@f>&8}6@@pMb^==nqCvRWBft7W3kFn*Wxk+i6{5gjztOZl znest1-g77^b?s2HyuF5!Zv5caWhi||Hk7h?(n*w(QIT8ru05KP2X6W>x>@5Vd(+ot zrCU?It-xWi^HFrN=O|L@+EHYAdyOL9`0MMABH2I)@f&{nJx(&TzfVRflHmgB-5TQU z!MriV-Xk4ouG<6J3fsI_C6E~;29^nB?97ZOUUnymKq~VB0n<5lOW%Q!-$0+MOWgNX3+r zU~W2qy;UrHm9!-JdrF84wzf-8FX*3dtgu%@A`WD}zSp&+lg$#5G3LgcRUtNm)JB0} zNv{Y!60GK(hs~N`Jj$Ay|JA3y&#f6KLBVumL+D|%XQIkP4eSm?gfCB*DgJcw)w9=kaeqWui}<>~Y1Px4Vt@q54h zsx#1s~|BXLak7keYxf{z1@Eui5o=mW$X=qG+m?N3E9m&cO 
z!NP1amq|(+8EOyum@v6HVN)09b-1E{>^2M;*X;p2cYb$=3i%|D`fjFOtjH;D{Ho}6 ziIeCag-ca{LX>g*+XfSSm&I*Y?Djh|7DfDUs+c>&#pda_WIAS_HfNMvd~#HTbRSWy z^`|H}M=KQ(E9*Q$k1C?eO`wt@1W&glSTiE4m@}9f1-4M+_Tt5kx$PnArdo|Dt6UU% zW5e_y-tn)JBeq6l*HAhcy5wJ#M(FLG?u)K=jFKSdnJ+I%(34QR-ZWIy`}_@2(cY6F zPcsAiy%~bpVF}RLyB&n{^jfmnn?@>(RWD-b6FdY2eTN4BQ5`NbN*(SLVhwuiQIDtT zU6bN|?*CU!3bWZfm`sXP3?#Na6$5<_7a3^j3K+~7HwPhk3Vy;ua0K&CW`TPE?gxQ* zlmpEn2O*Yb4#`S$C}L^mrlBXIj!+0e;Je;C0QS@SQg?b!_FbDs#j8RF~O)=akX3 z&1OtOQaAIG1fCpAg627(Vw)g=DUpc;r{qF)_JT*R?*x-j_$c3!KjYV!XQmnNpkl*J zAVUiVDQt;+FkSSAleHG}pg*UI6EF|ooW|X$Ge4P+UV;bGtR^3G8uOi58^xJ|I#U`nVPOhe~}B;872yo zOWXv2|1L@XQ*8pbKGCm;I7~-oAMSiiC*~28@^;&q2UKq=0u?Q1Pbg2~nIq&?08+5Flrojb@kD8tts5Xm* z6>Je!`Sp)(6L)!O)HG4jD^53BEp2iqHGePw3AB1$`;Yv3)iLYf366C-%yiB?pC~Mj zWaPo_ppFC%WR*pEkcFlWdIxra;F0Q_cC56?c4MgQ#uuc{Wk^1iaidnz3|S`9=xl3h zdKqJJx^OsMlmc?k0dl2eNI%Sm8$f;mHAe3~i$E{~eKvK5Z0bx7dMCHH!gJ8rYTVwM zjEx+^(~Pp&6ra!p8`q(0fqRfFPiRi<<0Lw87vg6jJYl}U!fgp%o|$GddY$-*hADx)|HuqiKs{f|H_Ar*ue1QI2`-B3|!g zP<*>ak)d-NMYn1M9qIrEhg>#zT_Y$PBfe~>OL;=LZ+rwL<0f0L{uGa(Y|+a`P&TT% zM$p5IpdY%%2ojJ?!Q@YJ0bG0@uFG}$=01rZ-pUtTzWf<2cPwM%kl@)|^SBiB^k^-2 zL@OULfM|(lE<6!^BDX$5sbs+NqM5W%kej7wS?r4*x%@%dtoVh@QnKV{Sw?*aRgw#r z??B$HU{w8Oal3A4hq1O~n}L;sKbl#8OCh=kN7Hb6; zAHc{)9AXcw*e(pkimva%fkCwWON5p274Kt8Hw%+r)0qk22HM+muhs73qFpHa7Kz2F z7rNKHck$p)YpfmQB-$mD)Sag9Kf>`4!AtgLYoDN2I__jP1<_=n=~#?b9`NBdCZ0tf zpJ8d9qZ$`7ISC1U?u9Z|nu)V((s{|>>l(qC_lsYHZ?P%A3Vf8$ zi{FOKQYN?LOTn4OY<*|v9cq+-YXUXm2Vl_>7MG2|OqlBeQ+K`0EFU$5VfN${fk7;P zEOz#_>6_i%aLXF>QmuK+w61Ss2!rC?qf|rBgu~8luMrVHku&A&nk9+2%XMx(XEffm zHD1`DYb|3w;Zdf1JDXbA&Br#(&Nv0x>7s`8GR!eIXc~L$g%~Py&a{UhpjUlKq#<6t zyHbhf6Npn9HmzquvV8p*>F~q|8!{$u%?LnOu=b_dXv1F^Ngp}z)tzPN^eV}||9TRM zo|cV?27z_UhM|>^%;@Cr+L@gZrJgEDy-`mAyA?0>p6#&1CJ$y6Ieh-1L|3OL@6}$L ziRGXVRu^fSKJ~Q8@LV}?`k~{w5@lB`brRylL`q^35fg_@sn2W&AJ2rYGsqE5*Ji@R zlaTvFD`|p7morM|nT^V98%ZBE*|mQj-{NsII#JU%uBn~9fP#`Vsr}}@;Q#UTvL>CU` z$T^c?&%IL#T`F$V(mBkwjkR2NcT9N2v~^Q7y4wNgh 
zFik#Ky6=4sZvII`f9RwxjHqvK3;<5`02#?w>jZvXt93APQ>y=(ksA(4*tluSF_~_y z1t1$EJT2A27%Jld6>&bPSa(k4jm&Pi?Ubs~eA+bJAmbZqX!7Q!A-_Lo`}W$KWm~r0 ze`C#3b4*JKv>ead2G$mV#%lm&?{D1$iuyU1dHQi#*5^XslE6JwpC0D z=N*!5L4Ctt7C{FU$jI~q&iJ?d$u{G?5zOLi{Z}X_>;~)`0CzzZ6AKzt*_B)RJ8O?^ zkT^St66eRmZ3hk4#maqe69 z|IB-T|LPAs7P@2j-7`aan4kUpA~SL64PuyUf`_*RnPY$r0*ncO!1Q`3QouLc$x5P| zXKiIgirpVIXUxjN;H1o|w?uReAOS=`By|ud*wY_8jFblqa2QPRc`$TW7K+!sEQ?{+ z&#?i=ORV61(dT4?!O~8kLSAHtAPq?`?$ilFqa*ApQ-3}w6&z!2Jq*0DEw}Pja?>lz z-5jt|W$>}Xieepr|63x-z;-DNx`5K|8cLFQa?{tye4Yk_t4%@djCbtjpS)KnT_ zf7)aoB!6#6xOz6zpIm*Hw3gP*l1Ph+NP`;8Wo&2R{Hv$Tgo%5@nj5`)MyNzTk|9Z_ zXELOJLMrif<9(SCmAGkBD<5I?I*g`IS1eyFYKNs$<_b1h>ORh3d=Uc$mLG_-taH}F zLt^-mCryiepCEx`ZF4{}CDsq=T2$WzS4Xs9f4Apkb>&G)`W0JqSnonff8Fn!-uV!9 zs-&WoWV>IfB!VWWq<`T#NkGcQDl=3FZR)IWsE}Qg;jrF_+WnbFrMr}QWylkXel&ST zI}*xpjOOqdtr=wdSmF~?e{;4VvohW_ON*Z`gnr2C69dxoB?>#aY+CU-$l42m4s+=Ok|ledsGxZlE-f0w~ycEW7oxqAbJdp2y#Ifkv-8%dyOPu zXT$3>?A#a;otvaPq;u{J%C~khC_AJ07sMyv#i=bS`=jmrf{p>N`#;~GyTb-A6sS9U zxib*e4=A^?iBgbOBs=LU3pR)Ug9@UqO<`aI;3x)?bGsy+{~{&;{(XY&EjSUD128)h zS92wWYk7H4+!3GHH^}VZE)dsljl^B-Pne1W$;rW<+{K~jrNc(r4zZFPG4A4saTiE) zk<$L4aTnC-9`OOd`k0>^Vn4n~`HvK4g%f;+@34_9DmDjdOr3@;0P0Ali=1HhC0v`%Ly z42}d&BP}|2Ykf|Gul5F~!HC{0r}20X82bf1FR$E;voL^MMHHT~6n(FOrMNq>8L(M; zD|m`7cA~Dgd4~?y<|*FfnusUWE9Ui_@e|8>`UM?&ZE@3Ov5@funeO6y_3XFlka8RG zQ>v&pmvQYcV_e3&DePW-6lpr5;~AIn2wcXS3U?BP;3Rv!-rjfj{Tp%@yHx}~NBB*- zi|01UUC6Y@&^W>u#R7lQm;`XWA%4l)cSP~e8b^T?Msf=z0-v>OIFj@l6NgW*qy0ei zdHl4Er(n(-*!{Z}cigt#p-hQ#^heJk|L@D5s+)ErE5tUQhIQZx{)?GFb^!yCoGaZJ zi@;3mHUtj$Kn)^l=1P|^N>V56HnIF(RSM?;ycd*)1%dfU*bea=i6!r6i&4sRe0DIJ zh381w4p`H4#IX=OwSh&=h%*)DYSMfqz?V0jvk8Q zy~P$PjKkqoq^f{zfiJSBbdHZbf-BgL_ZC~c5IGTV;dARb_eam8d9cjJN^n8BRBm75 zYN2;gq;K?~DK%jG_B-3Lqx{ccJ0=^Lnc~`DsC3zU49W-)>vP2WF{vQ3-Zxm0G(^QW zgtEg0X4)83I*vK)n{N#_l59OH`1>b?86nK{^_XI(vcA`D>XIZg7yTXsG@>jI{Zc69!&pc^Z%VWj!W}GvXM5{h z&L;X38EBh2Z~lCp&bsa8!=?T{;zQ$dl7msB2<8Rm9E^g^lO6$_V9O>qkenFXTkWu<2J)7xfHp?h& 
z>ciPLdz=dXhQ8SuPNK1dTiQps^1(t0Ez6$bNO4E;BwCt%@Jw&nkY{QSfW}UaxBCh9 z-)i?Q#nsf(T&DTrqMqQl`5zf9P$^gy^m@?LY{PIa0M)W0=Qc7CY{DNF>!d*a#`!>m zG5r#gfQdLE`W2i1elroLBsUQ^MQ;2)dkpGli9v(XXQt{r`r*s(i=I`v&Zd}SAUcvm zRE|n$zqVsCmIKSQm3J0oR%H!4bKxC@PrwvxUsEIO+iypg|BmU4292BhFRGf?*v@+> zSQn;W^<#G5;)TWs3o3vZsQ;ecF;V&1+D^s@ZYKVB&9qN8Jd3!`}LsoyA|0Tb+){`&>BIF zU=m~9u1c%m4NuzPB~L6bEj40{b2Ljvm&z537bzA8VxbI*4vfs*$&_1W@*^>>78oKPNh{I z4f8G7P51O94b{jeF?|llPj-1c8vd~@15Dk!+<0P)KTxf)5pW!Mk<6Bm%=~yn6#45kC*iC zc!^kbs>2gcvIiYWB!-w>Gpuv=1LI+4TNC_k-oXLA3@ge7^z_hp&Mgke_ZOqfgVBWh zbQKJtBzv6yF3&jH${<&f(yQAkP^E8+uHN0zTN!G4M1)|bvAFbT&Mqx}ny2G$;c)fd zgw)9xNYnG4w>IxFj*3vm#4&9(8kP2db@J7ZOjv|a=o<8zh-Rg?T6}}#%ajQ4=o|e& zHn{~wsnlj?p)!imp;)Wc6*emtxpkl6w>GbqJ{&0EdUcO%cd%yY>_>Um_JGAsjLYg2 z;j?x>Pwxs??fr$V|v)_b`Bs6LhuV|3qbX2r6* zUk3O4*wcNW_%>`^_7(a-UHkdHt+=O8d%VAmd~fEuRNSA%$`y_pTp1JT&3#8y_pswxW z_5?7_27c-Y^(h_9a7f6;xvWP%eEBT98_DBS7TW#g-=%M#ajVqO!5b}Q?!A_NCCFg1 zh9!Sr%UY)CQPXa1`9jV?Ou12d`pm{eGigNfcTG9zE->jUghH>6MzUM((NjllBi+*Z z`yrTVf_Bp_oj-N-e(sKY9+I2}W&Yp3T21MF(}v`Ur+38Z&ss;{WD^ajKF2 zrOZC|%!Ti zKfPe2^+|884IwfS`dP g6hxjB^!QMOmW5u9r;ia;-j3)b}$3XLn?M_VT-kQ9YSWW zBIu%t5SD$+Fna|}ekSt|vK&U6B%tVcSE$ZMo6F`#$Bq)q+R;vtuB>RW%c4+>p4SB~ zU6XX21QVSUKdFBXVon}?ClKkKmek2$=H@e$C#ASkEQ3=?cLSJm+3@35U-KDGkLPDJ z1SUVJJIIzGRmTL+(W7FIDSp(qpl%1y_=`*>D{)31UZONB`7`qT zW}Mw+?Ie|oL{%&v)-!AN!}GlJzR$ZegB`}V?HM1lcDSNt$M+6(-8)47{BT66tIqRf zXvjy6u4aCW(nhyzjjo-x9z)AKD%R=_;jA)c=(HS5CVR{^U+Zks#uQu7VM-~?8kSEQ zQOk(OHxN=J6CWO-p>;Ye&3 zM{}xx{4!ZGOmlLP^uC)`b*!8!r#O|IZ0ZWSF{+bBy_QC18@_y;G;1QjIS$&Vjh;bB zb1`vH3Mz{o6t(x1dSai?OVpDyJ%?Lqo@`j2^rDU^^@OZqQ&Owjk*&zvuuueIOi^CH zE?`{2Lt$i(E0#>M(w0eBu_4#IBohB0DTPbo!u?D%bz44!Do)ym$nvhOw>Y)MdgpE7 zEU)s@n%_t1gbFF>N!Ni@1S+V(tMpIw+*L!8Bg3 zK4HmUYUbv?Dzu7d1AGC)?BTHOsaBK$Evl_rDLiQ|nZBR~A3$UfL*IL8|jhwV`%rckY@n zK?zKU(`l)Zb2#EWGy0XntmE9iBWn5afs&+3CW8{^T9O=!l^He06!pJVK z10b{gJLP}4@?<-g8S|_U{lbobwv!$2Xo?$^{|3APeigKOZSvGz>1PK!d;8DoT1%3} zvRe7}hUw=LaQn7`BYV^}rcKMY92mOpt;2T7;PZCV?7o377(sXLP1{I|I~#2)GPf0cL@# 
zz}LVvU=ElESWv``O#gufmz&Np-q95zGWtf5MAuQ?+vS ztLm73E8Wh^+w59;HCPO0t5G!y%E9O1Msc!O4%dRIN>rK;Mv7sv=GXj+7Zr!YdU@Ti z2VpSk4Y`+p%^&qtOK literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/rein.cpython-34.pyc b/tensorlayer/__pycache__/rein.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96dbac137beb8c9e1a924e7282cb2ed7a25c26f7 GIT binary patch literal 5234 zcmb_gTW=J}6|SC}jWJ*m*sw|@Dp6Q_qH%l17ktqg65-8eBi3Poh|M6$HeNShUt;X9Z0std{1e>zB_198J1k(V>Ez`CYZgtpz=9I%m07dQoDQl=tW##4 z3hS0wvqIfEQ$}u@q4iafHSvndGbH)SVNKLSeukd(iu!X#V3w4;!Z2uZ*uCoczrzr= zTXoXPu@YfdrdK+?Zb$tv@nr1ls3SckkA&*z^y9XQwDx3}s3;y})n0(QbS8+p-jVdX z`$_sydq48q(i82(k3z3C@M0CUboZOjzkPb?n}6PH7imab72GAWg_22J=GMc!>3Q~vIi*MtCq!|Vf1}* zAbDel?*)-axKK(Ac(lheS8)+`csuIF{Y3KIpRzM--is7OW;m|Q$u57eFGDUv<9aT5 z7=?>ZrHc5W2>P-HZK3u%68$t!_9Y}ckiw7xz3f7{Ge+Ybp+rw6Qfc$LIK0ejZs8UW ze4UsoGUGO0#wK9uyQ0?W3qhUs4s#)H=csyT?*>U?&h#Aw5ml_ePP%w6SM8E>+**fd3||f6W2ONd80w=Oc%;jyScf+dBe4JwtiCHm`$qQ zw?nS#&GL`_k8Y@$Slw=Xh&vkY+H#{&Ute3JXl@eC zuCF()Q~h#%eR=cxhLK-cZLF?BXy}xNySiClg(r(u=1##VFi;dL;)4 zy>GTeO9EynveR-W8N^aK0P5;gdgim^wu}kk(?S@hGlW&z3;Y9_PMHm`vYbvUc?cMi z9aTW5w50u~vRX-}vLVv438l0|`@K1vhmh=es(DuR!w?(T6P-@#B(Ise2o?YS&C;(l zG%oFnsDquov=g=aJrI&!x}&1UvW-GKNcMr9N0B6N>K`0NxMOI3R>|lne`^<5iQ4cF?DCi|CK=#+6!@eF|HA=y5Pz@?WJE+aD z8<=fe-S2aCRL;K@z_*We>L*-Bdr40myAbkWK|eId2JRCsnx)Mp zKrY097RT-3Vl)&7&%^SsZ8 zl({#X+c8aa0x)`THQB1wt_9P3G_fog-tAlP+RE~TXUE){ zqg76!?^gAuWg1gvHs6SY-+Ior(=hG^@Qy;GHaubcmpP+3CrbFC7sb{$=FPqP7!hAdwyty7RcJFJ!__wK(u6_@oTnzTB1NorxKyM zGST3PLb3;f2-5|CF{r@H7Q~yJBY^=j2|^Dw4Jb`#lbIm#D@L~}dk6&k)oRHiM zZiDVyC|IB-@O9Ylt}z4L1yc)nXH-kM1AdT|P@?bv6!6v8Cdn#@Pr#bDqb|28(rH*~ z&wn=HbA%cK3(m)W!U2`Y5hftD#{0T2f?$AA00)M>bz20I+N1YeaK4#BQ~uuE5>t`9q<6(Fza) z?I@P%*(@S@QiN#{u~IFiW#glCDznl=L9+~<-r>o0r_B2RwkM`e->O77_2QetbsyZ#}vMB?kLjvOFbe&8nq!hl5itv(&f zP7Sfi7@J64Mwt>tz}rSJxn~ee@~X88$3SF4Tb#&f3a~|ARD00 z&BV|Xn*kp-G_=}DW|~gR;3xt1pwL>juvXm~G8Ro;0FB@Ly##5V3-+nItL8Dr7SzvX zU0E7tawDUbN{u&^mQU0XVV!d}z-7b*zrb&QoCrAa(h#o7} 
zNLo^3dx8Wgn!NW1^sz79KJ4Eqpr;mnDEidbqEGz}Db2;r#z|4x(op2#;o;%o_wnfJ z`ST0B^QX=?ml^vTo4Tqf{}i`=g@=!ShlPxFoT6M}ow6yHSXg0$D(h64+3S>;1!i;Nb0M8#d*|Ms)LKEM3!KkxL)G$gJX?h5X^xb^SxcwroJFwfLA zmZDr@uN?MUhougCRx+(+Q!b<4d1<`zvcisk&C&`>G2IHuJgc!+rsK}j9rm)yUX-Ze zpv2S>HNU8`59z&3?2teNas z;-BwvqilPdx0(>rGM(**AnHl2`O$Yc%+8S4?_edKfnV%D41aU^X=ALyt|FZ2ZjI{-dx{+m{b}eb8AR^5>BOe5=Yi8CZw|ynf1Mdc%(lC(%b#H6jg`$UTpJwa%{HwwT_{uS*48HHZP(V>`dN8nHmQ2o4uz^W%Rl`; zx}j-eb-VpO?r6K~EA4h`V||^beRHeTZf~t_QF(J?W2@a-LwUt*ZQfkJxzWB!^((E7 zm93kbMt*gzy|xCSu~XXa+E!}~o;0H9)NZv}tK{8TFP`QUqj2~6wE`gYzS$C837DbC z&Z>SoN~Cgt&yBh4%%{ganGmpNr6|c32&=Rg1_v^mGaF!aC7W0B5HKWtRYIq%qJ!tM zQOoA?A+o9orL02xy>qSzA?bUnb5;$b2piZFzMnbiH%wiEihnnjwf0w*AIIuIC!!~p z{a#;0%c+cXtU@u8YS~0_GE!y8U&O7i;=z7&&N=5?b860Y{9a_W(k17D(<9QMiwNj@ zxbNcDf5c;SgLvnep&QU6SOFYFtOPOu>98`m;c>LYDruP=EHL$mIgT^wVPy@QykaHD z{wi|VSEK7jDJTl6!AEEZKN*SwJx#0oL!r*f)=tVAbzmi8qa94*t_XX>P=IWR8i5Dd7hoYrZSfBY5tecmg~gq#BTJeQTnFLWtljvU+$tOu=wK;vhw^q+G{) z=|CL25DH+yFfzo?1A@2#4A6xHc!J2xLb5#`Oorm1j3mV3?#CerZ)RlCr+MT(Z1*Bw z3rpYDJ(oQV$RC+-ni<$u{XC41a`)zwdq*7CDHcb2U6JO-E*fVS;uR>Ynoi| zi$rce*^MLc{>~JX7k=O7pGR=~J&O1G3Dr#=f$g>z1Cm&r;L@b;4rV89-@FoiykUgp z-u^|CoVyKcq6mBt4hwZuZojv>)uKD1W2&<`1`?;`@tDVvaX7J_CQ_u$PlDE0Fei_$ zy6HWcSRM>-_bn)Db!Eo0Q*JGQr(nakt9sKiO(`>*Z$hWvc~Z91FzyD1PC}zOJ}mr~ zIg>f3N(7M?C)PLS&3*92!}~ane+f(;0B2P_ltLK@Rfs*Pl|6y8I?4@q5JX0(PS!T; zu`#QKqANp#hI8KGn$dEro~Ky*U{K`%~Zb~X=*ffP|zMzAz0S=IO`o6D^_51hYKiLt-j>CsVxF5-V$d9&(13Pj=#qonf3wAOZ^Ut-;DQfYbE zVbMpX)uAvLi&2)i%Ru%ssc6N}IMn80<2-Cs#J^y`U6c5lEKr$jfwDCEw}jXo@qTcK zbdT6NucO(EZV(3Ph!7xEL3c=}t5f4^Ho#_bkV(oz%l};?m?Sa`XIa%+h4UOznp>Qx zhW@fL_N4;&VopfL#=Trh^(@I*Ct(|m#ku(g z!MoTM5{_yaUxqTu(SNCZS`-_YF3iNxGdlqvCN#9#N#>ePOT{RY^`OvNR$p)28uJxR zT>y*U{JjJDnhWk}xohSz#TC@grcZf3Ws)4DmP+k6lvYmE5nZ0JZ1ue?TPLH|pEmiE zMX}iAi`~W33>0G%x>F@n+eU` qiUyl9fVfE!U1js1AkP_w@;0H8zKjPMauIw*e`hLJmOp5IaOFQ~=X!zw literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/utils.cpython-34.pyc b/tensorlayer/__pycache__/utils.cpython-34.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..d89f31f54166a829081ed3ba7e957ce96b64e1d2 GIT binary patch literal 20535 zcmds9U2GfKb-qK2ltf9EEX$VU-|?=!7I!Ji_O6{oYdP84>s_Z_t+7&KhZ}_&kwc0S z$)WBHEr}r$H0yLX4Fcq+1qw7kABq-8kiPVxNP)h!2vYQ+D1!81fC2#)D6kEhr=SS( z(Dpm$&L1UO_Bz|5Ep3iw?%aFsx#ymH{_eT_#qdb#uRi|0+`D-p-WQS22<|_Hul={0 z5JmjgL{*4faIcA+=9kArF6Q6IMK12&_laB|-^WBXE@}ypOA3u(#re5kM@ph{zohMSw|&>ZquViQHkd!E=(I$3<=&&;6n}Ac}*c2pB`6h}I*bh<2l* zI3|jRMR8mdkBBmW9ifpFC;0zSzQ^M+QJfS-JmLobrvv~#1Qw`K`vu4cpGI8C9cR0NA|sz?8O8YgA45P}y|y z(x?}$TE0?u+#}Q?Z#$Ce0UGx+N|@#W&$eW#YHhn?Rs+CQ!J1{rqI;%LH4T|Js?|J( zQ!v-9YSEPW2ZpQ|YgN;B$82;|ZZz{HX%q%L>s2&e_Wq1Q5 zwXL5B{J)2;)ZjdzrWTG>0Y!-dfCV`u+MpL&Q!`aMCf*UEb4(Bg4~va)8ZY;%Fd60t zs;-_#)XO7c;|MpznmaB&528(p&a~M1=RRjbv{O{+=E^_xb&d-ssEU;Z>hPNCoDw@< z5e~+9j3&FC5SVBsG~S78H&;vsd0KSNh)PVfPm6X^Ty38b?S65!ql>mKHYS;}Q;ZHx z-82oY9TR1U!!fGyj;0Y_XGP~3(LO2KXT`_Rc^fPEFz!x?EfKp%<)_36DhE%!AMYaN z`|&n}#9=Xs?`BNcKZtqH?J;o-LqQpu?7VltI5xv^ILAYTJI@|8tY@QRf^6|e(>W*F zAm$0t#tjCJX~Z8M&6L>sF-8N~J1#Z{h5T8(4I%8`?|j`yaaf?do74)$^P=;d=z-#S zMfvCUQ2rnkCwvsA6^f9)MDS^`wHd#+^|Sca`*B(yU8L>r^GQ1tkoHGfIG%Dc98Wpv zvEmUQ2>31Fvi1ovRf?;H(`GHSIUwXpqIg1lJ}x>JL}x~H(qiYYeUkLxzCZpAW(`6| zk^Pr_Ay*FU)v9-Sa4b zrN(bgu$*WIeXk_E3IK~f(n3|(T1kNN(2+!mjOfgEQ=$iRC`vpXa|GD@N8<66m{pXR zeKaL9iW2w}QsTXYPYH+-ezTVn-}adW;sSjHl=yaX>ks@_Q;*^!NQY9NVx_GgY54E= zHXY%&NioIL$Fwh{PZ>Xl@wP0DN*$_HH}rDlfmzqVB3E#|*c z6X3JcyjSB6&8yC{8?+_D`}HtOYalH#m|0XH_#tmK&}*a@s{yVB%gF%DGQb3-I_%ew zPt9yOt2?DEgUY^AG-_}9%~c=w^ZW{3)SI=&Zq|^}*zJN`sW+OAZk0ly#Qa=k_pyx~ zL&B`>-FgR|Ml95z(_0t-cWNsXck>5SR2_IVjY6TDR^|>2be=D)1$_|)dCV#;_4ZF+iF&e5EhQ!fF0NnmSVp%@AR^p9FWFElm$ zQYR_VJvT7*b>D5MmlYsiitLAi=~)8$+wFiT{0oFXxBmpDn0|8U+ zVv&dKvB;x<@56Jb0%n9g_R*3?8L$beSHK}yBRVlkZ&I3)D5!^Z{i%Mk&O){dA$<7W zi1+~quUQuSrg|`Q_ts5f1(KfLBw=-8oHeUxR`p_&+_GRvDC|ibfNvDVXm)p*b_V1< z@ei?IMz1&!uBED3aBO}1z&=PnMfHv9B;x8}R`z0z?w4f~-Pp}q&A`mk6zJQPYL%pm z4W(fgX-G5@xVCe8u>w(#;$7y=2Nk=C>1(O!+lFq^BmIS=T!I!bs7AhsIHE5wyrHrLU%wZENmrTQHVufb)nW9-TnpH=?1-NE9n1)xV 
z7uiI0(nBmh?JN|z1LcPml`lg%Vip7Mc80`sdX*NTDeG{8cX6x3W1mUWxPe%+!K3#H zRL6XJYbZJfUIM+=)9VoQHKV@kEfSE`**0NL5N(+yLPF?;b+fS9ut;o>RPLrOu-P*M z0S&IC_sw;F+pLt=o%}k~y;X*uB3<-|+1Hy5Ogl`P$bb|hfE3=$+Xqg4KzLav1_8Dx-TD*S!x(*I+Ixxwy#IMB2Fe{Z+>Y1J} z1~OPiwv&OBVd0R0knwJI=sv?^$!4=8jQkH2+ycH)gLUD`#fyQ_x^S5uLKU!Zr6(8` zYzus)t{^=Z_?lq}snA%6tl$4vL+VM92pcXaxf?0jzhryV2^ImrODkSjw6J=4BD*Y5 z2=@qh9~f4{>Ru^`0HQA;`GB-GB8=-+HhxBTY<+A0^s!q_rM~9r@#`m zY}PpxS`5?-OHsXPWBHM!i=f#p^%NE1@{l|)We>u8CY=SsGmzzLCSWR@S6Py;>V!uy zu4Wy|Lz&Gy4oT75)~6PGSWMpP*X?yS>!1_YD&;b%`E^4{me5*j9bZyv4Dz&VL|9#| z1VoHI=k|MQ#7*6*RLy&m9Hmx@-|?z!jq-hHrMQEE5N(aH!=cyVbm-@8w~rQf>l3#? zdVAe0o+WbX=c1&Xodq;#>;(qO`V5wk1Fa@u#t6)naS%Rd!y_7fCDQ>R8sMe!gV44nu}8KqxEQo<~`0EGU9Z;gV65V2ELr zLtcSjL4O4J3^yU5Q_GeE2qEt9%n7!gj`4KzB^?j%A5n19P1x&31L5>nY> z;>bF=ikLE*J2^#flQwwLknSJ~$|i;1-J~*C+!Ntq+Gnp|BLO3I6A1i+4H0G+?obW& znGJzlQt5JcfO;jb*o|XV$RVz16pML}0n=Hxk2Hqc@6EV7z_f(MkkeGT-%}WFzo#%< zZO4u6=G{0Z$xVeGOs?N^LUO54Be)3``)+~-evY?0l)%poML0BfEb;k>uB&@D(Vz)%lT7SlZpGMk4s=&HDTNNT$PKD{ngWoWC&7)I60BzwasyPKtMzlsLg9Nt-bQ!7&d+@Y z%x%xD8&=V(m*;L;g(jjkjy-o*S{vkB%r$lq(5cT-NXu?u8>lO!q=>`p*;THrkH)T~ zou8BV+TX`TeD+K%sg1}2jcZ5aNo`C!t4(Nn?5uVyHX0w(#$qGdF#a9E zH;I2UcuRFs+953!JF1;S$tcQ3w4*3L1W3{6$=EqA8Actyd>E}q(2^h=1=JGM`=rDM9wD*YNZ zN-6d`Bs#;QGs02uq}agT7vQ2UaOK@x-h)xG-$SA^+6!^)MiJtpk3syJ7U1jc&G;zM z-prWj92RYCN{u1LJ>}m|+ZbWSTNZbqQ^9{`#JjMZVBVNF} zizwV$#fL%FrSy>TDijiH@nvkVez;&QZOJM>hn#Oj@(|$ph(dcLTA@mmTjuLjKP-2r zM`&+3)V-<^mN@*J>0I8ke35;RZ3A1b`2_;K@?z0GF(IpEe`uA7U0ciDzHXWTA+*PM72(N=CGP`c4PdN3u|~$*|B&T zn60eJTV%{KpQCrOK$XdP9Li*wjphAD=zu(pxAF{K$S{)^=`u%`OLQScB(u1r$K{K3 z{}Ns1adDGsJ-SJ?w&cg@%_r#cGF`}WlQ-!?RtBsEpsOr`0lo!_@)o@rP;g*o3@Au2 zOkSgQuhWHW26g!~-3{R4;%jGc5zmm)R%&@zdsch4uebUghH@^@Na!<+Z=haJ#;|jo zAl*tD^>tEtq)}Pbc9Nok+z{y4I8-TgTEE!90}?(+l|m)5j)UmO3uw(HN=6tEJKy5W z2i7^%7Qm5If+P}3LvVN_cu+tw_vmTx344NOBTc1n{yS1o=yz*FI7;;*!(>R5IJZTW zvtCoREy~>hS{MQ_JBA#O&A5;gu^!mOD4`>)I)whd3N^}x2yW29^pg>W&{Zf-#Oi+< zLM8A6go%ejw735$#CnnpS{{hed@iMv#wX*RQhy8aSE(FrFLeshaI%v*9DLx$>_?=@ 
zmwsbSe*baW(+ej*xys`$zFu2lq{5(?YQ_$Bk;AR5l5Sfihf1+WirqP{!>7@R-Ymej zc{-&SwtqUk_`hF6?dl7D6ea7Cn~|pI<}U5dF3<+6nxz@s)$3Vrc4l_d8U2#l%b?oq zPR|5EUXOHpm7bF18J+c<)kym>K~@2h^w2F8`xrb}*|@QK!%aSbLtEH%<5jbs9uBpS ztKDZ^lk1})C}k7q8Tl$Y$;IzOhmil|4yYxH$32RKysarXSN5lHBTkVTiNn(y(T>1h zBp*&ak>@rTo5uARm%V@*D2rtyPf#w95&TKvN|32<`(JS(d5Mf!A!^VL$eV?e(kE(g znG(<;?0iArC=CIb_Yh7O{27+dHz66{iD}S6A zN0q%+wGhg*uj9tA>h@!2O&Vo$@i#$Aeiohe!b;x5i)M&UF2Pi99Sw-)vFp5^I&Zfo zNgH2TfRlO5ryS4U1sSLnDyzD zyn_N4VLxxCbA!Q5?`fJ#Fc-P8(xn{DF=NBlemFsr?Qjxwc^dTuZp6e%h|DYme~wy8Xc*Ym=ZrOE00hfFT1eX+2(Dlc1sfLF zN~RPA@>t=nB1yqp+XykL-JD{@rp$5OOUlad4W_1!K%>fNBKZTG1dX3aE_g9g%teT2 zmv{(^Idu&=QMSHm?s7LuykXCgQkynSvq3?MhC~{`TBV9zFWSv#Uv&c(NgwUYgoIe0hPB&sR|ke3TY=k=53B)EZA~?1M5V zVQ1t~3ku7eNXX0lo8h`@ouN{1tdVsd)Kzdb<#KjQ4mLKv?86d=1LTP}Njfoa(kU78 zgIGpEPcEgk)-QdgBymD&hW& zY|OmuQJnC%q- zHh&r^7C9u3TeZet^s=GJ??OfVid%E4t#DM}t!2^?PW*+@e}rKSit=UB2K) znN@h+>nS+5e0cvOA5|!M@EHy zF$xM!;6%zO-EeS%hj{JreJ!cJe(3jw11zd9m@1HAL=zlC!?`m=M3{1su~L{tXdO`d zp2F)*9-ks(Kz6DH^B>QAcIRCoN+2B{WBXlDV5FM}sLHlKkl6Y;aD$f+T^Ad1Z%9bi z_ZNWAt;)tncT4GG#=m22hK9JGuEu&u8EJtWBg?zTd)+7@B|s0(c|ln{kvM}FNV$KI zWRb+nE*)7R|G@M7VTeAaw&yTBRyDp^H}(5Ugpon)p~2Bt?qNas2R)Cl&Fro12vX9ifmg5&NZIpfF6!6Ock9LdbP4=UHU9m}rIi2)(DwP7 zv1uw7RaIs3;P51qm%s#cT5OTo$YUr;tHu@{*|uzHDD>q?Xx0MRF7)v|tRuMJ=V3v> z_VBRS_HwlfDSDWuO0lz9PtMFQe4P&8D*I@Lj@V#?8{2EG`Ta!lGQUIS=^<8#pFa3c zeA0(P$Hg5gSXD|khRbiu6<~(+60lC5j(1pf`399Fup1_kgTnLdCfFsAZ{m$y@NX=Em_L?^1J| zq^&i4!xN{jg>sI_lKc~*BJ5v8nJC*}3=yQHHi;~Nv$!U;2}GHah%2SE=kYX#@3eM| z^Abi-7Du$HPdks&Gk7<`cl7t1cAo1bwde3Ot-TZ@sK;X;kULf&Gn_v3m%eO((X=Xs z0b}B8(3FVxpYuv^4`qWwsD3D=cQx@YbSJ_~Y&4N-VjB&b80N5Y)%uKYU|?P-JTz3* z*l_lN2eI1OY8rK?;_N<7i_Efc!xm=#fl?wMeV6{A-OU=L0#ZwwaQVoU zMnw6MwT+#5x6)sR8;kPZqo8=N*h!ZI7mmSQIW`&kl5WPre}Dt z3&>>UaV%V-1Xo69;Y!qCM==@n{QqN~wC|DxbP}2+!85pl0RxL*1t)1W@g9!b3h^Fp za2z+*B^Bs^IH3<(cz&6yHPUexkqpoje`q3*yM+aG8&DOGlcX4f?D3;8&vXx{mvr#s zNl8U3H~AJiwQ6cr_DM;wx99>yv9^e`YHh_;F;}pu0x`8bgv+mJl?~uVOwdXgj|Ie_ 
z1wh2Ofv-&)l4QecVkb+y52}FiZ?5QUS|cRNHWuvJaKjJpwXGwt(mwF7Y~;J#`nAa5p8%Jo?Wg^#Ulny)bSqR zE@?Q@JSZk3g~|)T?i%+{W2i$6^~XL`gqD4%?CTVj8O)~cB#v%Rsvms7&Z<_=FaSO^ zE#DUoj@OS93OKr7KkUJSEJIL`aljy#(nnf9zk&QSGn%&IWx`~r-M1p+0KG2^{bvdUS@H!Rp6-Qq{pR!H&d zU8|{YLt>KpFVI+UmhkGui&s~5!^Y_qK6B!s*Sh-l@|_i3jTov9Ke4bDjIRFZ(Y1^x zZ%5~Vb`fZ^n+-w_Vv)r>EHD3%sZJY2R>RJ!UwSB|5meI`-9)ugZ|)$al>S_94f0<$ zAyXb|HVy?Q5!{FBX!XxIg@!)6*}4+G)Y~{hVK#J?7^t_6?M?l{7g=}f=U(hwXzyT8 z0bw?r6>^hBLvB~$*A(P<=!h}Sjqfao5?;$tS+P3MGApa~!ddN-iB)Wt2+ZJ3F zv2|h=@@4H^Y$B}O0F8x*p!ZOJ0J%SYb+jM4@vL?+UrwFI}cz?|8 zqnRPIWXEw6biJ%Zb||;UAQVoJ5YcfXhA7u@SR08?A#6)KTV#C<h4GC6n`NqT~i5={7@v=nT1Z7L07J@SP4Gm4RZ+U4Fh`^xScVHng zAYJIk5xzj_4A|!)Nrcn}h#bM|9hr+L$FF#x{4aeNh*|iugnxtr5`uj8Au$Qqllaw^ z$p~&p`J-@x5-Wf;%CP>C`oK~7P^`eYPEco%EJ~~#hFBSI{nMM62~UFX8hauMfedcs z)=DgSOkp4(ZU!gV*o@V&;=2zZP_n#=aMST1Zuem&2Qdy}dypICL`7wTCqk6R+`7P$ zfCm?;l+nvLaYx6!u&=TX>wCVHqGxPPK<2exV&;EBsX&v<7=1Z-v@b-J9T%QJKd_Fd%f&?BrZA-_r&n9~9aco*v& z`!J=-PP&iMw3#jB*Qmzt(dFxO;pG~xS=u#unbzz{+=!tGcn(wWAf~WJX|0l%K&y5% zw0`2Km&Er5zV=UXQ7!-$DJyA;T5;a6vWfnJ*6q!e?~|#3g~ahK7z$WnpA=iql6Qc0 z3v~~70PRqN!hD-aA-~|i#zN`wy5ql2_I&+a?=@zYeo{vT6#Ms84P=Ag`vP#f2hp&U z9&G*F4cZFB8bM%y#BL-L1>S-8vq9uw({2lIwWwn}m3pDtgvDF#y$wSx2xPczWY(>U zjr!d?NZ~Ig9DD#ja5EtQ9&z%~i~Oi=%_`3F7sC5)gcjz4a!Ty^6h z^InD>$IH(J44WFkzTk*9CSb9KU)Zl_W91k5^OH{&V94`_I5)7{U&TYfxnL7!1@EUw zz^Y`xY80Y{dVykrs=*Bs?yzG3Mi&~rWsY7-CybNm0f(ZPOJ2MC8RS^k_~@|rLsBY0 z1`aYB4gA;z`~6QO*`vQkh1rGpF+!K}mJc2L6n2`?XD;er(l2=q+*@V|F$@Zlv4Ho_ z&CM!@aaLbl4l}Qj00EvQP{Ge|Y;XJWjzym(%X5&glqg}($&1-f%&lA7*f+E1xGnv9 z)*PQ3$*w!KDt>e$?Vdk*M4oeY@>AqTKa3V73RIN+t*?^(p|&<$>SS6Yshj^~D>^pt;yW-9Y%k4PjR zcZ)cTeW8)V+PHRFOP`frp$1?XPw0Mw?lN>iT_GRRg*c3bvzvfB29D-HQc9@|BB4Uo zI~&9YO<2ggLK0)DL6LBE`}6!$WqCew!l|ipkZPpp!r8R67bYnj#NMhT4^EN=CP}1A zS|*Z45?Mci)OQE2skM5YP`7Ej^qLn)BC(HO(T@WaCUMInABVxKXbe+pmcFAO2d z_%}pdh(dU7h=LK6Cqy9;oF_#g8JrJ@!T_HqL_H}QDN#rZgJ323dQcRG^fh1)i$W%N zZbTGDgYz*_I3UUZlM?lD(U=g0gLnqlX}+Emg-Kiwit>;s4~sHjjEFLx9us9eH!jK( 
zqI^)4Cq;QmQ~_*?dQv{b|A+Y;mq$eTs3_wSC-^@m0Pq2@KsDT*Cg2ayVO{??HAI)F ziM#s$1U3Btod=wggu??63vf;mGIS0gx+b1FP3Tc&fC4qnaMQRe3Fil(8lasey-8`DyCQ1JD{U&vD6l>Kc^*AizFD-FyJV0Pun+&rp}?5?~Fnn9Y7b!+R@hcmvi} z&BgCft8Tg#TQxqw7z-*W*6nJw)~w=uu;yCIbS*Dgskz>OYdNkr*iu%xR&u?Jb>FPF zO}Az@y%8!Yx^~fSTHa^{AYH3jtk+Qgc&TnWPI1+&o6Qoy7VZ0%a?D1nZg~f)mRnS2 zvurntwWjM$(Gx|-Ra6hqxSesrvPYnqKgkwI4s>PP2q<=d3*=)y^gQ*&9o5R z0dWt5V~FYQuvkwC=ZtVM5DD?FA%4RU-EpDbGu(u@2X;?Vq2c0*l(?5BcIE3q;esnu z;@yN$eX7{AO}yPol|1#E5b#;j?n0K zQvxHarNp~Qx(2f*cDefI*S{`F$Xr!j8OWlO1Li$Zm zjdu-$usSPto)X=YqI*`nh^D)kum^E=N^JbAafixJi4#-~7W#3rhlD>)b|DiEilg|h zCxr9egn!+g5J%7vl;M%BukF)|TGR{oc!coI(+}^|)A1fbkOY0%IVZXxi6nArGH;!YQWc2cYl6H;AB+~7Pya8#hYm&_W$^J3>2(FeivO!a4aslFeAhXMqr zHG&YdMC@s?aW{FVmzp00)a{Oh)clbVb>eC|>crKwKNF@zFa(*1t-2?~u}V_UjxOt& z^&z2NOqEZFFDAv#1+gLai7gK%(fJGZwp({Z9 z5e0H$XSSCDeKj{>!D>|v|}8BiI}+=JRdJJT`#Ix3Day)n>>*;D4vVU8^+v(|)il%}lK?psaS zw42hcHLO|3QZ>twP0QW1)w*2WmhPJ6S1Q?#D=Y0LYrdRx10ZG7HFI*+bW3YRr`EA@ zvRSlRc4!|D zV_6b(>g}2%S1x^oU8=W=Wo5VQwtGo;DJNGh|98Uo2kf?~%!UOrJDP;E!G|v$%W>qq zT*MEoATFX&USjL&CakOAmL7nBo#y=-Z_v0(9^LR+B79H}qqGXr5`$S~4T3L zTCrQ;TCki7!7KqxSSn$WMto}Jt9j{G@(e2fa@lOW6+EunxLf2~XrkF}w6^o6Qs#CK z&?P4sfH6Msd5iPet8;U(+m=dTC{C z!Q|%g!d#HIU}A@@ylKH!E-|(6SR2OeHr25)Cd$dn*DG8-CpYbOy$oUD$`)*EmnTcD zNoGIW?;6EO5TJe33R|I};hU1AMDN(Zc$a}A(5z}effU&b1;eun28xdZ3mm`ej2vuq zYhxD%w?nLXph*J8&<6sR+QuZ0p2sAQ1HKDipbnTZ_Si#9>Sf3#s9pnyV2(&)l>Vf& z6j4w{b%UXPyv8E7iXeRW)`<8a2d~;TJfUVda(C7&Vg-_({vcs?qMr@BY}IACO^#ML zB{cRV4!}2>Vl=wjOgj_up7@8@FDGj*glna4m0U+|?%M|Gr?|e+jl`TcjLL4T(fpEX zqZy~&XqXsT8UndltJg`oI8Yi^nYu(hfwMX%%Qc946mK(c-mf`r3|~hN-zIdEjI|f_ zauHg0 zW|h`kHi-?A%DvPDHoHb3q`{T!p0O@&TD9t$TU>*>x2w=oq>COh`m)`^u*0N@bx1P; zNa2sXv+vM{gmcXMn!TB@Avyi_xl^xbW2{%h^Bv;pm513?AEEWJ$6(Xe;D zK$fpsfT?j_VM)Fs36HQ}?Ix6mHk-L0isG@IPu=QcG5ND!I%{m!K_{-(s#Q|+Yo?Yg zk+s%2zNpn0@Nn`Wn~csMQgG5s zIcsJM0rHm}kcBLbSCq1qHwcKNTkJK#xEdBZ5ES`<+C(%M|{PDGPwkG+DW z0rb>MA(Rg`MBrR_BMsDNHUx5Mt;@Y3YL&cVFNs;9M!2F`E*E_U%;voT(iq;LKjPjH 
z(-Im(O;hDTUtxHIzQXW~EibWM^pY4PFB5q%g+bp5DP$sz;H6mXdnp$91zy0=0>3a4 z(}OEBGDuNmw)s!#)v)&|RNoG&=M|VOXEj5PMILgM1!ze!hNIlnyb6hftIzjm;S{TEV1mQ5C9^U8PvBaEF8YXn{nZ%do1urdq z52D@0+5wi53>|jI+GRpe`1p5d@r71vcsXfjm{%yUz<{-g0gi`bQ6?kqVdVm=D_C!$ zSS+4I^xX+SqBdY-VFR~NkCsFXy@aEi#2tMfD;7QXn+XF8m=w7k5j&$|XN+UlX>kur zAOMQGK$BNld8tH4ZV!l^@qRdCp$IsS?~n5pBSh6-LJ1I}C6o!Vb5I~mk9Ch1fs!A) z2Kq6?sRp9T6z#_kmLG<3HYPUyT-=FB^RD4j8@&LyF+WW07XlzZ>c_4V2)2%xuRx=) z8eYPh=ZA~7(kiKTRLFZqC=L;tk0>@rvJtvK`&EHb4B~42VslH8^3)Guj>1t{io$(Q z99&}RKtu9p_Cto6AcKfXJpq{zAR!$FVyGT@q#T9eDiB(+XXV~lUldn~Kg`d5^wGLV z<)Am@BkvB;QE#|XEO@5-gs|Kid~WI)JbDQi5~{a~u*W8X9NM%a<0(?4zE#Quc_}1U ztr7>}G@xS{LA?5W}wJF5Jv z{Zu`&ym+#pBV$CR1v>a%Q0J6y&+0K}KO~sBFlTRRLliedyOVWshpbWNbF@w-q_!E4 zLxn7{wY%4f98#xoSDm2)Syt*I9p>n8i4LTG6s0(1C)IOw{yZJ#aq!Z5K6+_Ax75e! z&L`;b5*^6&Qa9*8W&+eb(A6q^h);o{x=DA2G#uCoLmCn+i&yEn*XTe>Umre0XG1u6 z_&PZp#8afIwK5(xr1A7Xf2B(*w?sXm&nUj3Qk_g7Un4~tmGtPVqzy@rvVz@#DuV1! zh#K+$pv?xwJ!n~^V2}odGGy%rJ&GGpmWz}vFeJ9V#d!&=Z}2F=Mw$oGIw()T;f32_ zv4aKSy$uaNUPDl7q@*;)e?__p&2H3$t933?K1M`^lTCCvt2AA^Mky4)3OxW~N05E7 zo)jvR=z~mxQbnSwBWUkEXizpjaDoP=AB!x6Eo0dH-3TIq9wIFK=Ho5>4+q9^$B-Ii#$_DJYM`DEz%Cc!u!S_?$en;lwnKM^G}0yN7X|56_`W zARG9T!I2QjOmgP?rCVd3iBLO6kGKP^ueKr|2b z6u_ey>M7t1ESh@T^d>^eQxl-P zHw55JdGA0#Xa5>uzE!L4lql*f&ZjAmsv z;U)4HwdvS9RezbeO0-2TPU{-dXN5zshehXf)T{i}x{b)Aa}6g!Rc{arValvpw|*6* z)aTJyKdjUp+-OJW6jDs}&f$<~KD*A#%z3ACs1oojJG>I3A1|HZhqF{#=^P4Z>vK7m zW(N!7&N}5OH}g6u;Hb9=H=?jwqQfj5LL)w#QE#GvqICX9d&A*K?|QV5VlMI$l}iO0 zW5$MU`)Gg^Tiqn+iqz{vI1z_VLRjWGjCIh^e<87n6KJGZBp$?x7Kspb-~$qJi`k_4 z2dJfmhKW^pPB6oQeK`H&g|s?>Pzshju)ctmU&<&T{}j$Cati#p{kdTArCf6;bzAy5 zR5?Du&@{2ouM?0+{=gQ&!;d8w{E#NbBEqvrJVeEuX@(RgN3L7j+>91)SR$lMq;<<` zQAnbtuxh_rt7DOh7T+1z{!=}`VS#b*^W|qk4If0uWu&%0{1f4#NL~lsXM7O8YzT*! 
zhIRm7p5Wx+)${}(rwLwWo%3xy$Fl}|oXkmB$avUn?LHkpgJy*)DLf2_-05Ld=84S*Je5%mu3qHPteu~Hoo*- z4JInY*))tW!tIZO#@J3`SYTopUUVE3Tc3__xUqbdJ@z58qR93^YeV9ywx{TbYwib1 zkzns5Kp||@+V~qplhNLpAdakK@6<|0Y~={pJvhOTip!^Ct$=|1{PZ+}@C@fNQvU&` z8o=T03dYtVMd@!We@1)*k7UHx1$sClwmvRgAgZr{5KKXAK~di^ZWF$|4nQ@-7=f;N zb>pkXosFh$=wE_{Mto2hzG?abT%7cJqs7*o#^VCvfeA(H6Jvx;4;qLU@S_{Q&}}?V zF$b!8fT-5DGiVXWQ_ul>K*s5Wf(|~uk8bECY%&1Vz8^G%T_b1+_6P(Okpi2&u)7Ok z4yIPDixfK%#*PqMcXZF*;+|>hg`}ohu<>!tn`GV>qJrM=X0flK?_<5)M^!fPfx*U4 zfEZkXIRC$q^!tO9`GC=xT-u69TZ`Ev<}Wb&B2zoaA7f3U&9G2Xk!{`QzBDUXqnF`U zDyXB!5?Ju>C>am3DwYY^qXlc^0r);WOwC8s_FaNUD!;dymb|Nl6q&p}8XQgK78bMz z(07;E%P zUYqgQg?HUrkk%I7Ap0ISUt z`h|VtVoRRouy)taG!VFs9@*!a&Ky?-7ZL39W%w>I?0h$b@=Q2ElXAh4DMHNFIS=Cv z_B@O?D9ss=nip<3pOH)fq5_y~yxbyX%0_9rrV2~0?*TOBeCxepa(H`sy>Q@jlMbw-B}{* zgg$`}HHGs%J{06?A0LXXE6=ErDu?l@RW}>%X&yd*BvXX$ZlR&BHb(-Iq-g?4|)R{Q}`5f(1uN5~?;%0FcX95qfDr*TXh2N6L^ zBWjc}p2gJ|zSG7LWEYI#FJmMTOEQe}C_Td^bVhg18Rxl9+IR+6)5h}&hMM?*9Ig@> z+Vr7c`m%}jDo(Vvfa&lJ=txBH&-x`eho(Ux)Y1EfcpoYg(IYmCNGY)u2BipVS9@Xu z=5L`_euO)+L-pQTT9>paKdfh05GU)f&e>?2O}FN5KT3tnvSh=0WbwY%AfS7X@}Q;4 z21Ei9J=*Z-$Wun3_@TAUt=+ZtSOPdO4k6zPQ{ZB7v4y9s>-Al6<=d3Rrzxdv_$yaAy~gSIYfZxJ2oujLyQ9xIK-Ob(~=9YobRa&<1QmAGGk@F3)IXlO6&UpecT6AdpFg*~4yAC?+N#c!D6y zQ@tJPCmMoqQCic=OTUdq?S`I`18M^ps}>!AC?*z>R!^*?F6Ih~Dli5IaP}WG#fESq z4$(9iPlUt>X25mab7=9AtY|}Q%@X5-D&YDX%V*e%M#Ph?D;Sq#X7TDBZW|j@FvltU zV02z)D++0tEALz|FXZHfRXUU|EU&BtCKTQL5^l~*3=;0oOQ?2KnU|M$R#x%|TMb(n zQceZs)za($(%=v+LzZ2JG!tYTLKdk7E2Zjsnc&9n4|fLtG{~h36?E0Ei56V9-AE0p9;R&wI{c{KE!*5bVu{W8vH3ddE4! 
z%S0Og0{)PJ{lmlJXsl2>8(1wP@XZE89nrgq1E`1^2T<9if%#8HH5<4`Vc;LtFH^u8 zsnN_a0N$dk{z16dx;{xLU|W3ipbrnDhHfwp=;2~^s`K;f$ONh{TB)p;X0DBa=Lah%7Bxk=Zed^K&?k zCpzPEZlmRANz(f;`b(Lg0TQGP2~~gkCd5&aIatEtX^9L7%6mX9gk11`3d3-&`w0&S zv7lTB2zcd+UPJH>r6pkfio{TrUK#;0LX&-q%kg>)lp`Pmrx%G5h)x88gkSyu@j!0+ zh&T$sNAWt8qcOygwMVf7EmP3OIK%o|Y6E-JBbmaWfds^gmMI4zQzknRc&bp3V(=Hu;?!$@=;u^&4AT-EziVFr` zerP|rbAiPGcP>^bC#%@_M!T-CUa|(8d%lyQYb-fH;B}s7&VNjiK$sNW)vPxNr)Cwx z>}Ka2H~NNM#R496jP}%RLbR0>Hn~B(AcBA1Q5`Us`Vt*5oFX=@XE9BEfiTML{!dz#cJ4)5WB z*}IKNj`fvFZ3~K@^6QB$6s$3!5C#+Kn{?p08ckSQ4S9(s>`9!6kwfqoFj?`XY~zFM z8_<-EnLSC=3saT;5*OO=#|&i+O@S)T`PIhI4`|ZfSpF^<37AM6(t?SA8Fp1{JWVqe z^9)V^9>Q&k=B=lNy36-ceebz^FWvXvx9A=QQvGp&)!?3bS{MW^F5rPXkOhm`;m$u@ zr!_513i$sdWMio$@B{o8@Q@!(%O<>LqPNe~nx%Rhwr#cldI~*3Afxpkt7+F9)bCvi zir!SP|Hk*wo5YNN$c{rlz@wYB>exeHj(QL%^*Mgk5Z6J7=FuNEt~rz2RRY=67l z#=DQ8c@PNd9EKU;zq<&Nfj^jGx3Z~-alJvk!-zN6AQzsv9O-{+`0*0ex|al*cXKG^ zn|~qX(99T8>)`2%q%j?RVXB@^RG;I|-+i(Klbt`rs^qR*$Je23jF45qBLN>kb$}~T zKmk^$87LE|5hxa%K>j2856%__-DMVD%%;qfXaSp%m`h&0{W+vYH+ajg|4Jqu2LlJ0 ztrlJ~!S4NIiSqb+oiMtPH)iBK-VC6FpQ1){a^|A^vb^NGYHwQ=geoW&#u7c)uQsb) z!&$kq6eUa}lL0(Spu*Q*Y;Fbuj%A&#s&f#il=0xqsf+nf%&pm*SO9b8_*r@n)g15F z$gjDLI$m~B^d{<0ydcdIrOWEI~vm6~zS13po^qL!klk0QTRk+Y`A-clsP6-^T? 
zap4g4E)?_L&{aKGuMz4Ft$SWYw20zG^oO%#67isZf5W5wO2>bH{Mz`dhvn#@(TT&K PJ9c7pXi`p|9zXV9ACLL- literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/visualize.cpython-34.pyc b/tensorlayer/__pycache__/visualize.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5df167e42645d564bb08137a739180b6c62a7801 GIT binary patch literal 14925 zcmds8OKcq3b$wO+;a{XAN))BZs4PnsJ3}@lj>oWNNr^0t28ul#c_dromXubptC}pb zyQ=l7_%&q%)?~)cCNQ!}b^+pr4aeC92;gLqEV9WaK~{w%n^|d*00|Nxn2mGJebqmb zlExV?0yn!}zpD5D)VcSbclqz5W5a*>#UEGn4W&L(J)aT8e;Zf)z*5S?*HR6oDxJ8c zDt7yMPF3>C!);DA3aVLDl>ue(J}>u!sxpZCg7S*W8&KY$@`jW*th^EBjVf{6Y?B&B0jCWlkCI$=1!QWiX_{G+OZ5n1X^Y33QHeeoIQS(U7Gu-b^0 zmxFK_@nTE+Eghx9+IPKb&~%smbfg=P)8W|N@Y@O9c$~iKX?L@_6m9u&)eXIB!(H+l zaW#pu%!Rskn|@ljzPPyHrGrv+jM$+h4*ja%T=G2+eMm>8o=rbkUPO&uy1| zNqim4?OgXG_mb7QyIQ;3``gUQmWpX1t_7{_G~W#3TBR5?xiqOA#c7^xr1{k-2upTakRj+v z%+ZgPp2E+^wfS$v81Vdx8+lQw8>C?99Y09zv{NPThF8qMl$$mD)v!udSwj&sNhbRG=Q33vg_hs zcH43HhSg++TP4O$4Z@^vhhgU}VUMEacAg}1n7RGRu!5FGF?R_&|B|y5MGfB#JC)p9 z@skzQfMSvjEz~33bdw+ooy`@@7Hh-1oz3P;-UOKr^yYVIy01Pk_YqrwY`GnepE%UZ7V$i3a2NWq4B5RJ|^1f0(Q0haa z>ZnLmj}HKExvT*LI?y1du`JLWrYF}d75}lI*AOL_=?Aiz@Am*wKs9u(4TpD1!|s1z zyUQ{)*BeRD!s{sWK!C#AM==*;U$-B>6ok4)6=#;*q=se87PBNJVxT?pw_nr+ zwr(UEp+ZNSf{eAOvEB?>E8xX|7L(0A;U?>Pyb`T9JZH&w{Kt?aPNbcNAICr=H^gvu z{QDOkI=^XR7hkfQxLX99&6fH!!uHKAw}}bvlhhe#U%h&@-T5m{*eU~Iyr?NZGhg`p ztaI_gg)Dg8Oc)C!jdE91vY$pTif7C;%sThKIO}}z;jD8$@tdu3D_lNbGNX9Ly}H=J zg(2K=kKe%+{}lpmUy+~FSqw7Leqzhs6YU6slba6j{3Q33?DOeKY+1knb{j=-Oce^n zaeL#TuO6UuK|#<`?3(tC!Y*b{6cE(B4Vq__K%U#yU3fv!|W#~g4;tU72hZ{ zd-?J0w#;4BR2H5w5CqVlL#&jCsocrKK*9=AYjqDiqy^Nl?C&5`du7*Q}kBJu6eb+=yy! 
zBfeUeA{_|fau7m};)-8E&_!HZeP7lCb4DH1rVhGT%5@=4e-`=iU=}}mj=>}!So(Fw zsivrgh~L|vVJfom|Ea&o#x9hq@E0`1lxZ|?GCFc7=|bL=8UMVcPyxvrjs`wUbC2a zR6QBvyP`i*$$(mw7i4!764z1>^6JSU^<-Q?k($F^Dq1dOPs z1?bJFWrVfx1&07a2dDey8G7|kEr>DV2@;Qo5P?H#>cB=>Ke{xeCKU`Y$djsd$B*8uJGsa5Xb7D5v?EbMAD6^Zl(^XM5J!TENf3LuY2aHP`U#iOl^{ zlq4`5HEnh$0|n6(Of{_NR^7S3Rjt9`^fhYe#Hd)Hw%AXcAkLoBlDMna{cJdKEz*56 zQEx}X5JB%(QOzZoXzNq%q=q+;RZnt<_cyNJ%K&Oo6eRpN>it= zFR+KOlX|8W<9opr?4PdQqOSH|P9W9y`8xU>6y_aSX?Y3#OX^c$aW^ahP_RRLDvel{KnjRhyj^h#t}mw z2%_x%fc|nL&B|LHgst_YuToAc8)`)&)%eQG%P58Ip*9l1>1GMe6wkv_15cB)9cyLsQKJWxt$3 zAvDU;tP|9w{Hzm3vMU4?MU{GqSq{zOK!S=Dbf^N^#qaucU;ANgukM^_&vVW@EkC^e zO|SyB1hzSL1c?-^&0QGbX1H_CBM@C{K{6*@b1ttWN$b-5eDA|@q?hN3)bpOdG4Hm5 z`FPC?gbZSNC22N3+xMi?-qlCdYBt)P*SZ>0?^>`&Z3yNr5z|Bzt)0(xOJvA&VCF@A z9$HNBGgxfTeu3Cd!8uo6sDErZ%O8Jd_2kVW~bG40yG+&Fr zr%?%mRvRYzHN*^mQHqvU$+K3azXF*$3Oq8c>o8dANp)!*kOe5NIQ>~=_cEVsGH%v% zs52>*fougdjf>I|qe2g4laLmrwKy%BF;_-=;j}W;!S2d%_f4-1b+)a(gi0&W&sP|1 zRtk8pYHRu;$7feJQfn)D0Zq%N(CUF(5 zGxl-2XuXarXC1@es5Kz*dBk(p0Pd&lQF|iyjB0yuc0s8cIT(I8SaJfQa{dD%q)BX^ zJb)6Q0R(*osFjE)}t?(o4FnOLK@l`vpFwk@RqPBsfLjm~aqf zesi!kVQ9+gbPZEs23MlZw2%Zz!%qjeD#(=PiT!Dz<*vtmX&|+30kC9qrGxF=kmk9* z09dR~I*`pQy3JD@hjRg}AWr%lgNxLJ`WijEpkk`S!b_9h{ z>ICUj@WA7=0<9y14brBXA#j%BlEF=qEgU zEyMJ}Sqa2+4DP$^Z@7&a2s#5~S1y8FZe!UMW8DzYuAB!BU92`koKv2Mg<&Mdm4yoz z7y6J`=v6^uQBd_a0aR(80x2C6)1?(P1oX-KLG_K3o$+skYhknrE+0B)Bho!EuHQoO z(s6wYKl*nV2yb|qF(D80@nf9;Z2h32zsjdyV{i&VI^ud>HC|sL-s`vdWB|>P^wnp1 zms_s6X<g z?=tuXgL@3#Ly#6j)ETd(g|LcK3;tky7`GW_n&ZP9ALS@p3bklGOwwF6&lOSRVH$*q5UCyAlm}td zeMOEe?3E*ce6XE!?Xz%58K|Sgc`MT=0bHOlyMhHbUBQAIBUmV9a8rbo5r&h8Tc}#j z&y6qvQ^$wEW_(9FQU7Ah9;A9=)EmEZc zlXi4}nhatJ6ZrU_AwK@bh>t0o6-`5$*b`h?=cVEZRh z>oYO%67i9diNBW0y_amadbt>3M;!VSI85m`V%kqd;m=GO8c`i9BAcm$<;k5m&`HsHqM=RjqQR}M^S6|BYi>%=C&Kg9lEZRNq0jpx-Ab@8H%6Tz+cj;fHe?emIa_@Wc6q_~GCT z4F=0e0es_VLpJ6W_dX`%n{BT-0fCP9V@wY-!>}ZCcNX9LL}=Vo`uT|5$wZx12OtSZ zyl5Y19kE~af*Ox?UA+yPpnKDSF+(pxH}z&wzSR;62n=>a%KP!U?=ZVS_d8 
zrE|K>X?EZSFAs3%D8b{eD5x1`r5OR4YEiR^GZ$9;>nuoq^_U;~K72^lp&|!vqssYP z*zjxBB{vRgU{}t|qLKot{opODKOTYK-%-%X@vlH3b z1Gd_%)L1T;xfAz%I=kq~8+4}0OdlAV9m}UrRInFqcNs%CJ)kR#CA%~ND2 zZUX86GEj-3W4D3usa3EZuZVw*{%`3IR9|4#IP^yNm-F0TNMkl)NMpu;l^76|Q*`G} z>8NqX+d1_%N#E>PU0kv{ND|)^`b_+9?5*ueaceul364y<*p`)H-kXDiow2B1=P_jq z9?Ed+wpU==$tgu@Z@6huj*y%CEK9pvTWQ3|-B++~ng=C4BKI}if&_$Nh?C38AP&-@ zpCG!&9vn$W8__1vuF36Mb=x8T>2}_bR7YlT4+5Q^N-YEIw9MY z=XaUg`;RDBPa;WK=V0ayTNC)3hM_lMy^)!EJ2U;SPS@LLZ)I@H&)A@N#m0YXlR$%7_n2q53*zYxTO+GLSmZYcFsI++}-R4t;NpO+weM$!Tg7DLf7ng4fi^f!^0jO83Lu_XXkYHY8jrR8Xb_J zG~89~Dic{nAi~3-4u^gH9pp=gVhG7*wBg74yST56)q{jebQtqoHXXTz+180TLTN4m z{LHa6#?s+#n;P(d|1jxb*+Ik7FxHi{D9s2~=^Z}%HUp|_=(+4S)@HoJ2wl)wGGh9b zar-k9Ic_ogGxj++6Q8gSLy?208@Zt1(GSmdkGsZEau65yar|$ng8B@+UU9w!ZA`Fk z85opnyS2*y;B-&I!0saFbung%Zu?HcG7ekRI&#?L4q%-MbOBrNC6{d;0+hrgrvnfZ zgnj_NF%HM7-0`>zD0t%`xQ$V@^*@RnHBtb3?Uawi=L#gjps)x3V%P)tPXX2h zML{44dtlU1@rWibQ2_G{;LSRDa3XN_DBPhiTy*Ue40D>|F!m9GcEBy>kOTvMz^R}* za?5+j0T$^clX2T6lRIgKOzumQK2Bjm9{(NKfIl;Az&{Fu05Ta9y^l;r@r+DHtV<@- z%lnY~BfN!O<&)~)HdHNo>Waq`O;Yo}H<_MD1ThO6Z#(oXNafMxD$Nwk6g zXJmnxzZigP{H?@8L)@LoyweXuI!)b$Y_Q1Af*uGDQjj5<~QAI=(o^;%?~lGpn$S)Y=mHcMWO>eY*J0%?hVEjpO3UVJ8kp3=%3 z9(L*Ky_S6URB=BY-8IW+P?NsQfX2Ljz(6c_lF?$Xr@45&*<-XnVuH(Hi9wCQLH7E% zCam_LyEN6$HH$?~YME{qaisuLsH)9RO$_x)o*Xnnp`f!J)D^r2_z$6-za{WrKtxTA zTI04kl^n;pByIN5F2$UGbMW(m4B6y|6UOv5lulN&Ka7gotjFuCaa(+pEUbwCPp}iZXhbt;N@oItvCFu vY%Bgef>CsSlu~RU2d9m_e?$54qsLBPJQ{+Yo{*1dhLG!C4f4A literal 0 HcmV?d00001 diff --git a/tensorlayer/__pycache__/visualize.cpython-35.pyc b/tensorlayer/__pycache__/visualize.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e4aaf9b3fd1d5d8376cdbccd8a8399f38a22bfa GIT binary patch literal 14830 zcmds8OLH98b-vv*gEt8934)}=mQ0!$85od|fi?Y6<)X!DVXB6?T;EL~CN_qHNs-aZ56Sq{^ zZa>eda$b43&8bE~H3w9AP+7ds%l(il58=L`yaDA6DsM=6!^#^`-l+1%lsB%tL&}>_ z^-)#E^GW3$R>=H5T0kD$9#P&=c{O#d5$^}pH|)}_ThbWc0!(*R!%GLjPhP!-`+2%GHOO&FVrIv~_PdV+2PchG`Xr)7yMpUl{ 
zVIA>-miAjZN=LNsdX=E**8Oy}8;{eG*xm5k3Eg;{zUXOpv$7m*`EkV!y-LGf_8V~} ziL%Uvns%FhT6kk|aluQ6r0N*4LrEO^6~DRcdmj3bj!8Y6eo$XY;?%mEs;bTI;=|uO zt~YSSXAvYyty*f;R!L5+=GAIJ?OJMAsVBC2!hu>l|M(8Z;Y6YDNQ0%35eJ{!F8h-B zI+okH?nmw=t8>|j6D{{Fa%Uy0!l?xf-w9E^B#A9IS^4<9QN#1K!#r8j@A%2O4*S}b zYQptqYrEuX?QZXHGb>vhNDFZ_XlK90SG`MZ%`i(78hpZBVDH=IxWFphM?-S+kTMi8&NjbO(wwYGHuh0frL zW%ehZjv#)g%IW842-hGkS&BbEq^{KCyxRIM*94m^G3$U3OOaDI9{B1pR!6B_g!tNO z>i29l{dhnnwpz`pUA(ARM!eLBTUb5&@u1o@OFGEKw03h!pH<0#TEz>Umj?1oK<=8Y zPB6=(v+6F&EENmxkFmG2sdn5(!>oSn1fkOi;>20rcIv@~AIi!&*sijmPE>Pzx4L3- z%R)bQ!_3Xeas|zLMQXlm8bD3Y**)bZCKjc3EtEaGGa>yx?>RLcHJ$TuaVRZlHv|~v8h)s`3(JShFwvii z=O(Uf*mS_(!jAUR!3@&UJO@!6Dg$uL3P}U$aAzjd;m(|t^T-sZxxm{hk1hkZ>ai?p zdBEJri)4an^gs_Fn8y`QB2dQ$th3fJ>$E*?4OlN(eW*2tba_?oBeFf!1+ETWS$D&0 zDi0i68&UeMvMj3uZYB@W-BNgIx)EE|a%vi2XMH=g)O$!+D&}^+y99)G-1e%=^nOCX zlcjR{LFnG*tg|xfR3Au1f-98+FprPSQZC9`7W4Wr#;T7XAW%_Mq{DA+Rs9zCQ_`&( zZ+OnK@A!`(L!3xE4L^}%w|7*8j1PlmfOSx_lesK zv9Ddb*6#dOCv24fFJ9D?pPA3UHtSrvcrgoJH50}HNu$&ilI*8ZZsHj;4YSU@FU~q& zd@$==Nc?82)C%htie?l~xltE8pfHR(Ztx{s@n0d}-WAC?oy8y>?Z>w4JyCHWGP&vS z!XM+Fl1)B+3|kf$fZayn8&eNLaokei-bd+zf{3NSHTh3as&?*9JszUqg~;4R#8$`J zKSnz5B5w{4v!9%ZY!97Oe4Wtjl}ESR0(VhUO?b*cUVnW7v0@(j%8J|aQ#)*>g&ki* zQ5p8Jd3^0AesP$<01Xq`3%5vWt)$i_;I~=VpJ7FU5LS>{t9!5^Eue*AR{hx~XjhaX*HKt-!)>DL&iwxvZrFxwMcMuoC=P9NAWgN8zo~Vk(*Yynf!E#Pe3r%7dl^i7U@|6xaRpaQ zk)}=_w?1c&4IjyU$*KxW;~5d3w%i-I;!%uhU;V6#X-m460RmJ9ft)Yb|v@uO1&#k0;dQN%i=! z3Qw>o)Al6b#nQ7DOQXRF3-$ICyeo)n-fqn1(D!dDxj2fdr_n_=kEe`Y~|5lN7EB82c8R#OKy z%KFizVRei#d`H<5v%)SesMF{gtkbC{u)rVv7+Ix*@Ks4eyr zCy2ABv>xv0bw3+UT#a8A&ddoSAQkn&>u+?9FEMStgd5jBtb4 z1``Fdik`p*<4tEv8C|K<*Js#3*hxK8i}5{U3ieM|Z&6qK&nP?V$n+)cdE*kG<<2+) zN%^D;Sfm!`>kuaIc2`?7+X1V%zgw5GU4dWhjNZ=NgwV_=4lFJf5mNLyR%1I8U&G#} z#E5GJTYh71J;Z>_F5`%y1_V)de?Wh^k!Izs4#L)Y(pM>`l?}Bb5oUbl^few{IZ<6o47P-oBv|(lbYT_RQIn8JTrzzVAuPy+ag4>Z!;K(F{qXJ_#cL!Xnjs z0@@yGNs`;@w4s=0f|6g#pb&~=an=cHQhwG6BiR*#ilRY1#}tP`aUi+G3Oe+F?BaL) zny>w^x>se+l;%0-8!bP4<6STT)dco9b_B^2tj!%5-e$OS&O?w?Ye6z6U30FiBuVS? 
z{Cw}jQl#tiMCy6Z-!F;@C2153)w30L%pXqzjY46&@N+lcZ&gZ&{Qtw)@M{Nk^ zE)k1F^sAlEc1vW)bYSL1Jp<(>_z4WPr@ugKr{J6~Ez~};occ%KS^a79hwuHyQ-wrP zdW+*m*ZMp%rn&0ILYl8eAke6LL75E`J&Ty(EQ-|W-RO~%by9_~y^c`#c6O`D>4#Awcg*(9U`(psDjm@${fdf~J@+`;bh zNcT-I4|le${xT{pLoHunuvsqPy{fM11&+_IY^2s!YHg?X)hUEzxp>pftLyg%0Sylq{lZL0h8U32p5O z_?>V{QieO7f|4cbFb^`8io@;}bmtpE+yXi4xM0G7AjZywe0A9Bh=UhMIX z2)siTEbFw%FcTL=s1bRkG#&_+af3-8(cK1VGeR3+k8ygF6|* z_9E+bL+erzV98Do_BBsIV``~3-PXt8^o2Ue`3o|Zy7sr_jhr=zPND7jH5Y^q6w;(> z>O$`-;1c)D5C~QFzY#2F-N*cS{#Ld!2Qn5iyW}7IaKpLk)VG_ZCN{v#%eSMjKDTl4 zckTm>skgI}BX`ZN`7FzCl>=y@9 z>lUC&Hdi{--VJG<>kDYb`lN%|%%VGuWG~|IpyFR~#Wcj#^aK!O#2U9>w$9p<)_8uv zI%SVo6*T!G-n~9Ffw=>m5J2InMRsEW13-_M6!M714n?*C zZidOn0AYA8H9&;Hmqw_0h)VDs5d(?|FxUz_+z^aH3{}Yi2aHjG5$w^eRV%@9B@hcI zig{REHfWx2S+GWF!kQGAWbKqMSTIs|Va5)KMg33eHcUMlmuLv#IL~D)m~|*Xq`P6U z`BCYwm~CCK>!4mcg2E_ulFTXDp&{C)=#9ZPpeJleoZwhtM6;0)jQW4{Y3fHcq`3}g zVoW9I%m}BJY%7KUs~PJtp;lpT;}&MJFk;fYNjtJXO$IcDNq+R#>h7aIg&B)3zhJY1 zqqLG?^iG+h<2H07z45qu}Z@7&aNI8RIS1*A|ZUfm>W7H7Mu3i8L zU8*!g97$e)VPV9@)rE_f7W!aV$W(!1(M$EaK&doOX_StO#nOrzg7{?lpz+4Z&cwIE zwJ_QQa}QOs5$PTb*KeVC@r1sGAN^YlgeAPn*eeJ?#y-*skk$_x`fGgpbp~gcH0pX@ zC0<`9*z33XWDw1f;)RNZyIj5MriCpb8V^J4RGRK;q~TvsbrW!nql@c}#@Dl(!Zx$% zZ?N*)48Dn=I9bkNVl{bjeTTuf7~Ey>9)ffrM4jw5hjE)>ra3;$@llR~ zrBIF5!z9gB0?nYhS5}ax9EBnuQ+Zd%CvxLBW8&X&u*oCV1oYEW_#1<|c64COx`WtV zd(1k6zavb8BoW%QqmuH>iz=@Ok=eaM1b7c_^Og3QH$)5+QG&eHB`v!m1q^0aq~OMg z6bc*M3_!jJpUFck)GLSNMvQ=(<3j*5zM~!a0#Ju;I%F^lF_4g=_5$?K4yDK&Rw9yx z#9mv9lqf~E&bL!U78%JiX@LPHvpFd8<=-J+#)Wo7Ll7~5TFMuxkofwZ1?ZG2r<(nb_A=XgspJdfOuR>C!eU+dm=m7XqBRDW2 zU|BJ@lN1@8sUy5asx)BIj_yyBK}=!x9{mdh$KM#iaoA=B$3$>|8Z{0_ZNY(*+D}E#&ny|* zP95VRo2i2(%%_nq?^P1*1|b>NEP^Khb9sPYX#NmDbU%tqU{SqFHK@7z(nWE9ppGv< ze063HWZtWwdgX!;B3RSTx++H2#~1=;mn?WQSugHgyv!5O%lvzAuprC$uuPJfzCeu0 zY+uER>N;c-jzha*ZDEgGOAC^>N3!wMP{f=5He+!9Q~Dl)=gYIH9(g8GY|xFxk)cPr z-Dh=WUHaoxuu& zfWaz*H3ncUlx{Kz8OSMO_b71;&u&ssT}DKmoX8D8L_I@T6~?TuK~Q}u*DtJs|3V!% z(>#6df)8-aS0#tS&p0j$uNw)*m*eHoQ1Ou1Q(ob 
zej&~_I5C5%a!LT-c+-%CIpf~LguL7KixUv&xGKhUBQwNGGPkt&<|jhqp3)zW$em2o zR&@YwfP9Pgakvrt6)&jr_}10ium-v}9T+owBD7L(7Uf$lVSYeaN94F4t@{p>OqGO& zN)$F&(_R**OO<9PZ1Cm)S&kAs?}~z|Ayk?Xkf|Cqn>b`)#lOyiCq|IXR`FW+Rp5) zb2dAIojqWy%}R}>Qi(fp&!@ACuD(vEnat>cnc1;<`a}hL(RPb5JkwM94CtE(?&wDd z`=NU^a03U#z3G5 z8rW-#+D0IUH0@dYRmi|)5w3RolbZ#mKjK_Qh2Cjx!l zJVkbbCU6c21AQ3UbsGhrSOx2mFHSW2Kc$<{djV7vP#WP0&Le&yirI)EiWvh?Vl>c9 z(VeB@QR8s8bNX!(z1bnU_+52iB)%z>nK;?lTifM7G6(e^QmCFnlCsVlrqG&%?RN&@>zReOGt>XxbX|(}Rt5+9{2fRFI?`{6ziN2| zrZG--wt{5)>5J^a7){k?3+I5CT$z0OsNy&$*MjW~^ESY21mDAEucB-6flVOi9P$FN zuTUcJ4m^MZip5QXS77=Fwt6(6mb!Wayuj_!MLDHGcaH2A9^F~_Xygk03D~V`KjnO| z!C|%Y^2d$4oBg1**tvEae#J4^{xDAHd>yahUWam2*rOmrnsl7$obFyN!8cT+0}>QR zx_VuCGOGyWcNo;*TCZU?t8_Sqglt9|eyqQP`|@}#NT@-FF;8LB(OZ~port#+JjhY8 zgP=H`j&$49@3Y`=2gnW@md2DTX;GRHpwioXw!?tB8hS4KjkOsUFya;zmW+%(Ghx3p znZwqav|qB%!(I5Kbr@P4+}TJ01rL94u6wvOfs#YGxR2w1LlsnJ;Ma=NEc{!E09_pz zgopAR>SOoN3ydzZT@^Ey&}MAd_MunCSc^7C#+rNqj8XwDpbEZZuFXThkyzsN@?nBd z4nQ`>;W~9xcxM4RJT$z(|4<9t`fo)>8Yuw0cFNDhs|wt}kT3@S48{QNQ9v^RPml(} z7#P)4T%Aco6u>hBZ?hgAoCtV53hEjIMZZqN7^nFS10Nx%2HeUlP4fpN3aTTMyoV5A zkzVQ;w_WPElV+&nzBK9M6lUYm-+}@7kzoM-URVQA$C%}P)G>-@)G=aR>X`1^huk0H zMP`aO>PkJHQ&X*;biBLv(DR#x0ddRkIw@AcR}DvFpkzU~YQx=@~$iC~)SSuuD$w zwcfi2i2KRst|{J6L+ksjU95F!hc~w9iJL&_vT7P?x6q znPsuU%OH~JTM?HFFn=oA{8Ys_FXzcPBNW0p+d*AiU03q9FTcTsdEfOmgQMsuu|dq zLq+}vM*b~D96>bYhj?J<5yq%UYcdI%Vo9^FW@3bA=mw%G4P9wQ-g?8o#>> network = tl.layers.DenseLayer(network, n_units=100, name = 'dense_lrelu', - ... act= lambda x : tl.act.lrelu(x, 0.2)) + -------- + >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.lrelu(x, 0.2), name='dense') + + Returns + ------- + Tensor + A ``Tensor`` in the same type as ``x``. References ------------ - - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) `_ + - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. 
(2013) `__ + """ - with tf.name_scope(name) as scope: - # x = tf.nn.relu(x) - # m_x = tf.nn.relu(-x) - # x -= alpha * m_x - x = tf.maximum(x, alpha * x) + # with tf.name_scope(name) as scope: + # x = tf.nn.relu(x) + # m_x = tf.nn.relu(-x) + # x -= alpha * m_x + x = tf.maximum(x, alpha * x, name=name) return x -#Shortcut -lrelu = leaky_relu -def pixel_wise_softmax(output, name='pixel_wise_softmax'): +def swish(x, name='swish'): + """The Swish function. + See `Swish: a Self-Gated Activation Function `__. + + Parameters + ---------- + x : Tensor + input. + name: str + function name (optional). + + Returns + ------- + Tensor + A ``Tensor`` in the same type as ``x``. + + """ + with tf.name_scope(name): + x = tf.nn.sigmoid(x) * x + return x + + +@deprecated("2018-06-30", "This API will be deprecated soon as tf.nn.softmax can do the same thing.") +def pixel_wise_softmax(x, name='pixel_wise_softmax'): """Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1. Usually be used for image segmentation. Parameters - ------------ - output : tensor - - For 2d image, 4D tensor [batch_size, height, weight, channel], channel >= 2. - - For 3d image, 5D tensor [batch_size, depth, height, weight, channel], channel >= 2. + ---------- + x : Tensor + input. + - For 2d image, 4D tensor (batch_size, height, weight, channel), where channel >= 2. + - For 3d image, 5D tensor (batch_size, depth, height, weight, channel), where channel >= 2. + name : str + function name (optional) + + Returns + ------- + Tensor + A ``Tensor`` in the same type as ``x``. 
Examples - --------- + -------- >>> outputs = pixel_wise_softmax(network.outputs) >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5) References - ----------- - - `tf.reverse `_ + ---------- + - `tf.reverse `__ + """ - with tf.name_scope(name) as scope: - return tf.nn.softmax(output) - ## old implementation - # exp_map = tf.exp(output) - # if output.get_shape().ndims == 4: # 2d image - # evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True])) - # elif output.get_shape().ndims == 5: # 3d image - # evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True])) - # else: - # raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape)) - # return tf.div(exp_map, evidence) + with tf.name_scope(name): + return tf.nn.softmax(x) + + +# Alias +linear = identity +lrelu = leaky_relu diff --git a/tensorlayer/cli/__init__.py b/tensorlayer/cli/__init__.py new file mode 100644 index 0000000..1857582 --- /dev/null +++ b/tensorlayer/cli/__init__.py @@ -0,0 +1 @@ +"""The tensorlayer.cli module provides a command-line tool for some common tasks.""" diff --git a/tensorlayer/cli/__main__.py b/tensorlayer/cli/__main__.py new file mode 100644 index 0000000..1a65b3d --- /dev/null +++ b/tensorlayer/cli/__main__.py @@ -0,0 +1,14 @@ +import argparse + +from tensorlayer.cli import train + +if __name__ == "__main__": + parser = argparse.ArgumentParser(prog='tl') + subparsers = parser.add_subparsers(dest='cmd') + train_parser = subparsers.add_parser('train', help='train a model using multiple local GPUs or CPUs.') + train.build_arg_parser(train_parser) + args = parser.parse_args() + if args.cmd == 'train': + train.main(args) + else: + parser.print_help() diff --git a/tensorlayer/cli/train.py b/tensorlayer/cli/train.py new file mode 100644 index 0000000..24744a4 --- /dev/null +++ b/tensorlayer/cli/train.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# encoding: utf-8 +""" +tl train +======== + +(Alpha release - usage might 
change later) + +The tensorlayer.cli.train module provides the ``tl train`` subcommand. +It helps the user bootstrap a TensorFlow/TensorLayer program for distributed training +using multiple GPU cards or CPUs on a computer. + +You need to first setup the `CUDA_VISIBLE_DEVICES `_ +to tell ``tl train`` which GPUs are available. If the CUDA_VISIBLE_DEVICES is not given, +``tl train`` would try best to discover all available GPUs. + +In distribute training, each TensorFlow program needs a TF_CONFIG environment variable to describe +the cluster. It also needs a master daemon to +monitor all trainers. ``tl train`` is responsible +for automatically managing these two tasks. + +Usage +----- + +tl train [-h] [-p NUM_PSS] [-c CPU_TRAINERS] [args [args ...]] + +.. code-block:: bash + + # example of using GPU 0 and 1 for training mnist + CUDA_VISIBLE_DEVICES="0,1" + tl train example/tutorial_mnist_distributed.py + + # example of using CPU trainers for inception v3 + tl train -c 16 example/tutorial_imagenet_inceptionV3_distributed.py + + # example of using GPU trainers for inception v3 with customized arguments + # as CUDA_VISIBLE_DEVICES is not given, tl would try to discover all available GPUs + tl train example/tutorial_imagenet_inceptionV3_distributed.py -- --batch_size 16 + + +Command-line Arguments +---------------------- + +- ``file``: python file path. + +- ``NUM_PSS`` : The number of parameter servers. + +- ``CPU_TRAINERS``: The number of CPU trainers. + + It is recommended that ``NUM_PSS + CPU_TRAINERS <= cpu count`` + +- ``args``: Any parameter after ``--`` would be passed to the python program. + + +Notes +----- +A parallel training program would require multiple parameter servers +to help parallel trainers to exchange intermediate gradients. +The best number of parameter servers is often proportional to the +size of your model as well as the number of CPUs available. +You can control the number of parameter servers using the ``-p`` parameter. 
+ +If you have a single computer with massive CPUs, you can use the ``-c`` parameter +to enable CPU-only parallel training. +The reason we are not supporting GPU-CPU co-training is because GPU and +CPU are running at different speeds. Using them together in training would +incur stragglers. + +""" + +import argparse +import json +import multiprocessing +import os +import platform +import re +import subprocess +import sys + +PORT_BASE = 10000 + + +def _get_gpu_ids(): + if 'CUDA_VISIBLE_DEVICES' in os.environ: + return [int(x) for x in os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',')] + if platform.system() in ['Darwin', 'Linux']: + return [int(d.replace('nvidia', '')) for d in os.listdir('/dev') if re.match('^nvidia\d+$', d)] + else: + print('Please set CUDA_VISIBLE_DEVICES (see http://acceleware.com/blog/cudavisibledevices-masking-gpus)') + return [] + + +GPU_IDS = _get_gpu_ids() + + +def create_tf_config(cluster_spec, task_type, task_index): + return { + 'cluster': cluster_spec, + 'task': { + 'type': task_type, + 'index': task_index + }, + } + + +def create_tf_jobs(cluster_spec, prog, args): + gpu_assignment = dict((('worker', idx), gpu_idx) for (idx, gpu_idx) in enumerate(GPU_IDS)) + for job_type in cluster_spec: + for task_index in range(len(cluster_spec[job_type])): + new_env = os.environ.copy() + new_env.update({ + 'CUDA_VISIBLE_DEVICES': str(gpu_assignment.get((job_type, task_index), '')), + 'TF_CONFIG': json.dumps(create_tf_config(cluster_spec, job_type, task_index)), + }) + yield subprocess.Popen(['python3', prog] + args, env=new_env) + + +def validate_arguments(args): + if args.num_pss < 1: + print('Value error: must have ore than one parameter servers.') + exit(1) + + if not GPU_IDS: + num_cpus = multiprocessing.cpu_count() + if args.cpu_trainers > num_cpus: + print('Value error: there are %s available CPUs but you are requiring %s.' 
% (num_cpus, args.cpu_trainers)) + exit(1) + + if not os.path.isfile(args.file): + print('Value error: model trainning file does not exist') + exit(1) + + +def main(args): + validate_arguments(args) + num_workers = len(GPU_IDS) if GPU_IDS else args.cpu_trainers + print('Using program %s with args %s' % (args.file, ' '.join(args.args))) + print('Using %d workers, %d parameter servers, %d GPUs.' % (num_workers, args.num_pss, len(GPU_IDS))) + cluster_spec = { + 'ps': ['localhost:%d' % (PORT_BASE + i) for i in range(args.num_pss)], + 'worker': ['localhost:%d' % (PORT_BASE + args.num_pss + i) for i in range(num_workers)] + } + processes = list(create_tf_jobs(cluster_spec, args.file, args.args)) + try: + print('Press ENTER to exit the training ...') + sys.stdin.readline() + except KeyboardInterrupt: # https://docs.python.org/3/library/exceptions.html#KeyboardInterrupt + print('Keyboard interrupt received') + finally: + print('stopping all subprocesses ...') + for p in processes: + p.kill() + for p in processes: + p.wait() + print('END') + + +def build_arg_parser(parser): + parser.add_argument('-p', '--pss', dest='num_pss', type=int, default=1, help='number of parameter servers') + parser.add_argument('-c', '--cpu_trainers', dest='cpu_trainers', type=int, default=1, help='number of CPU trainers') + parser.add_argument('file', help='model trainning file path') + parser.add_argument('args', nargs='*', type=str, help='arguments to ') + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + build_arg_parser(parser) + args = parser.parse_args() + main(args) diff --git a/tensorlayer/cost.py b/tensorlayer/cost.py index f04133c..3934afd 100644 --- a/tensorlayer/cost.py +++ b/tensorlayer/cost.py @@ -1,24 +1,38 @@ -#! 
/usr/bin/python -# -*- coding: utf8 -*- - - +# -*- coding: utf-8 -*- +import logging import tensorflow as tf -import numbers -from tensorflow.python.framework import ops -from tensorflow.python.ops import standard_ops -## Cost Functions +__all__ = [ + 'cross_entropy', + 'sigmoid_cross_entropy', + 'binary_cross_entropy', + 'mean_squared_error', + 'normalized_mean_square_error', + 'absolute_difference_error', + 'dice_coe', + 'dice_hard_coe', + 'iou_coe', + 'cross_entropy_seq', + 'cross_entropy_seq_with_mask', + 'cosine_similarity', + 'li_regularizer', + 'lo_regularizer', + 'maxnorm_regularizer', + 'maxnorm_o_regularizer', + 'maxnorm_i_regularizer', +] + def cross_entropy(output, target, name=None): - """It is a softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy of two distributions, implement + """Softmax cross-entropy operation, returns the TensorFlow expression of cross-entropy for two distributions, it implements softmax internally. See ``tf.nn.sparse_softmax_cross_entropy_with_logits``. Parameters ---------- - output : Tensorflow variable - A distribution with shape: [batch_size, n_feature]. - target : Tensorflow variable + output : Tensor + A batch of distribution with shape: [batch_size, num of classes]. + target : Tensor A batch of index with shape: [batch_size, ]. name : string Name of this loss. @@ -29,213 +43,362 @@ def cross_entropy(output, target, name=None): References ----------- - - About cross-entropy: `wiki `_.\n - - The code is borrowed from: `here `_. + - About cross-entropy: ``__. + - The code is borrowed from: ``__. 
+ """ - try: # old - return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, targets=target)) - except: # TF 1.0 - assert name is not None, "Please give a unique name to tl.cost.cross_entropy for TF1.0+" - return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output, name=name)) + # try: # old + # return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, targets=target)) + # except: # TF 1.0 + if name is None: + raise Exception("Please give a unique name to tl.cost.cross_entropy for TF1.0+") + return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output, name=name)) + def sigmoid_cross_entropy(output, target, name=None): - """It is a sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``. - """ - try: # TF 1.0 - return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output, name=name)) - except: - return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, targets=target)) + """Sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``. + Parameters + ---------- + output : Tensor + A batch of distribution with shape: [batch_size, num of classes]. + target : Tensor + A batch of index with shape: [batch_size, ]. + name : string + Name of this loss. -def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'): - """Computes binary cross entropy given `output`. + """ + # try: # TF 1.0 + return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output, name=name)) + # except: + # return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, targets=target)) - For brevity, let `x = output`, `z = target`. 
The binary cross entropy loss is - loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i])) +def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'): + """Binary cross entropy operation. Parameters ---------- - output : tensor of type `float32` or `float64`. - target : tensor of the same type and shape as `output`. + output : Tensor + Tensor with type of `float32` or `float64`. + target : Tensor + The target distribution, format the same with `output`. epsilon : float - A small value to avoid output is zero. - name : string - An optional name to attach to this layer. + A small value to avoid output to be zero. + name : str + An optional name to attach to this function. References ----------- - - `DRAW `_ + - `ericjang-DRAW `__ + """ -# from tensorflow.python.framework import ops -# with ops.op_scope([output, target], name, "bce_loss") as name: -# output = ops.convert_to_tensor(output, name="preds") -# target = ops.convert_to_tensor(targets, name="target") + # from tensorflow.python.framework import ops + # with ops.op_scope([output, target], name, "bce_loss") as name: + # output = ops.convert_to_tensor(output, name="preds") + # target = ops.convert_to_tensor(targets, name="target") with tf.name_scope(name): - return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) + - (1. - target) * tf.log(1. - output + epsilon)), axis=1)) + return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) + (1. - target) * tf.log(1. - output + epsilon)), axis=1)) + + # For brevity, let `x = output`, `z = target`. The binary cross entropy loss is + # + # loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i])) -def mean_squared_error(output, target, is_mean=False): - """Return the TensorFlow expression of mean-squre-error of two distributions. +def mean_squared_error(output, target, is_mean=False, name="mean_squared_error"): + """Return the TensorFlow expression of mean-square-error (L2) of two batch of data. 
Parameters ---------- - output : 2D or 4D tensor. - target : 2D or 4D tensor. - is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the loss of one data, otherwise, use ``tf.reduce_sum`` (default). + output : Tensor + 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. + target : Tensor + The target distribution, format the same with `output`. + is_mean : boolean + Whether compute the mean or sum for each example. + - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data. + - If False, use ``tf.reduce_sum`` (default). References ------------ - - `Wiki Mean Squared Error `_ + - `Wiki Mean Squared Error `__ + """ - with tf.name_scope("mean_squared_error_loss"): - if output.get_shape().ndims == 2: # [batch_size, n_feature] + with tf.name_scope(name): + if output.get_shape().ndims == 2: # [batch_size, n_feature] if is_mean: mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1)) else: mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1)) - elif output.get_shape().ndims == 4: # [batch_size, w, h, c] + elif output.get_shape().ndims == 3: # [batch_size, w, h] + if is_mean: + mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2])) + else: + mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2])) + elif output.get_shape().ndims == 4: # [batch_size, w, h, c] if is_mean: mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3])) else: mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3])) + else: + raise Exception("Unknow dimension") return mse +def normalized_mean_square_error(output, target): + """Return the TensorFlow expression of normalized mean-square-error of two distributions. 
-def dice_coe(output, target, epsilon=1e-10): - """Sørensen–Dice coefficient for comparing the similarity of two distributions, - usually be used for binary image segmentation i.e. labels are binary. - The coefficient = [0, 1], 1 if totally match. + Parameters + ---------- + output : Tensor + 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. + target : Tensor + The target distribution, format the same with `output`. + + """ + with tf.name_scope("mean_squared_error_loss"): + if output.get_shape().ndims == 2: # [batch_size, n_feature] + nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1)) + nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1)) + elif output.get_shape().ndims == 3: # [batch_size, w, h] + nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2])) + nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2])) + elif output.get_shape().ndims == 4: # [batch_size, w, h, c] + nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3])) + nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3])) + nmse = tf.reduce_mean(nmse_a / nmse_b) + return nmse + + +def absolute_difference_error(output, target, is_mean=False): + """Return the TensorFlow expression of absolute difference error (L1) of two batch of data. + + Parameters + ---------- + output : Tensor + 2D, 3D or 4D tensor i.e. [batch_size, n_feature], [batch_size, height, width] or [batch_size, height, width, channel]. + target : Tensor + The target distribution, format the same with `output`. + is_mean : boolean + Whether compute the mean or sum for each example. + - If True, use ``tf.reduce_mean`` to compute the loss between one target and predict data. + - If False, use ``tf.reduce_sum`` (default). 
+ + """ + with tf.name_scope("mean_squared_error_loss"): + if output.get_shape().ndims == 2: # [batch_size, n_feature] + if is_mean: + loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), 1)) + else: + loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), 1)) + elif output.get_shape().ndims == 3: # [batch_size, w, h] + if is_mean: + loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [1, 2])) + else: + loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [1, 2])) + elif output.get_shape().ndims == 4: # [batch_size, w, h, c] + if is_mean: + loss = tf.reduce_mean(tf.reduce_mean(tf.abs(output - target), [1, 2, 3])) + else: + loss = tf.reduce_mean(tf.reduce_sum(tf.abs(output - target), [1, 2, 3])) + else: + raise Exception("Unknow dimension") + return loss + + +def dice_coe(output, target, loss_type='jaccard', axis=(1, 2, 3), smooth=1e-5): + """Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity + of two batch of data, usually be used for binary image segmentation + i.e. labels are binary. The coefficient between 0 to 1, 1 means totally match. Parameters ----------- - output : tensor - A distribution with shape: [batch_size, ....], (any dimensions). - target : tensor + output : Tensor A distribution with shape: [batch_size, ....], (any dimensions). - epsilon : float - An optional name to attach to this layer. + target : Tensor + The target distribution, format the same with `output`. + loss_type : str + ``jaccard`` or ``sorensen``, default is ``jaccard``. + axis : tuple of int + All dimensions are reduced, default ``[1,2,3]``. + smooth : float + This small value will be added to the numerator and denominator. + - If both output and target are empty, it makes sure dice is 1. 
+ - If either output or target are empty (all pixels are background), dice = ```smooth/(small_value + smooth)``, then if smooth is very small, dice close to 0 (even the image values lower than the threshold), so in this case, higher smooth can have a higher dice. Examples --------- >>> outputs = tl.act.pixel_wise_softmax(network.outputs) - >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_, epsilon=1e-5) + >>> dice_loss = 1 - tl.cost.dice_coe(outputs, y_) References ----------- - - `wiki-dice `_ + - `Wiki-Dice `__ + """ - # inse = tf.reduce_sum( tf.mul(output, target) ) - # l = tf.reduce_sum( tf.mul(output, output) ) - # r = tf.reduce_sum( tf.mul(target, target) ) - inse = tf.reduce_sum( output * target ) - l = tf.reduce_sum( output * output ) - r = tf.reduce_sum( target * target ) - dice = 2 * (inse) / (l + r) - if epsilon == 0: - return dice + inse = tf.reduce_sum(output * target, axis=axis) + if loss_type == 'jaccard': + l = tf.reduce_sum(output * output, axis=axis) + r = tf.reduce_sum(target * target, axis=axis) + elif loss_type == 'sorensen': + l = tf.reduce_sum(output, axis=axis) + r = tf.reduce_sum(target, axis=axis) else: - return tf.clip_by_value(dice, 0, 1.0-epsilon) - - -def dice_hard_coe(output, target, epsilon=1e-10): - """Non-differentiable Sørensen–Dice coefficient for comparing the similarity of two distributions, - usually be used for binary image segmentation i.e. labels are binary. - The coefficient = [0, 1], 1 if totally match. + raise Exception("Unknow loss_type") + ## old axis=[0,1,2,3] + # dice = 2 * (inse) / (l + r) + # epsilon = 1e-5 + # dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1 + ## new haodong + dice = (2. 
* inse + smooth) / (l + r + smooth) + ## + dice = tf.reduce_mean(dice) + return dice + + +def dice_hard_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): + """Non-differentiable Sørensen–Dice coefficient for comparing the similarity + of two batch of data, usually be used for binary image segmentation i.e. labels are binary. + The coefficient between 0 to 1, 1 if totally match. Parameters ----------- output : tensor A distribution with shape: [batch_size, ....], (any dimensions). target : tensor - A distribution with shape: [batch_size, ....], (any dimensions). - epsilon : float - An optional name to attach to this layer. - - Examples - --------- - >>> outputs = pixel_wise_softmax(network.outputs) - >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5) + The target distribution, format the same with `output`. + threshold : float + The threshold value to be true. + axis : tuple of integer + All dimensions are reduced, default ``(1,2,3)``. + smooth : float + This small value will be added to the numerator and denominator, see ``dice_coe``. References ----------- - - `wiki-dice `_ - """ - output = tf.cast(output > 0.5, dtype=tf.float32) - target = tf.cast(target > 0.5, dtype=tf.float32) - inse = tf.reduce_sum( output * target ) - l = tf.reduce_sum( output * output ) - r = tf.reduce_sum( target * target ) - dice = 2 * (inse) / (l + r) - if epsilon == 0: - return dice - else: - return tf.clip_by_value(dice, 0, 1.0-epsilon) + - `Wiki-Dice `__ -def iou_coe(output, target, threshold=0.5, epsilon=1e-10): - """Non-differentiable Intersection over Union, usually be used for evaluating binary image segmentation. - The coefficient = [0, 1], 1 means totally match. 
+ """ + output = tf.cast(output > threshold, dtype=tf.float32) + target = tf.cast(target > threshold, dtype=tf.float32) + inse = tf.reduce_sum(tf.multiply(output, target), axis=axis) + l = tf.reduce_sum(output, axis=axis) + r = tf.reduce_sum(target, axis=axis) + ## old axis=[0,1,2,3] + # hard_dice = 2 * (inse) / (l + r) + # epsilon = 1e-5 + # hard_dice = tf.clip_by_value(hard_dice, 0, 1.0-epsilon) + ## new haodong + hard_dice = (2. * inse + smooth) / (l + r + smooth) + ## + hard_dice = tf.reduce_mean(hard_dice) + return hard_dice + + +def iou_coe(output, target, threshold=0.5, axis=(1, 2, 3), smooth=1e-5): + """Non-differentiable Intersection over Union (IoU) for comparing the + similarity of two batch of data, usually be used for evaluating binary image segmentation. + The coefficient between 0 to 1, and 1 means totally match. Parameters ----------- output : tensor - A distribution with shape: [batch_size, ....], (any dimensions). + A batch of distribution with shape: [batch_size, ....], (any dimensions). target : tensor - A distribution with shape: [batch_size, ....], (any dimensions). + The target distribution, format the same with `output`. threshold : float The threshold value to be true. - epsilon : float - A small value to avoid zero denominator when both output and target output nothing. - - Examples - --------- - >>> outputs = tl.act.pixel_wise_softmax(network.outputs) - >>> iou = tl.cost.iou_coe(outputs[:,:,:,0], y_[:,:,:,0]) + axis : tuple of integer + All dimensions are reduced, default ``(1,2,3)``. + smooth : float + This small value will be added to the numerator and denominator, see ``dice_coe``. Notes ------ - - IOU cannot be used as training loss, people usually use dice coefficient for training, and IOU for evaluating. + - IoU cannot be used as training loss, people usually use dice coefficient for training, IoU and hard-dice for evaluating. 
+ """ pre = tf.cast(output > threshold, dtype=tf.float32) truth = tf.cast(target > threshold, dtype=tf.float32) - intersection = tf.reduce_sum(pre * truth) - union = tf.reduce_sum(tf.cast((pre + truth) > threshold, dtype=tf.float32)) - return tf.reduce_sum(intersection) / (tf.reduce_sum(union) + epsilon) - - -def cross_entropy_seq(logits, target_seqs, batch_size=None):#, batch_size=1, num_steps=None): + inse = tf.reduce_sum(tf.multiply(pre, truth), axis=axis) # AND + union = tf.reduce_sum(tf.cast(tf.add(pre, truth) >= 1, dtype=tf.float32), axis=axis) # OR + ## old axis=[0,1,2,3] + # epsilon = 1e-5 + # batch_iou = inse / (union + epsilon) + ## new haodong + batch_iou = (inse + smooth) / (union + smooth) + iou = tf.reduce_mean(batch_iou) + return iou #, pre, truth, inse, union + + +# ## test soft/hard dice and iou +# import numpy as np +# y = np.zeros((1,10,10,1)) +# # y[0,0:5,0:5]=1.0 +# o = np.zeros((1,10,10,1)) +# # o[:,:,:,:] = 0 # what we want: dice=0 iou=0 OK +# # o[0,0:2,0:2]=0.3 # what we want: dice larger iou=0 OK +# # o[0,0:2,0:2]=0.6 # what we want: dice larger iou small OK +# # o[0,0:3,0:3]=0.6 # what we want: dice larger iou larger OK +# # o[0,0:3,0:3]=1 # what we want: dice larger iou same OK +# # o[0,0:5,0:5]=1 # what we want: dice=1 iou=1 OK +# # o[0,0:5,0:5]=0.3 # what we want: dice smaller iou=0 OK +# # o[0,0:5,0:5]=1e-2 # what we want: dice≈0 iou=0 OK +# # o[0,8:10,8:10]=1.0 # what we want: dice=0 iou=0 OK +# # o[0,8:10,8:10]=1e-10 # what we want: dice=0 iou=0 OK +# # y[:,:,:,:] = o[:,:,:,:] = 0 # what we want: dice=1 iou=1 OK +# ## why in u-net, dice=1 hard-dice=1 iou=1 exist?? print bug? +# +# d = dice_coe(o, y, 'jaccard', smooth=1.) 
+# hd = dice_hard_coe(o, y, smooth=1e-5) +# i = iou_coe(o, y, smooth=1e-5) +# sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) +# # sess.run(tf.local_variables_initializer()) +# print(sess.run([d,hd,i])) +# # p, t, i, u = sess.run([pre, truth, inse, union]) +# # import pprint +# # pprint.pprint(((y>0.5)*(o>0.5)).astype(int).tolist()) +# # pprint.pprint(p.tolist()) +# # pprint.pprint(t.tolist()) +# # pprint.pprint(i) +# # pprint.pprint(u) +# exit() + + +def cross_entropy_seq(logits, target_seqs, batch_size=None): #, batch_size=1, num_steps=None): """Returns the expression of cross-entropy of two sequences, implement - softmax internally. Normally be used for Fixed Length RNN outputs. + softmax internally. Normally be used for fixed length RNN outputs, see `PTB example `__. Parameters ---------- - logits : Tensorflow variable - 2D tensor, ``network.outputs``, [batch_size*n_steps (n_examples), number of output units] - target_seqs : Tensorflow variable - target : 2D tensor [batch_size, n_steps], if the number of step is dynamic, please use ``cross_entropy_seq_with_mask`` instead. + logits : Tensor + 2D tensor with shape of `[batch_size * n_steps, n_classes]`. + target_seqs : Tensor + The target sequence, 2D tensor `[batch_size, n_steps]`, if the number of step is dynamic, please use ``tl.cost.cross_entropy_seq_with_mask`` instead. batch_size : None or int. - If not None, the return cost will be divided by batch_size. + Whether to divide the cost by batch size. + - If integer, the return cost will be divided by `batch_size`. + - If None (default), the return cost will not be divided by anything. 
Examples -------- - >>> see PTB tutorial for more details - >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) - >>> targets = tf.placeholder(tf.int32, [batch_size, num_steps]) + >>> see `PTB example `__.for more details + >>> input_data = tf.placeholder(tf.int32, [batch_size, n_steps]) + >>> targets = tf.placeholder(tf.int32, [batch_size, n_steps]) + >>> # build the network + >>> print(net.outputs) + ... (batch_size * n_steps, n_classes) >>> cost = tl.cost.cross_entropy_seq(network.outputs, targets) + """ - try: # TF 1.0 - sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example - except: - sequence_loss_by_example_fn = tf.nn.seq2seq.sequence_loss_by_example - - loss = sequence_loss_by_example_fn( - [logits], - [tf.reshape(target_seqs, [-1])], - [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)]) - # [tf.ones([batch_size * num_steps])]) - cost = tf.reduce_sum(loss) #/ batch_size + # try: # TF 1.0 + sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example + # except: + # sequence_loss_by_example_fn = tf.nn.seq2seq.sequence_loss_by_example + + loss = sequence_loss_by_example_fn([logits], [tf.reshape(target_seqs, [-1])], [tf.ones_like(tf.reshape(target_seqs, [-1]), dtype=tf.float32)]) + # [tf.ones([batch_size * num_steps])]) + cost = tf.reduce_sum(loss) #/ batch_size if batch_size is not None: cost = cost / batch_size return cost @@ -243,37 +406,64 @@ def cross_entropy_seq(logits, target_seqs, batch_size=None):#, batch_size=1, num def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None): """Returns the expression of cross-entropy of two sequences, implement - softmax internally. Normally be used for Dynamic RNN outputs. + softmax internally. Normally be used for Dynamic RNN with Synced sequence input and output. Parameters ----------- - logits : network identity outputs - 2D tensor, ``network.outputs``, [batch_size, number of output units]. 
- target_seqs : int of tensor, like word ID. - [batch_size, ?] - input_mask : the mask to compute loss - The same size with target_seqs, normally 0 and 1. + logits : Tensor + 2D tensor with shape of [batch_size * ?, n_classes], `?` means dynamic IDs for each example. + - Can be get from `DynamicRNNLayer` by setting ``return_seq_2d`` to `True`. + target_seqs : Tensor + int of tensor, like word ID. [batch_size, ?], `?` means dynamic IDs for each example. + input_mask : Tensor + The mask to compute loss, it has the same size with `target_seqs`, normally 0 or 1. return_details : boolean - - If False (default), only returns the loss. - - If True, returns the loss, losses, weights and targets (reshape to one vetcor). + Whether to return detailed losses. + - If False (default), only returns the loss. + - If True, returns the loss, losses, weights and targets (see source code). Examples -------- - - see Image Captioning Example. + >>> batch_size = 64 + >>> vocab_size = 10000 + >>> embedding_size = 256 + >>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input") + >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target") + >>> input_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="mask") + >>> net = tl.layers.EmbeddingInputlayer( + ... inputs = input_seqs, + ... vocabulary_size = vocab_size, + ... embedding_size = embedding_size, + ... name = 'seq_embedding') + >>> net = tl.layers.DynamicRNNLayer(net, + ... cell_fn = tf.contrib.rnn.BasicLSTMCell, + ... n_hidden = embedding_size, + ... dropout = (0.7 if is_train else None), + ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs), + ... return_seq_2d = True, + ... name = 'dynamicrnn') + >>> print(net.outputs) + ... (?, 256) + >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name="output") + >>> print(net.outputs) + ... 
(?, 10000) + >>> loss = tl.cost.cross_entropy_seq_with_mask(net.outputs, target_seqs, input_mask) + """ - targets = tf.reshape(target_seqs, [-1]) # to one vector - weights = tf.to_float(tf.reshape(input_mask, [-1])) # to one vector like targets + targets = tf.reshape(target_seqs, [-1]) # to one vector + weights = tf.to_float(tf.reshape(input_mask, [-1])) # to one vector like targets losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights #losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others - try: ## TF1.0 - loss = tf.divide(tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !! - tf.reduce_sum(weights), - name="seq_loss_with_mask") - except: ## TF0.12 - loss = tf.div(tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !! - tf.reduce_sum(weights), - name="seq_loss_with_mask") + # try: ## TF1.0 + loss = tf.divide( + tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !! + tf.reduce_sum(weights), + name="seq_loss_with_mask") + # except: ## TF0.12 + # loss = tf.div(tf.reduce_sum(losses), # loss from mask. reduce_sum before element-wise mul with mask !! + # tf.reduce_sum(weights), + # name="seq_loss_with_mask") if return_details: return loss, losses, weights, targets else: @@ -281,282 +471,269 @@ def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details= def cosine_similarity(v1, v2): - """Cosine similarity [-1, 1], `wiki `_. + """Cosine similarity [-1, 1]. Parameters - ----------- - v1, v2 : tensor of [batch_size, n_feature], with the same number of features. + ---------- + v1, v2 : Tensor + Tensor with the same shape [batch_size, n_feature]. Returns - ----------- - a tensor of [batch_size, ] + ------- + Tensor + a tensor of shape [batch_size]. + + References + ---------- + - ``__. 
+ """ - try: ## TF1.0 - cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1))) - except: ## TF0.12 - cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1))) + # try: ## TF1.0 + cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1))) + # except: ## TF0.12 + # cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1))) return cost ## Regularization Functions def li_regularizer(scale, scope=None): - """li regularization removes the neurons of previous layer, `i` represents `inputs`.\n - Returns a function that can be used to apply group li regularization to weights.\n - The implementation follows `TensorFlow contrib `_. - - Parameters - ---------- - scale : float - A scalar multiplier `Tensor`. 0.0 disables the regularizer. - scope: An optional scope name for TF12+. - - Returns - -------- - A function with signature `li(weights, name=None)` that apply Li regularization. - - Raises - ------ - ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float. 
- """ - import numbers - from tensorflow.python.framework import ops - from tensorflow.python.ops import standard_ops - # from tensorflow.python.platform import tf_logging as logging - - if isinstance(scale, numbers.Integral): - raise ValueError('scale cannot be an integer: %s' % scale) - if isinstance(scale, numbers.Real): - if scale < 0.: - raise ValueError('Setting a scale less than 0 on a regularizer: %g' % - scale) - if scale >= 1.: - raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % - scale) - if scale == 0.: - logging.info('Scale of 0 disables regularizer.') - return lambda _, name=None: None - - def li(weights, name=None): - """Applies li regularization to weights.""" - with tf.name_scope('li_regularizer') as scope: - my_scale = ops.convert_to_tensor(scale, - dtype=weights.dtype.base_dtype, - name='scale') - if tf.__version__ <= '0.12': - standard_ops_fn = standard_ops.mul - else: + """Li regularization removes the neurons of previous layer. The `i` represents `inputs`. + Returns a function that can be used to apply group li regularization to weights. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + scope: str + An optional scope name for this function. + + Returns + -------- + A function with signature `li(weights, name=None)` that apply Li regularization. + + Raises + ------ + ValueError : if scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + import numbers + from tensorflow.python.framework import ops + from tensorflow.python.ops import standard_ops + # from tensorflow.python.platform import tf_logging as logging + + if isinstance(scale, numbers.Integral): + raise ValueError('scale cannot be an integer: %s' % scale) + if isinstance(scale, numbers.Real): + if scale < 0.: + raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + if scale >= 1.: + raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale) + if scale == 0.: + logging.info('Scale of 0 disables regularizer.') + return lambda _, name=None: None + + def li(weights): + """Applies li regularization to weights.""" + with tf.name_scope('li_regularizer') as scope: + my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # if tf.__version__ <= '0.12': + # standard_ops_fn = standard_ops.mul + # else: standard_ops_fn = standard_ops.multiply - return standard_ops_fn( - my_scale, - standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))), - name=scope) - return li - - - -def lo_regularizer(scale, scope=None): - """lo regularization removes the neurons of current layer, `o` represents `outputs`\n - Returns a function that can be used to apply group lo regularization to weights.\n - The implementation follows `TensorFlow contrib `_. - - Parameters - ---------- - scale : float - A scalar multiplier `Tensor`. 0.0 disables the regularizer. - scope: An optional scope name for TF12+. - - Returns - ------- - A function with signature `lo(weights, name=None)` that apply Lo regularization. - - Raises - ------ - ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
- """ - import numbers - from tensorflow.python.framework import ops - from tensorflow.python.ops import standard_ops - # from tensorflow.python.platform import tf_logging as logging - - if isinstance(scale, numbers.Integral): - raise ValueError('scale cannot be an integer: %s' % scale) - if isinstance(scale, numbers.Real): - if scale < 0.: - raise ValueError('Setting a scale less than 0 on a regularizer: %g' % - scale) - if scale >= 1.: - raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % - scale) - if scale == 0.: - logging.info('Scale of 0 disables regularizer.') - return lambda _, name=None: None - - def lo(weights, name='lo_regularizer'): - """Applies group column regularization to weights.""" - with tf.name_scope(name) as scope: - my_scale = ops.convert_to_tensor(scale, - dtype=weights.dtype.base_dtype, - name='scale') - if tf.__version__ <= '0.12': - standard_ops_fn = standard_ops.mul - else: + return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 1))), name=scope) + + return li + + +def lo_regularizer(scale): + """Lo regularization removes the neurons of current layer. The `o` represents `outputs` + Returns a function that can be used to apply group lo regularization to weights. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + ------- + A function with signature `lo(weights, name=None)` that apply Lo regularization. + + Raises + ------ + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + import numbers + from tensorflow.python.framework import ops + from tensorflow.python.ops import standard_ops + # from tensorflow.python.platform import tf_logging as logging + + if isinstance(scale, numbers.Integral): + raise ValueError('scale cannot be an integer: %s' % scale) + if isinstance(scale, numbers.Real): + if scale < 0.: + raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + if scale >= 1.: + raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % scale) + if scale == 0.: + logging.info('Scale of 0 disables regularizer.') + return lambda _, name=None: None + + def lo(weights, name='lo_regularizer'): + """Applies group column regularization to weights.""" + with tf.name_scope(name) as scope: + my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # if tf.__version__ <= '0.12': + # standard_ops_fn = standard_ops.mul + # else: standard_ops_fn = standard_ops.multiply - return standard_ops_fn( - my_scale, - standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))), - name=scope) - return lo - -def maxnorm_regularizer(scale=1.0, scope=None): - """Max-norm regularization returns a function that can be used - to apply max-norm regularization to weights. - About max-norm: `wiki `_.\n - The implementation follows `TensorFlow contrib `_. - - Parameters - ---------- - scale : float - A scalar multiplier `Tensor`. 0.0 disables the regularizer. - scope: An optional scope name. - - Returns - --------- - A function with signature `mn(weights, name=None)` that apply Lo regularization. - - Raises - -------- - ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
- """ - import numbers - from tensorflow.python.framework import ops - from tensorflow.python.ops import standard_ops - - if isinstance(scale, numbers.Integral): - raise ValueError('scale cannot be an integer: %s' % scale) - if isinstance(scale, numbers.Real): - if scale < 0.: - raise ValueError('Setting a scale less than 0 on a regularizer: %g' % - scale) - # if scale >= 1.: - # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % - # scale) - if scale == 0.: - logging.info('Scale of 0 disables regularizer.') - return lambda _, name=None: None - - def mn(weights, name='max_regularizer'): - """Applies max-norm regularization to weights.""" - with tf.name_scope(name) as scope: - my_scale = ops.convert_to_tensor(scale, - dtype=weights.dtype.base_dtype, - name='scale') - if tf.__version__ <= '0.12': - standard_ops_fn = standard_ops.mul - else: - standard_ops_fn = standard_ops.multiply - return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope) - return mn - -def maxnorm_o_regularizer(scale, scope): - """Max-norm output regularization removes the neurons of current layer.\n - Returns a function that can be used to apply max-norm regularization to each column of weight matrix.\n - The implementation follows `TensorFlow contrib `_. - - Parameters - ---------- - scale : float - A scalar multiplier `Tensor`. 0.0 disables the regularizer. - scope: An optional scope name. - - Returns - --------- - A function with signature `mn_o(weights, name=None)` that apply Lo regularization. - - Raises - --------- - ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
- """ - import numbers - from tensorflow.python.framework import ops - from tensorflow.python.ops import standard_ops - - if isinstance(scale, numbers.Integral): - raise ValueError('scale cannot be an integer: %s' % scale) - if isinstance(scale, numbers.Real): - if scale < 0.: - raise ValueError('Setting a scale less than 0 on a regularizer: %g' % - scale) - # if scale >= 1.: - # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % - # scale) - if scale == 0.: - logging.info('Scale of 0 disables regularizer.') - return lambda _, name=None: None - - def mn_o(weights, name='maxnorm_o_regularizer'): - """Applies max-norm regularization to weights.""" - with tf.name_scope(name) as scope: - my_scale = ops.convert_to_tensor(scale, - dtype=weights.dtype.base_dtype, - name='scale') - if tf.__version__ <= '0.12': - standard_ops_fn = standard_ops.mul - else: - standard_ops_fn = standard_ops.multiply - return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)), name=scope) - return mn_o - -def maxnorm_i_regularizer(scale, scope=None): - """Max-norm input regularization removes the neurons of previous layer.\n - Returns a function that can be used to apply max-norm regularization to each row of weight matrix.\n - The implementation follows `TensorFlow contrib `_. - - Parameters - ---------- - scale : float - A scalar multiplier `Tensor`. 0.0 disables the regularizer. - scope: An optional scope name. - - Returns - --------- - A function with signature `mn_i(weights, name=None)` that apply Lo regularization. - - Raises - --------- - ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
- """ - import numbers - from tensorflow.python.framework import ops - from tensorflow.python.ops import standard_ops - - if isinstance(scale, numbers.Integral): - raise ValueError('scale cannot be an integer: %s' % scale) - if isinstance(scale, numbers.Real): - if scale < 0.: - raise ValueError('Setting a scale less than 0 on a regularizer: %g' % - scale) - # if scale >= 1.: - # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % - # scale) - if scale == 0.: - logging.info('Scale of 0 disables regularizer.') - return lambda _, name=None: None - - def mn_i(weights, name='maxnorm_i_regularizer'): - """Applies max-norm regularization to weights.""" - with tf.name_scope(name) as scope: - my_scale = ops.convert_to_tensor(scale, - dtype=weights.dtype.base_dtype, - name='scale') - if tf.__version__ <= '0.12': - standard_ops_fn = standard_ops.mul - else: - standard_ops_fn = standard_ops.multiply - return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)), name=scope) - return mn_i + return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.sqrt(standard_ops.reduce_sum(tf.square(weights), 0))), name=scope) + return lo +def maxnorm_regularizer(scale=1.0): + """Max-norm regularization returns a function that can be used to apply max-norm regularization to weights. + More about max-norm, see `wiki-max norm `_. + The implementation follows `TensorFlow contrib `__. -# + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn(weights, name=None)` that apply Lo regularization. + + Raises + -------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + import numbers + from tensorflow.python.framework import ops + from tensorflow.python.ops import standard_ops + + if isinstance(scale, numbers.Integral): + raise ValueError('scale cannot be an integer: %s' % scale) + if isinstance(scale, numbers.Real): + if scale < 0.: + raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # if scale >= 1.: + # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % + # scale) + if scale == 0.: + logging.info('Scale of 0 disables regularizer.') + return lambda _, name=None: None + + def mn(weights, name='max_regularizer'): + """Applies max-norm regularization to weights.""" + with tf.name_scope(name) as scope: + my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + # if tf.__version__ <= '0.12': + # standard_ops_fn = standard_ops.mul + # else: + standard_ops_fn = standard_ops.multiply + return standard_ops_fn(my_scale, standard_ops.reduce_max(standard_ops.abs(weights)), name=scope) + + return mn + + +def maxnorm_o_regularizer(scale): + """Max-norm output regularization removes the neurons of current layer. + Returns a function that can be used to apply max-norm regularization to each column of weight matrix. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn_o(weights, name=None)` that apply Lo regularization. + + Raises + --------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + import numbers + from tensorflow.python.framework import ops + from tensorflow.python.ops import standard_ops + + if isinstance(scale, numbers.Integral): + raise ValueError('scale cannot be an integer: %s' % scale) + if isinstance(scale, numbers.Real): + if scale < 0.: + raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # if scale >= 1.: + # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % + # scale) + if scale == 0.: + logging.info('Scale of 0 disables regularizer.') + return lambda _, name=None: None + + def mn_o(weights, name='maxnorm_o_regularizer'): + """Applies max-norm regularization to weights.""" + with tf.name_scope(name) as scope: + my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + if tf.__version__ <= '0.12': + standard_ops_fn = standard_ops.mul + else: + standard_ops_fn = standard_ops.multiply + return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 0)), name=scope) + + return mn_o + + +def maxnorm_i_regularizer(scale): + """Max-norm input regularization removes the neurons of previous layer. + Returns a function that can be used to apply max-norm regularization to each row of weight matrix. + The implementation follows `TensorFlow contrib `__. + + Parameters + ---------- + scale : float + A scalar multiplier `Tensor`. 0.0 disables the regularizer. + + Returns + --------- + A function with signature `mn_i(weights, name=None)` that apply Lo regularization. + + Raises + --------- + ValueError : If scale is outside of the range [0.0, 1.0] or if scale is not a float. 
+ + """ + import numbers + from tensorflow.python.framework import ops + from tensorflow.python.ops import standard_ops + + if isinstance(scale, numbers.Integral): + raise ValueError('scale cannot be an integer: %s' % scale) + if isinstance(scale, numbers.Real): + if scale < 0.: + raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale) + # if scale >= 1.: + # raise ValueError('Setting a scale greater than 1 on a regularizer: %g' % + # scale) + if scale == 0.: + logging.info('Scale of 0 disables regularizer.') + return lambda _, name=None: None + + def mn_i(weights, name='maxnorm_i_regularizer'): + """Applies max-norm regularization to weights.""" + with tf.name_scope(name) as scope: + my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale') + if tf.__version__ <= '0.12': + standard_ops_fn = standard_ops.mul + else: + standard_ops_fn = standard_ops.multiply + return standard_ops_fn(my_scale, standard_ops.reduce_sum(standard_ops.reduce_max(standard_ops.abs(weights), 1)), name=scope) + + return mn_i diff --git a/tensorlayer/db.py b/tensorlayer/db.py index af9f5ba..3ffe2f9 100644 --- a/tensorlayer/db.py +++ b/tensorlayer/db.py @@ -1,39 +1,28 @@ #! /usr/bin/python -# -*- coding: utf8 -*- +# -*- coding: utf-8 -*- """ Experimental Database Management System. 
Latest Version """ - -import tensorflow as tf -import tensorlayer as tl -import numpy as np +import inspect +import pickle import time -import math - - import uuid +from datetime import datetime -import pymongo import gridfs -import pickle from pymongo import MongoClient -from datetime import datetime -import inspect def AutoFill(func): - def func_wrapper(self,*args,**kwargs): - d=inspect.getcallargs(func,self,*args,**kwargs) - d['args'].update({"studyID":self.studyID}) - return func(**d) - return func_wrapper - - - + def func_wrapper(self, *args, **kwargs): + d = inspect.getcallargs(func, self, *args, **kwargs) + d['args'].update({"studyID": self.studyID}) + return func(**d) + return func_wrapper class TensorDB(object): @@ -41,13 +30,18 @@ class TensorDB(object): Parameters ------------- - ip : string, localhost or IP address. - port : int, port number. - db_name : string, database name. - user_name : string, set to None if it donnot need authentication. - password : string. - - Properties + ip : str + Localhost or IP address. + port : int + Port number. + db_name : str + Database name. + user_name : str + User name. Set to None if it donnot need authentication. + password : str + Password + + Attributes ------------ db : ``pymongo.MongoClient[db_name]``, xxxxxx datafs : ``gridfs.GridFS(self.db, collection="datafs")``, xxxxxxxxxx @@ -59,43 +53,30 @@ class TensorDB(object): db.TestLog : Collection for studyID : string, unique ID, if None random generate one. - Dependencies + Notes ------------- - 1 : MongoDB, as TensorDB is based on MongoDB, you need to install it in your - local machine or remote machine. - 2 : pip install pymongo, for MongoDB python API. - - Optional Tools - ---------------- - 1 : You may like to install MongoChef or Mongo Management Studo APP for - visualizing or testing your MongoDB. + - MongoDB, as TensorDB is based on MongoDB, you need to install it in your local machine or remote machine. + - pip install pymongo, for MongoDB python API. 
+ - You may like to install MongoChef or Mongo Management Studo APP for visualizing or testing your MongoDB. """ - def __init__( - self, - ip = 'localhost', - port = 27017, - db_name = 'db_name', - user_name = None, - password = 'password', - studyID=None - ): + + def __init__(self, ip='localhost', port=27017, db_name='db_name', user_name=None, password='password', studyID=None): ## connect mongodb client = MongoClient(ip, port) self.db = client[db_name] if user_name != None: self.db.authenticate(user_name, password) - if studyID is None: - self.studyID=str(uuid.uuid1()) + self.studyID = str(uuid.uuid1()) else: - self.studyID=studyID + self.studyID = studyID ## define file system (Buckets) self.datafs = gridfs.GridFS(self.db, collection="datafs") self.modelfs = gridfs.GridFS(self.db, collection="modelfs") self.paramsfs = gridfs.GridFS(self.db, collection="paramsfs") - self.archfs=gridfs.GridFS(self.db,collection="ModelArchitecture") + self.archfs = gridfs.GridFS(self.db, collection="ModelArchitecture") ## print("[TensorDB] Connect SUCCESS {}:{} {} {} {}".format(ip, port, db_name, user_name, studyID)) @@ -104,16 +85,19 @@ def __init__( self.db_name = db_name self.user_name = user_name - def __autofill(self,args): - return args.update({'studyID':self.studyID}) + @classmethod + def __autofill(self, args): + return args.update({'studyID': self.studyID}) - def __serialization(self,ps): + @staticmethod + def __serialization(ps): return pickle.dumps(ps, protocol=2) - def __deserialization(self,ps): + @staticmethod + def __deserialization(ps): return pickle.loads(ps) - def save_params(self, params=[], args={}):#, file_name='parameters'): + def save_params(self, params=None, args=None): #, file_name='parameters'): """ Save parameters into MongoDB Buckets, and save the file ID into Params Collections. Parameters @@ -125,32 +109,38 @@ def save_params(self, params=[], args={}):#, file_name='parameters'): --------- f_id : the Buckets ID of the parameters. 
""" + if params is None: + params = [] + if args is None: + args = {} self.__autofill(args) s = time.time() - f_id = self.paramsfs.put(self.__serialization(params))#, file_name=file_name) + f_id = self.paramsfs.put(self.__serialization(params)) #, file_name=file_name) args.update({'f_id': f_id, 'time': datetime.utcnow()}) self.db.Params.insert_one(args) # print("[TensorDB] Save params: {} SUCCESS, took: {}s".format(file_name, round(time.time()-s, 2))) - print("[TensorDB] Save params: SUCCESS, took: {}s".format(round(time.time()-s, 2))) + print("[TensorDB] Save params: SUCCESS, took: {}s".format(round(time.time() - s, 2))) return f_id @AutoFill - def find_one_params(self, args={},sort=None): + def find_one_params(self, args=None, sort=None): """ Find one parameter from MongoDB Buckets. Parameters ---------- - args : dictionary, find items. + args : dictionary + For finding items. Returns -------- params : the parameters, return False if nothing found. f_id : the Buckets ID of the parameters, return False if nothing found. """ - + if args is None: + args = {} s = time.time() # print(args) - d = self.db.Params.find_one(filter=args,sort=sort) + d = self.db.Params.find_one(filter=args, sort=sort) if d is not None: f_id = d['f_id'] @@ -159,13 +149,13 @@ def find_one_params(self, args={},sort=None): return False, False try: params = self.__deserialization(self.paramsfs.get(f_id).read()) - print("[TensorDB] Find one params SUCCESS, {} took: {}s".format(args, round(time.time()-s, 2))) + print("[TensorDB] Find one params SUCCESS, {} took: {}s".format(args, round(time.time() - s, 2))) return params, f_id - except: + except Exception: return False, False @AutoFill - def find_all_params(self, args={}): + def find_all_params(self, args=None): """ Find all parameter from MongoDB Buckets Parameters @@ -177,32 +167,34 @@ def find_all_params(self, args={}): params : the parameters, return False if nothing found. 
""" - + if args is None: + args = {} s = time.time() pc = self.db.Params.find(args) if pc is not None: f_id_list = pc.distinct('f_id') params = [] - for f_id in f_id_list: # you may have multiple Buckets files + for f_id in f_id_list: # you may have multiple Buckets files tmp = self.paramsfs.get(f_id).read() params.append(self.__deserialization(tmp)) else: print("[TensorDB] FAIL! Cannot find any: {}".format(args)) return False - print("[TensorDB] Find all params SUCCESS, took: {}s".format(round(time.time()-s, 2))) + print("[TensorDB] Find all params SUCCESS, took: {}s".format(round(time.time() - s, 2))) return params @AutoFill - def del_params(self, args={}): + def del_params(self, args=None): """ Delete params in MongoDB uckets. Parameters ----------- args : dictionary, find items to delete, leave it empty to delete all parameters. """ - + if args is None: + args = {} pc = self.db.Params.find(args) f_id_list = pc.distinct('f_id') # remove from Buckets @@ -213,18 +205,19 @@ def del_params(self, args={}): print("[TensorDB] Delete params SUCCESS: {}".format(args)) - def _print_dict(self, args): + @staticmethod + def _print_dict(args): # return " / ".join(str(key) + ": "+ str(value) for key, value in args.items()) string = '' for key, value in args.items(): if key is not '_id': - string += str(key) + ": "+ str(value) + " / " + string += str(key) + ": " + str(value) + " / " return string ## =========================== LOG =================================== ## @AutoFill - def train_log(self, args={}): + def train_log(self, args=None): """Save the training log. Parameters @@ -235,26 +228,28 @@ def train_log(self, args={}): --------- >>> db.train_log(time=time.time(), {'loss': loss, 'acc': acc}) """ - + if args is None: + args = {} _result = self.db.TrainLog.insert_one(args) _log = self._print_dict(args) #print("[TensorDB] TrainLog: " +_log) return _result @AutoFill - def del_train_log(self, args={}): + def del_train_log(self, args=None): """ Delete train log. 
Parameters ----------- args : dictionary, find items to delete, leave it empty to delete all log. """ - + if args is None: + args = {} self.db.TrainLog.delete_many(args) print("[TensorDB] Delete TrainLog SUCCESS") @AutoFill - def valid_log(self, args={}): + def valid_log(self, args=None): """Save the validating log. Parameters @@ -265,26 +260,29 @@ def valid_log(self, args={}): --------- >>> db.valid_log(time=time.time(), {'loss': loss, 'acc': acc}) """ - + if args is None: + args = {} _result = self.db.ValidLog.insert_one(args) # _log = "".join(str(key) + ": " + str(value) for key, value in args.items()) _log = self._print_dict(args) - print("[TensorDB] ValidLog: " +_log) + print("[TensorDB] ValidLog: " + _log) return _result @AutoFill - def del_valid_log(self, args={}): + def del_valid_log(self, args=None): """ Delete validation log. Parameters ----------- args : dictionary, find items to delete, leave it empty to delete all log. """ + if args is None: + args = {} self.db.ValidLog.delete_many(args) print("[TensorDB] Delete ValidLog SUCCESS") @AutoFill - def test_log(self, args={}): + def test_log(self, args=None): """Save the testing log. Parameters @@ -295,35 +293,44 @@ def test_log(self, args={}): --------- >>> db.test_log(time=time.time(), {'loss': loss, 'acc': acc}) """ - + if args is None: + args = {} _result = self.db.TestLog.insert_one(args) # _log = "".join(str(key) + str(value) for key, value in args.items()) _log = self._print_dict(args) - print("[TensorDB] TestLog: " +_log) + print("[TensorDB] TestLog: " + _log) return _result @AutoFill - def del_test_log(self, args={}): + def del_test_log(self, args=None): """ Delete test log. Parameters ----------- args : dictionary, find items to delete, leave it empty to delete all log. 
""" + if args is None: + args = {} self.db.TestLog.delete_many(args) print("[TensorDB] Delete TestLog SUCCESS") - ## =========================== Network Architecture ================== ## + # =========================== Network Architecture ================== ## @AutoFill - def save_model_architecture(self,s,args={}): + def save_model_architecture(self, s, args=None): + if args is None: + args = {} + self.__autofill(args) - fid=self.archfs.put(s,filename="modelarchitecture") - args.update({"fid":fid}) + fid = self.archfs.put(s, filename="modelarchitecture") + args.update({"fid": fid}) self.db.march.insert_one(args) @AutoFill - def load_model_architecture(self,args={}): + def load_model_architecture(self, args=None): + + if args is None: + args = {} d = self.db.march.find_one(args) if d is not None: @@ -333,11 +340,10 @@ def load_model_architecture(self,args={}): # "print find" else: print("[TensorDB] FAIL! Cannot find: {}".format(args)) - print ("no idtem") + print("no idtem") return False, False try: archs = self.archfs.get(fid).read() - '''print("[TensorDB] Find one params SUCCESS, {} took: {}s".format(args, round(time.time()-s, 2)))''' return archs, fid except Exception as e: print("exception") @@ -345,7 +351,7 @@ def load_model_architecture(self,args={}): return False, False @AutoFill - def save_job(self, script=None, args={}): + def save_job(self, script=None, args=None): """Save the job. Parameters @@ -364,6 +370,10 @@ def save_job(self, script=None, args={}): >>> import _your_script ... running your script """ + + if args is None: + args = {} + self.__autofill(args) if script is not None: _script = open(script, 'rb').read() @@ -375,7 +385,7 @@ def save_job(self, script=None, args={}): return _result @AutoFill - def find_one_job(self, args={}): + def find_one_job(self, args=None): """ Find one job from MongoDB Job Collections. Parameters @@ -387,6 +397,8 @@ def find_one_job(self, args={}): dictionary : contains all meta data and script. 
""" + if args is None: + args = {} temp = self.db.Job.find_one(args) @@ -402,151 +414,36 @@ def find_one_job(self, args={}): return temp - def push_job(self,margs, wargs,dargs,epoch): + def push_job(self, margs, wargs, dargs, epoch): - ms,mid=self.load_model_architecture(margs) - weight,wid=self.find_one_params(wargs) - args={"weight":wid,"model":mid,"dargs":dargs,"epoch":epoch,"time":datetime.utcnow(),"Running":False} + _ms, mid = self.load_model_architecture(margs) + _weight, wid = self.find_one_params(wargs) + args = {"weight": wid, "model": mid, "dargs": dargs, "epoch": epoch, "time": datetime.utcnow(), "Running": False} self.__autofill(args) self.db.JOBS.insert_one(args) def peek_job(self): - args={'Running':False} + args = {'Running': False} self.__autofill(args) - m=self.db.JOBS.find_one(args) + m = self.db.JOBS.find_one(args) print(m) if m is None: return False - s=self.paramsfs.get(m['weight']).read() - w=self.__deserialization(s) + s = self.paramsfs.get(m['weight']).read() + w = self.__deserialization(s) - ach=self.archfs.get(m['model']).read() + ach = self.archfs.get(m['model']).read() - return m['_id'], ach,w,m["dargs"],m['epoch'] + return m['_id'], ach, w, m["dargs"], m['epoch'] - def run_job(self,jid): - self.db.JOBS.find_one_and_update({'_id':jid},{'$set': {'Running': True,"Since":datetime.utcnow()}}) + def run_job(self, jid): + self.db.JOBS.find_one_and_update({'_id': jid}, {'$set': {'Running': True, "Since": datetime.utcnow()}}) - def del_job(self,jid): - self.db.JOBS.find_one_and_update({'_id':jid},{'$set': {'Running': True,"Finished":datetime.utcnow()}}) + def del_job(self, jid): + self.db.JOBS.find_one_and_update({'_id': jid}, {'$set': {'Running': True, "Finished": datetime.utcnow()}}) def __str__(self): _s = "[TensorDB] Info:\n" _t = _s + " " + str(self.db) return _t - - # def save_bulk_data(self, data=None, filename='filename'): - # """ Put bulk data into TensorDB.datafs, return file ID. 
- # When you have a very large data, you may like to save it into GridFS Buckets - # instead of Collections, then when you want to load it, XXXX - # - # Parameters - # ----------- - # data : serialized data. - # filename : string, GridFS Buckets. - # - # References - # ----------- - # - MongoDB find, xxxxx - # """ - # s = time.time() - # f_id = self.datafs.put(data, filename=filename) - # print("[TensorDB] save_bulk_data: {} took: {}s".format(filename, round(time.time()-s, 2))) - # return f_id - # - # def save_collection(self, data=None, collect_name='collect_name'): - # """ Insert data into MongoDB Collections, return xx. - # - # Parameters - # ----------- - # data : serialized data. - # collect_name : string, MongoDB collection name. - # - # References - # ----------- - # - MongoDB find, xxxxx - # """ - # s = time.time() - # rl = self.db[collect_name].insert_many(data) - # print("[TensorDB] save_collection: {} took: {}s".format(collect_name, round(time.time()-s, 2))) - # return rl - # - # def find(self, args={}, collect_name='collect_name'): - # """ Find data from MongoDB Collections. - # - # Parameters - # ----------- - # args : dictionary, arguments for finding. - # collect_name : string, MongoDB collection name. 
- # - # References - # ----------- - # - MongoDB find, xxxxx - # """ - # s = time.time() - # - # pc = self.db[collect_name].find(args) # pymongo.cursor.Cursor object - # flist = pc.distinct('f_id') - # fldict = {} - # for f in flist: # you may have multiple Buckets files - # # fldict[f] = pickle.loads(self.datafs.get(f).read()) - # # s2 = time.time() - # tmp = self.datafs.get(f).read() - # # print(time.time()-s2) - # fldict[f] = pickle.loads(tmp) - # # print(time.time()-s2) - # # exit() - # # print(round(time.time()-s, 2)) - # data = [fldict[x['f_id']][x['id']] for x in pc] - # data = np.asarray(data) - # print("[TensorDB] find: {} get: {} took: {}s".format(collect_name, pc.count(), round(time.time()-s, 2))) - # return data - - - -class DBLogger: - """ """ - def __init__(self,db,model): - self.db=db - self.model=model - - def on_train_begin(self,logs={}): - print("start") - - def on_train_end(self,logs={}): - print("end") - - def on_epoch_begin(self,epoch,logs={}): - self.epoch=epoch - self.et=time.time() - return - - def on_epoch_end(self, epoch, logs={}): - self.et=time.time()-self.et - print("ending") - print(epoch) - logs['epoch']=epoch - logs['time']=datetime.utcnow() - logs['stepTime']=self.et - logs['acc']=np.asscalar(logs['acc']) - print(logs) - - w=self.model.Params - fid=self.db.save_params(w,logs) - logs.update({'params':fid}) - self.db.valid_log(logs) - def on_batch_begin(self, batch,logs={}): - self.t=time.time() - self.losses = [] - self.batch=batch - - def on_batch_end(self, batch, logs={}): - self.t2=time.time()-self.t - logs['acc']=np.asscalar(logs['acc']) - #logs['loss']=np.asscalar(logs['loss']) - logs['step_time']=self.t2 - logs['time']=datetime.utcnow() - logs['epoch']=self.epoch - logs['batch']=self.batch - self.db.train_log(logs) diff --git a/tensorlayer/distributed.py b/tensorlayer/distributed.py new file mode 100644 index 0000000..e9d0335 --- /dev/null +++ b/tensorlayer/distributed.py @@ -0,0 +1,327 @@ +#! 
/usr/bin/python +# -*- coding: utf-8 -*- + +import json, os, time +import tensorflow as tf +from tensorflow.python.training import session_run_hook + +__all__ = ['TaskSpecDef', 'TaskSpec', 'DistributedSession', 'StopAtTimeHook', 'LoadCheckpoint'] + + +class TaskSpecDef(object): + """Specification for a distributed task. + + It contains the job name, index of the task, + the parameter servers and the worker servers. If you want to use the last worker + for continuous evaluation you can call the method `use_last_worker_as_evaluator` + which returns a new :class:`TaskSpecDef` object without the last worker in the + cluster specification. + + Parameters + ---------- + task_type : str + Task type. One of `master`, `worker` or `ps`. + index : int + The zero-based index of the task. Distributed training jobs will have a single + master task, one or more parameter servers, and one or more workers. + trial : int + The identifier of the trial being run. + ps_hosts : str OR list of str + A string with a coma separate list of hosts for the parameter servers + or a list of hosts. + worker_hosts : str OR list of str + A string with a coma separate list of hosts for the worker servers + or a list of hosts. + master : str + A string with the master hosts + + Notes + ---------- + master might not be included in TF_CONFIG and can be None. The shard_index is adjusted + in any case to assign 0 to master and >= 1 to workers. + This implementation doesn't support sparse arrays in the `TF_CONFIG` variable as the + official TensorFlow documentation shows, as it is not a supported by the json + definition. 
+ + References + ---------- + - `ML-engine trainer considerations `__ + + """ + + def __init__(self, task_type='master', index=0, trial=None, ps_hosts=None, worker_hosts=None, master=None): + self.type = task_type + self._index = int(index) + self._cluster_spec = None + self.num_workers = 1 + self.num_ps = 0 + self.shard_index = int(index) + self._master = True + self.trial = trial + self.ps_hosts = ps_hosts + self.worker_hosts = worker_hosts + self.master = master + self._server = None + + if ps_hosts and worker_hosts: + self.ps_hosts = ps_hosts if isinstance(ps_hosts, list) else ps_hosts.split(',') + self.num_ps = len(self.ps_hosts) + self.worker_hosts = worker_hosts if isinstance(worker_hosts, list) else worker_hosts.split(',') + if master is not None and len(master) > 0: + self._cluster_spec = tf.train.ClusterSpec({'ps': self.ps_hosts, 'worker': self.worker_hosts, 'master': master}) + # master is a worker too + self.num_workers = len(self.worker_hosts) + 1 + if self.type == 'worker': + self.shard_index = self._index + 1 + self._master = self.type == 'master' + else: + self._cluster_spec = tf.train.ClusterSpec({'ps': self.ps_hosts, 'worker': self.worker_hosts}) + self.num_workers = len(self.worker_hosts) + if self.type == 'worker': + self.shard_index = self._index + self._master = self.type == 'worker' and self._index == 0 + + def is_ps(self): + """Returns true if this server is a parameter server""" + return self.type == 'ps' + + def is_worker(self): + """Returns true if this server is a worker server""" + return self.type == 'worker' + + def is_master(self): + """Returns true if this server is the master server""" + return self._master + + def is_evaluator(self): + """Returns true if this server is the evaluator server""" + return self.type == 'worker' and self.num_workers == self._index + + def device_fn(self): + """Returns the function with the specification to create the graph in this server""" + current_device = '/job:{}/task:{}'.format(self.type, 
self._index) + ps_devices = '/job:ps' + return tf.train.replica_device_setter(ps_device=ps_devices, worker_device=current_device, cluster=self._cluster_spec) + + def create_server(self): + if self._server is None and self.ps_hosts and self.worker_hosts and not self.is_evaluator(): + # create server and join if it is a parameter server + self._server = tf.train.Server(self._cluster_spec, job_name=self.type, task_index=self._index) + if self.is_ps(): + self._server.join() + + def target(self): + if self._server is None: + self.create_server() + if self._server is not None: + return self._server.target + else: + return None + + def use_last_worker_as_evaluator(self): + """Returns a new :class:`TaskSpecDef` where the last worker has been removed from + the list of worker_hosts, so it is not used for training anymore. You can call + is_evaluator to know whether this server is the evaluator one or not. + In case there is only one server for training this method raises an exception, as + you cannot use any server for evaluation. + + """ + if self.num_workers <= 1: + raise Exception('You need more than one worker instance to use one as evaluator') + return TaskSpecDef( + task_type=self.type, index=self._index, trial=self.trial, ps_hosts=self.ps_hosts, worker_hosts=self.worker_hosts[:-1], master=self.master) + + +def create_task_spec_def(): + """Returns the a :class:`TaskSpecDef` based on the environment variables for distributed training. 
+ + References + ---------- + - `ML-engine trainer considerations `__ + - `TensorPort Distributed Computing `__ + + """ + if 'TF_CONFIG' in os.environ: + # TF_CONFIG is used in ML-engine + env = json.loads(os.environ.get('TF_CONFIG', '{}')) + task_data = env.get('task', None) or {'type': 'master', 'index': 0} + cluster_data = env.get('cluster', None) or {'ps': None, 'worker': None, 'master': None} + return TaskSpecDef( + task_type=task_data['type'], + index=task_data['index'], + trial=task_data['trial'] if 'trial' in task_data else None, + ps_hosts=cluster_data['ps'], + worker_hosts=cluster_data['worker'], + master=cluster_data['master'] if 'master' in cluster_data else None) + elif 'JOB_NAME' in os.environ: + # JOB_NAME, TASK_INDEX, PS_HOSTS, WORKER_HOSTS and MASTER_HOST are used in TensorPort + return TaskSpecDef( + task_type=os.environ['JOB_NAME'], + index=os.environ['TASK_INDEX'], + ps_hosts=os.environ.get('PS_HOSTS', None), + worker_hosts=os.environ.get('WORKER_HOSTS', None), + master=os.environ.get('MASTER_HOST', None)) + else: + raise Exception('You need to setup TF_CONFIG or JOB_NAME to define the task.') + + +def create_distributed_session(task_spec=None, + checkpoint_dir=None, + scaffold=None, + hooks=None, + chief_only_hooks=None, + save_checkpoint_secs=600, + save_summaries_steps=object(), + save_summaries_secs=object(), + config=None, + stop_grace_period_secs=120, + log_step_count_steps=100): + """Creates a distributed session. + + It calls `MonitoredTrainingSession` to create a :class:`MonitoredSession` for distributed training. + + Parameters + ---------- + task_spec : :class:`TaskSpecDef`. + The task spec definition from create_task_spec_def() + checkpoint_dir : str. + Optional path to a directory where to restore variables. + scaffold : ``Scaffold`` + A `Scaffold` used for gathering or building supportive ops. + If not specified, a default one is created. It's used to finalize the graph. + hooks : list of ``SessionRunHook`` objects. 
+ Optional + chief_only_hooks : list of ``SessionRunHook`` objects. + Activate these hooks if `is_chief==True`, ignore otherwise. + save_checkpoint_secs : int + The frequency, in seconds, that a checkpoint is saved + using a default checkpoint saver. If `save_checkpoint_secs` is set to + `None`, then the default checkpoint saver isn't used. + save_summaries_steps : int + The frequency, in number of global steps, that the + summaries are written to disk using a default summary saver. If both + `save_summaries_steps` and `save_summaries_secs` are set to `None`, then + the default summary saver isn't used. Default 100. + save_summaries_secs : int + The frequency, in secs, that the summaries are written + to disk using a default summary saver. If both `save_summaries_steps` and + `save_summaries_secs` are set to `None`, then the default summary saver + isn't used. Default not enabled. + config : ``tf.ConfigProto`` + an instance of `tf.ConfigProto` proto used to configure the session. + It's the `config` argument of constructor of `tf.Session`. + stop_grace_period_secs : int + Number of seconds given to threads to stop after + `close()` has been called. + log_step_count_steps : int + The frequency, in number of global steps, that the + global step/sec is logged. + + Examples + -------- + A simple example for distributed training where all the workers use the same dataset: + + >>> task_spec = TaskSpec() + >>> with tf.device(task_spec.device_fn()): + >>> tensors = create_graph() + >>> with tl.DistributedSession(task_spec=task_spec, + ... 
checkpoint_dir='/tmp/ckpt') as session: + >>> while not session.should_stop(): + >>> session.run(tensors) + + An example where the dataset is shared among the workers + (see https://www.tensorflow.org/programmers_guide/datasets): + + >>> task_spec = TaskSpec() + >>> # dataset is a :class:`tf.data.Dataset` with the raw data + >>> dataset = create_dataset() + >>> if task_spec is not None: + >>> dataset = dataset.shard(task_spec.num_workers, task_spec.shard_index) + >>> # shuffle or apply a map function to the new sharded dataset, for example: + >>> dataset = dataset.shuffle(buffer_size=10000) + >>> dataset = dataset.batch(batch_size) + >>> dataset = dataset.repeat(num_epochs) + >>> # create the iterator for the dataset and the input tensor + >>> iterator = dataset.make_one_shot_iterator() + >>> next_element = iterator.get_next() + >>> with tf.device(task_spec.device_fn()): + >>> # next_element is the input for the graph + >>> tensors = create_graph(next_element) + >>> with tl.DistributedSession(task_spec=task_spec, + ... checkpoint_dir='/tmp/ckpt') as session: + >>> while not session.should_stop(): + >>> session.run(tensors) + + References + ---------- + - `MonitoredTrainingSession `__ + + """ + target = task_spec.target() if task_spec is not None else None + is_chief = task_spec.is_master() if task_spec is not None else True + return tf.train.MonitoredTrainingSession( + master=target, + is_chief=is_chief, + checkpoint_dir=checkpoint_dir, + scaffold=scaffold, + save_checkpoint_secs=save_checkpoint_secs, + save_summaries_steps=save_summaries_steps, + save_summaries_secs=save_summaries_secs, + log_step_count_steps=log_step_count_steps, + stop_grace_period_secs=stop_grace_period_secs, + config=config, + hooks=hooks, + chief_only_hooks=chief_only_hooks) + + +class StopAtTimeHook(session_run_hook.SessionRunHook): + """Hook that requests stop after a specified time. 
+ + Parameters + ---------- + time_running: int + Maximum time running in seconds + + """ + + def __init__(self, time_running): + self._time_running = time_running + self._end_time = 0 + + def begin(self): + self._end_time = time.time() + self._time_running + + def after_run(self, run_context, run_values): + if time.time() > self._end_time: + run_context.request_stop() + + +class LoadCheckpoint(session_run_hook.SessionRunHook): + """Hook that loads a checkpoint after the session is created. + + >>> from tensorflow.python.ops import variables as tf_variables + >>> from tensorflow.python.training.monitored_session import SingularMonitoredSession + >>> + >>> tensors = create_graph() + >>> saver = tf.train.Saver(var_list=tf_variables.trainable_variables()) + >>> checkpoint_hook = LoadCheckpoint(saver, my_checkpoint_file) + >>> with tf.SingularMonitoredSession(hooks=[checkpoint_hook]) as session: + >>> while not session.should_stop(): + >>> session.run(tensors) + + """ + + def __init__(self, saver, checkpoint): + self._saver = saver + self._checkpoint = checkpoint + self._loaded = False + + def after_create_session(self, session, coord): + if not self._loaded: + self._loaded = True + self._saver.restore(self._checkpoint) + + +# Alias +TaskSpec = create_task_spec_def +DistributedSession = create_distributed_session diff --git a/tensorlayer/files.py b/tensorlayer/files.py index b4ac65b..0f51400 100644 --- a/tensorlayer/files.py +++ b/tensorlayer/files.py @@ -1,48 +1,165 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- +# -*- coding: utf-8 -*- +""" +A collections of helper functions to work with dataset. + +Load benchmark dataset, save and restore model, save and load variables. +TensorFlow provides ``.ckpt`` file format to save and restore the models, while +we suggest to use standard python file format ``.npz`` to save models for the +sake of cross-platform. + +.. 
code-block:: python + + ## save model as .ckpt + saver = tf.train.Saver() + save_path = saver.save(sess, "model.ckpt") + # restore model from .ckpt + saver = tf.train.Saver() + saver.restore(sess, "model.ckpt") + + ## save model as .npz + tl.files.save_npz(network.all_params , name='model.npz') + # restore model from .npz (method 1) + load_params = tl.files.load_npz(name='model.npz') + tl.files.assign_params(sess, load_params, network) + # restore model from .npz (method 2) + tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network) + + ## you can assign the pre-trained parameters as follow + # 1st parameter + tl.files.assign_params(sess, [load_params[0]], network) + # the first three parameters + tl.files.assign_params(sess, load_params[:3], network) + +""" - -import tensorflow as tf +import gzip import os -import numpy as np +import pickle import re import sys import tarfile -import gzip import zipfile -from . import visualize -from . import nlp -import pickle -from six.moves import urllib -from six.moves import cPickle -from six.moves import zip + +import numpy as np +import tensorflow as tf +from six.moves import cPickle, zip from tensorflow.python.platform import gfile +from . import _logging as logging +from . 
import nlp, utils, visualize + +__all__ = [ + 'load_mnist_dataset', + 'load_fashion_mnist_dataset', + 'load_cifar10_dataset', + 'load_ptb_dataset', + 'load_matt_mahoney_text8_dataset', + 'load_imdb_dataset', + 'load_nietzsche_dataset', + 'load_wmt_en_fr_dataset', + 'load_flickr25k_dataset', + 'load_flickr1M_dataset', + 'load_cyclegan_dataset', + 'download_file_from_google_drive', + 'load_celebA_dataset', + 'load_voc_dataset', + 'save_npz', + 'load_npz', + 'assign_params', + 'load_and_assign_npz', + 'save_npz_dict', + 'load_and_assign_npz_dict', + 'save_ckpt', + 'load_ckpt', + 'save_any_to_npy', + 'load_npy_to_any', + 'file_exists', + 'folder_exists', + 'del_file', + 'del_folder', + 'read_file', + 'load_file_list', + 'load_folder_list', + 'exists_or_mkdir', + 'maybe_download_and_extract', + 'natural_keys', + 'npz_to_W_pdf', +] + ## Load dataset functions -def load_mnist_dataset(shape=(-1,784), path="data/mnist/"): - """Automatically download MNIST dataset - and return the training, validation and test set with 50000, 10000 and 10000 - digit images respectively. +def load_mnist_dataset(shape=(-1, 784), path='data'): + """Load the original mnist. + + Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively. Parameters ---------- shape : tuple - The shape of digit images, defaults to (-1,784) - path : string - Path to download data to, defaults to data/mnist/ + The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)). + path : str + The path that the data is downloaded to. + + Returns + ------- + X_train, y_train, X_val, y_val, X_test, y_test: tuple + Return splitted training/validation/test set respectively. 
Examples -------- - >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784)) + >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets') >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) """ - # We first define functions for loading MNIST images and labels. + return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/') + + +def load_fashion_mnist_dataset(shape=(-1, 784), path='data'): + """Load the fashion mnist. + + Automatically download fashion-MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 fashion images respectively, `examples `__. + + Parameters + ---------- + shape : tuple + The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)). + path : str + The path that the data is downloaded to. + + Returns + ------- + X_train, y_train, X_val, y_val, X_test, y_test: tuple + Return splitted training/validation/test set respectively. + + Examples + -------- + >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1,784), path='datasets') + >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_fashion_mnist_dataset(shape=(-1, 28, 28, 1)) + """ + return _load_mnist_dataset(shape, path, name='fashion_mnist', url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/') + + +def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'): + """A generic function to load mnist-like dataset. + + Parameters: + ---------- + shape : tuple + The shape of digit images. + path : str + The path that the data is downloaded to. + name : str + The dataset name you want to use(the default is 'mnist'). + url : str + The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/'). 
+ """ + path = os.path.join(path, name) + + # Define functions for loading mnist-like data's images and labels. # For convenience, they also download the requested files if needed. def load_mnist_images(path, filename): - filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/') + filepath = maybe_download_and_extract(filename, path, url) - print(filepath) + logging.info(filepath) # Read the inputs in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=16) @@ -55,7 +172,7 @@ def load_mnist_images(path, filename): return data / np.float32(256) def load_mnist_labels(path, filename): - filepath = maybe_download_and_extract(filename, path, 'http://yann.lecun.com/exdb/mnist/') + filepath = maybe_download_and_extract(filename, path, url) # Read the labels in Yann LeCun's binary format. with gzip.open(filepath, 'rb') as f: data = np.frombuffer(f.read(), np.uint8, offset=8) @@ -63,7 +180,7 @@ def load_mnist_labels(path, filename): return data # Download and read the training and test set images and labels. - print("Load or Download MNIST > {}".format(path)) + logging.info("Load or Download {0} > {1}".format(name.upper(), path)) X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz') y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz') X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz') @@ -84,8 +201,10 @@ def load_mnist_labels(path, filename): return X_train, y_train, X_val, y_val, X_test, y_test -def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data/cifar10/', plotable=False, second=3): - """The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with +def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False): + """Load CIFAR-10 dataset. + + It consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. 
The dataset is divided into five training batches and one test batch, each with @@ -97,43 +216,25 @@ def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data/cifar10/', plotable=F Parameters ---------- shape : tupe - The shape of digit images: e.g. (-1, 3, 32, 32) , (-1, 32, 32, 3) , (-1, 32*32*3) - plotable : True, False - Whether to plot some image examples. - second : int - If ``plotable`` is True, ``second`` is the display time. - path : string - Path to download data to, defaults to data/cifar10/ + The shape of digit images e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3). + path : str + The path that the data is downloaded to, defaults is ``data/cifar10/``. + plotable : boolean + Whether to plot some image examples, False as default. Examples -------- - >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=True) - - Notes - ------ - CIFAR-10 images can only be display without color change under uint8. - >>> X_train = np.asarray(X_train, dtype=np.uint8) - >>> plt.ion() - >>> fig = plt.figure(1232) - >>> count = 1 - >>> for row in range(10): - >>> for col in range(10): - >>> a = fig.add_subplot(10, 10, count) - >>> plt.imshow(X_train[count-1], interpolation='nearest') - >>> plt.gca().xaxis.set_major_locator(plt.NullLocator()) # 不显示刻度(tick) - >>> plt.gca().yaxis.set_major_locator(plt.NullLocator()) - >>> count = count + 1 - >>> plt.draw() - >>> plt.pause(3) + >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3)) References ---------- - - `CIFAR website `_ - - `Data download link `_ - - `Code references `_ - """ + - `CIFAR website `__ + - `Data download link `__ + - ``__ - print("Load or Download cifar10 > {}".format(path)) + """ + path = os.path.join(path, 'cifar10') + logging.info("Load or Download cifar10 > {}".format(path)) #Helper function to unpickle the data def unpickle(file): @@ -153,7 +254,7 @@ def unpickle(file): #Unpickle file and fill in data X_train = None y_train = [] - 
for i in range(1,6): + for i in range(1, 6): data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "data_batch_{}".format(i))) if i == 1: X_train = data_dic['data'] @@ -161,7 +262,7 @@ def unpickle(file): X_train = np.vstack((X_train, data_dic['data'])) y_train += data_dic['labels'] - test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch")) + test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch")) X_test = test_data_dic['data'] y_test = np.array(test_data_dic['labels']) @@ -179,37 +280,37 @@ def unpickle(file): y_train = np.array(y_train) - if plotable == True: - print('\nCIFAR-10') + if plotable: + logging.info('\nCIFAR-10') import matplotlib.pyplot as plt fig = plt.figure(1) - print('Shape of a training image: X_train[0]',X_train[0].shape) + logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape) - plt.ion() # interactive mode + plt.ion() # interactive mode count = 1 - for row in range(10): - for col in range(10): - a = fig.add_subplot(10, 10, count) + for _ in range(10): # each row + for _ in range(10): # each column + _ = fig.add_subplot(10, 10, count) if shape == (-1, 3, 32, 32): # plt.imshow(X_train[count-1], interpolation='nearest') - plt.imshow(np.transpose(X_train[count-1], (1, 2, 0)), interpolation='nearest') + plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)), interpolation='nearest') # plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest') elif shape == (-1, 32, 32, 3): - plt.imshow(X_train[count-1], interpolation='nearest') + plt.imshow(X_train[count - 1], interpolation='nearest') # plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest') else: raise Exception("Do not support the given 'shape' to plot the image examples") - plt.gca().xaxis.set_major_locator(plt.NullLocator()) # 不显示刻度(tick) + plt.gca().xaxis.set_major_locator(plt.NullLocator()) # 不显示刻度(tick) plt.gca().yaxis.set_major_locator(plt.NullLocator()) count = 
count + 1 - plt.draw() # interactive mode - plt.pause(3) # interactive mode + plt.draw() # interactive mode + plt.pause(3) # interactive mode - print("X_train:",X_train.shape) - print("y_train:",y_train.shape) - print("X_test:",X_test.shape) - print("y_test:",y_test.shape) + logging.info("X_train: %s" % X_train.shape) + logging.info("y_train: %s" % y_train.shape) + logging.info("X_test: %s" % X_test.shape) + logging.info("y_test: %s" % y_test.shape) X_train = np.asarray(X_train, dtype=np.float32) X_test = np.asarray(X_test, dtype=np.float32) @@ -219,57 +320,43 @@ def unpickle(file): return X_train, y_train, X_test, y_test -def load_ptb_dataset(path='data/ptb/'): - """Penn TreeBank (PTB) dataset is used in many LANGUAGE MODELING papers, +def load_ptb_dataset(path='data'): + """Load Penn TreeBank (PTB) dataset. + + It is used in many LANGUAGE MODELING papers, including "Empirical Evaluation and Combination of Advanced Language Modeling Techniques", "Recurrent Neural Network Regularization". - It consists of 929k training words, 73k validation words, and 82k test words. It has 10k words in its vocabulary. - In "Recurrent Neural Network Regularization", they trained regularized LSTMs - of two sizes; these are denoted the medium LSTM and large LSTM. Both LSTMs - have two layers and are unrolled for 35 steps. They initialize the hidden - states to zero. They then use the final hidden states of the current - minibatch as the initial hidden state of the subsequent minibatch - (successive minibatches sequentially traverse the training set). - The size of each minibatch is 20. - - The medium LSTM has 650 units per layer and its parameters are initialized - uniformly in [−0.05, 0.05]. They apply 50% dropout on the non-recurrent - connections. They train the LSTM for 39 epochs with a learning rate of 1, - and after 6 epochs they decrease it by a factor of 1.2 after each epoch. - They clip the norm of the gradients (normalized by minibatch size) at 5. 
- - The large LSTM has 1500 units per layer and its parameters are initialized - uniformly in [−0.04, 0.04]. We apply 65% dropout on the non-recurrent - connections. They train the model for 55 epochs with a learning rate of 1; - after 14 epochs they start to reduce the learning rate by a factor of 1.15 - after each epoch. They clip the norm of the gradients (normalized by - minibatch size) at 10. - Parameters ---------- - path : : string - Path to download data to, defaults to data/ptb/ + path : str + The path that the data is downloaded to, defaults is ``data/ptb/``. Returns -------- - train_data, valid_data, test_data, vocabulary size + train_data, valid_data, test_data : list of int + The training, validating and testing data in integer format. + vocab_size : int + The vocabulary size. Examples -------- >>> train_data, valid_data, test_data, vocab_size = tl.files.load_ptb_dataset() - Code References + References --------------- - ``tensorflow.models.rnn.ptb import reader`` + - `Manual download `__ + + Notes + ------ + - If you want to get the raw data, see the source code. - Download Links - --------------- - - `Manual download `_ """ - print("Load or Download Penn TreeBank (PTB) dataset > {}".format(path)) + path = os.path.join(path, 'ptb') + logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path)) #Maybe dowload and uncompress tar, or load exsisting files filename = 'simple-examples.tgz' @@ -286,40 +373,42 @@ def load_ptb_dataset(path='data/ptb/'): train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id) valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id) test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id) - vocabulary = len(word_to_id) + vocab_size = len(word_to_id) - # print(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '', ''] - # print(train_data) # ... 214, 5, 23, 1, 2] - # print(word_to_id) # ... 
'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '': 2 ... } - # print(vocabulary) # 10000 + # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '', ''] + # logging.info(train_data) # ... 214, 5, 23, 1, 2] + # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '': 2 ... } + # logging.info(vocabulary) # 10000 # exit() - return train_data, valid_data, test_data, vocabulary + return train_data, valid_data, test_data, vocab_size + +def load_matt_mahoney_text8_dataset(path='data'): + """Load Matt Mahoney's dataset. -def load_matt_mahoney_text8_dataset(path='data/mm_test8/'): - """Download a text file from Matt Mahoney's website + Download a text file from Matt Mahoney's website if not present, and make sure it's the right size. Extract the first file enclosed in a zip file as a list of words. This dataset can be used for Word Embedding. Parameters ---------- - path : : string - Path to download data to, defaults to data/mm_test8/ + path : str + The path that the data is downloaded to, defaults is ``data/mm_test8/``. Returns -------- - word_list : a list - a list of string (word).\n - e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...] + list of str + The raw text data e.g. [.... 'their', 'families', 'who', 'were', 'expelled', 'from', 'jerusalem', ...] 
Examples -------- >>> words = tl.files.load_matt_mahoney_text8_dataset() >>> print('Data size', len(words)) - """ - print("Load or Download matt_mahoney_text8 Dataset> {}".format(path)) + """ + path = os.path.join(path, 'mm_test8') + logging.info("Load or Download matt_mahoney_text8 Dataset> {}".format(path)) filename = 'text8.zip' url = 'http://mattmahoney.net/dc/' @@ -327,23 +416,36 @@ def load_matt_mahoney_text8_dataset(path='data/mm_test8/'): with zipfile.ZipFile(os.path.join(path, filename)) as f: word_list = f.read(f.namelist()[0]).split() - + for idx, _ in enumerate(word_list): + word_list[idx] = word_list[idx].decode() return word_list -def load_imdb_dataset(path='data/imdb/', nb_words=None, skip_top=0, - maxlen=None, test_split=0.2, seed=113, - start_char=1, oov_char=2, index_from=3): - """Load IMDB dataset +def load_imdb_dataset(path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2, index_from=3): + """Load IMDB dataset. Parameters ---------- - path : : string - Path to download data to, defaults to data/imdb/ + path : str + The path that the data is downloaded to, defaults is ``data/imdb/``. + nb_words : int + Number of words to get. + skip_top : int + Top most frequent words to ignore (they will appear as oov_char value in the sequence data). + maxlen : int + Maximum sequence length. Any longer sequence will be truncated. + seed : int + Seed for reproducible data shuffling. + start_char : int + The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character. + oov_char : int + Words that were cut out because of the num_words or skip_top limit will be replaced with this character. + index_from : int + Index actual words with this index and higher. Examples -------- - >>> X_train, y_train, X_test, y_test = tl.files.load_imbd_dataset( + >>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset( ... 
nb_words=20000, test_split=0.2) >>> print('X_train.shape', X_train.shape) ... (20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..] @@ -352,8 +454,10 @@ def load_imdb_dataset(path='data/imdb/', nb_words=None, skip_top=0, References ----------- - - `Modified from keras. `_ + - `Modified from keras. `__ + """ + path = os.path.join(path, 'imdb') filename = "imdb.pkl" url = 'https://s3.amazonaws.com/text-datasets/' @@ -387,9 +491,7 @@ def load_imdb_dataset(path='data/imdb/', nb_words=None, skip_top=0, X = new_X labels = new_labels if not X: - raise Exception('After filtering for sequences shorter than maxlen=' + - str(maxlen) + ', no sequence was kept. ' - 'Increase maxlen.') + raise Exception('After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. ' 'Increase maxlen.') if not nb_words: nb_words = max([max(x) for x in X]) @@ -415,14 +517,19 @@ def load_imdb_dataset(path='data/imdb/', nb_words=None, skip_top=0, return X_train, y_train, X_test, y_test -def load_nietzsche_dataset(path='data/nietzsche/'): + +def load_nietzsche_dataset(path='data'): """Load Nietzsche dataset. - Returns a string. Parameters ---------- - path : string - Path to download data to, defaults to data/nietzsche/ + path : str + The path that the data is downloaded to, defaults is ``data/nietzsche/``. + + Returns + -------- + str + The content. 
Examples -------- @@ -430,8 +537,10 @@ def load_nietzsche_dataset(path='data/nietzsche/'): >>> words = tl.files.load_nietzsche_dataset() >>> words = basic_clean_str(words) >>> words = words.split() + """ - print("Load or Download nietzsche dataset > {}".format(path)) + logging.info("Load or Download nietzsche dataset > {}".format(path)) + path = os.path.join(path, 'nietzsche') filename = "nietzsche.txt" url = 'https://s3.amazonaws.com/text-datasets/' @@ -441,16 +550,17 @@ def load_nietzsche_dataset(path='data/nietzsche/'): words = f.read() return words -def load_wmt_en_fr_dataset(path='data/wmt_en_fr/'): - """It will download English-to-French translation data from the WMT'15 - Website (10^9-French-English corpus), and the 2013 news test from - the same site as development set. + +def load_wmt_en_fr_dataset(path='data'): + """Load WMT'15 English-to-French translation dataset. + + It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set. Returns the directories of training data and test data. Parameters ---------- - path : string - Path to download data to, defaults to data/wmt_en_fr/ + path : str + The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``. References ---------- @@ -459,14 +569,16 @@ def load_wmt_en_fr_dataset(path='data/wmt_en_fr/'): Notes ----- Usually, it will take a long time to download this dataset. + """ + path = os.path.join(path, 'wmt_en_fr') # URLs for WMT data. 
_WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/" _WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/" def gunzip_file(gz_path, new_path): """Unzips from gz_path into new_path.""" - print("Unpacking %s to %s" % (gz_path, new_path)) + logging.info("Unpacking %s to %s" % (gz_path, new_path)) with gzip.open(gz_path, "rb") as gz_file: with open(new_path, "wb") as new_file: for line in gz_file: @@ -488,17 +600,17 @@ def get_wmt_enfr_dev_set(path): dev_name = "newstest2013" dev_path = os.path.join(path, "newstest2013") if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")): - print("Extracting tgz file %s" % dev_file) + logging.info("Extracting tgz file %s" % dev_file) with tarfile.open(dev_file, "r:gz") as dev_tar: - fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") - en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") - fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix. - en_dev_file.name = dev_name + ".en" - dev_tar.extract(fr_dev_file, path) - dev_tar.extract(en_dev_file, path) + fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") + en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") + fr_dev_file.name = dev_name + ".fr" # Extract without "dev/" prefix. + en_dev_file.name = dev_name + ".en" + dev_tar.extract(fr_dev_file, path) + dev_tar.extract(en_dev_file, path) return dev_path - print("Load or Download WMT English-to-French translation > {}".format(path)) + logging.info("Load or Download WMT English-to-French translation > {}".format(path)) train_path = get_wmt_enfr_train_set(path) dev_path = get_wmt_enfr_dev_set(path) @@ -506,30 +618,624 @@ def get_wmt_enfr_dev_set(path): return train_path, dev_path -## Load and save network -def save_npz(save_list=[], name='model.npz', sess=None): +def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False): + """Load Flickr25K dataset. 
+ + Returns a list of images by a given tag from Flick25k dataset, + it will download Flickr25k from `the official website `__ + at the first time you use it. + + Parameters + ------------ + tag : str or None + What images to return. + - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search `__. + - If you want to get all images, set to ``None``. + + path : str + The path that the data is downloaded to, defaults is ``data/flickr25k/``. + n_threads : int + The number of thread to read image. + printable : boolean + Whether to print infomation when reading images, default is ``False``. + + Examples + ----------- + Get images with tag of sky + + >>> images = tl.files.load_flickr25k_dataset(tag='sky') + + Get all images + + >>> images = tl.files.load_flickr25k_dataset(tag=None, n_threads=100, printable=True) + + """ + path = os.path.join(path, 'flickr25k') + + filename = 'mirflickr25k.zip' + url = 'http://press.liacs.nl/mirflickr/mirflickr25k/' + + # download dataset + if folder_exists(path + "/mirflickr") is False: + logging.info("[*] Flickr25k is nonexistent in {}".format(path)) + maybe_download_and_extract(filename, path, url, extract=True) + del_file(path + '/' + filename) + + # return images by the given tag. + # 1. image path list + folder_imgs = path + "/mirflickr" + path_imgs = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False) + path_imgs.sort(key=natural_keys) + + # 2. tag path list + folder_tags = path + "/mirflickr/meta/tags" + path_tags = load_file_list(path=folder_tags, regx='\\.txt', printable=False) + path_tags.sort(key=natural_keys) + + # 3. 
select images + if tag is None: + logging.info("[Flickr25k] reading all images") + else: + logging.info("[Flickr25k] reading images with tag: {}".format(tag)) + images_list = [] + for idx, _v in enumerate(path_tags): + tags = read_file(folder_tags + '/' + path_tags[idx]).split('\n') + # logging.info(idx+1, tags) + if tag is None or tag in tags: + images_list.append(path_imgs[idx]) + + images = visualize.read_images(images_list, folder_imgs, n_threads=n_threads, printable=printable) + return images + + +def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printable=False): + """Load Flick1M dataset. + + Returns a list of images by a given tag from Flickr1M dataset, + it will download Flickr1M from `the official website `__ + at the first time you use it. + + Parameters + ------------ + tag : str or None + What images to return. + - If you want to get images with tag, use string like 'dog', 'red', see `Flickr Search `__. + - If you want to get all images, set to ``None``. + + size : int + integer between 1 to 10. 1 means 100k images ... 5 means 500k images, 10 means all 1 million images. Default is 10. + path : str + The path that the data is downloaded to, defaults is ``data/flickr25k/``. + n_threads : int + The number of thread to read image. + printable : boolean + Whether to print infomation when reading images, default is ``False``. 
+ + Examples + ---------- + Use 200k images + + >>> images = tl.files.load_flickr1M_dataset(tag='zebra', size=2) + + Use 1 Million images + + >>> images = tl.files.load_flickr1M_dataset(tag='zebra') + + """ + path = os.path.join(path, 'flickr1M') + logging.info("[Flickr1M] using {}% of images = {}".format(size * 10, size * 100000)) + images_zip = [ + 'images0.zip', 'images1.zip', 'images2.zip', 'images3.zip', 'images4.zip', 'images5.zip', 'images6.zip', 'images7.zip', 'images8.zip', 'images9.zip' + ] + tag_zip = 'tags.zip' + url = 'http://press.liacs.nl/mirflickr/mirflickr1m/' + + # download dataset + for image_zip in images_zip[0:size]: + image_folder = image_zip.split(".")[0] + # logging.info(path+"/"+image_folder) + if folder_exists(path + "/" + image_folder) is False: + # logging.info(image_zip) + logging.info("[Flickr1M] {} is missing in {}".format(image_folder, path)) + maybe_download_and_extract(image_zip, path, url, extract=True) + del_file(path + '/' + image_zip) + os.system("mv {} {}".format(path + '/images', path + '/' + image_folder)) + else: + logging.info("[Flickr1M] {} exists in {}".format(image_folder, path)) + + # download tag + if folder_exists(path + "/tags") is False: + logging.info("[Flickr1M] tag files is nonexistent in {}".format(path)) + maybe_download_and_extract(tag_zip, path, url, extract=True) + del_file(path + '/' + tag_zip) + else: + logging.info("[Flickr1M] tags exists in {}".format(path)) + + # 1. image path list + images_list = [] + images_folder_list = [] + for i in range(0, size): + images_folder_list += load_folder_list(path=os.path.join(path, 'images%d' % i)) + images_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd + + for folder in images_folder_list[0:size * 10]: + tmp = load_file_list(path=folder, regx='\\.jpg', printable=False) + tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.jpg + images_list.extend([folder + '/' + x for x in tmp]) + + # 2. 
tag path list + tag_list = [] + tag_folder_list = load_folder_list(path + "/tags") + tag_folder_list.sort(key=lambda s: int(s.split('/')[-1])) # folder/images/ddd + + for folder in tag_folder_list[0:size * 10]: + tmp = load_file_list(path=folder, regx='\\.txt', printable=False) + tmp.sort(key=lambda s: int(s.split('.')[-2])) # ddd.txt + tmp = [folder + '/' + s for s in tmp] + tag_list += tmp + + # 3. select images + logging.info("[Flickr1M] searching tag: {}".format(tag)) + select_images_list = [] + for idx, _val in enumerate(tag_list): + tags = read_file(tag_list[idx]).split('\n') + if tag in tags: + select_images_list.append(images_list[idx]) + + logging.info("[Flickr1M] reading images with tag: {}".format(tag)) + images = visualize.read_images(select_images_list, '', n_threads=n_threads, printable=printable) + return images + + +def load_cyclegan_dataset(filename='summer2winter_yosemite', path='data'): + """Load images from CycleGAN's database, see `this link `__. + + Parameters + ------------ + filename : str + The dataset you want, see `this link `__. 
+ path : str + The path that the data is downloaded to, defaults is `data/cyclegan` + + Examples + --------- + >>> im_train_A, im_train_B, im_test_A, im_test_B = load_cyclegan_dataset(filename='summer2winter_yosemite') + + """ + path = os.path.join(path, 'cyclegan') + url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/' + + if folder_exists(os.path.join(path, filename)) is False: + logging.info("[*] {} is nonexistent in {}".format(filename, path)) + maybe_download_and_extract(filename + '.zip', path, url, extract=True) + del_file(os.path.join(path, filename + '.zip')) + + def load_image_from_folder(path): + path_imgs = load_file_list(path=path, regx='\\.jpg', printable=False) + return visualize.read_images(path_imgs, path=path, n_threads=10, printable=False) + + im_train_A = load_image_from_folder(os.path.join(path, filename, "trainA")) + im_train_B = load_image_from_folder(os.path.join(path, filename, "trainB")) + im_test_A = load_image_from_folder(os.path.join(path, filename, "testA")) + im_test_B = load_image_from_folder(os.path.join(path, filename, "testB")) + + def if_2d_to_3d(images): # [h, w] --> [h, w, 3] + for i, _v in enumerate(images): + if len(images[i].shape) == 2: + images[i] = images[i][:, :, np.newaxis] + images[i] = np.tile(images[i], (1, 1, 3)) + return images + + im_train_A = if_2d_to_3d(im_train_A) + im_train_B = if_2d_to_3d(im_train_B) + im_test_A = if_2d_to_3d(im_test_A) + im_test_B = if_2d_to_3d(im_test_B) + + return im_train_A, im_train_B, im_test_A, im_test_B + + +def download_file_from_google_drive(ID, destination): + """Download file from Google Drive. + + See ``tl.files.load_celebA_dataset`` for example. + + Parameters + -------------- + ID : str + The driver ID. + destination : str + The destination for save file. 
+ + """ + from tqdm import tqdm + import requests + + def save_response_content(response, destination, chunk_size=32 * 1024): + total_size = int(response.headers.get('content-length', 0)) + with open(destination, "wb") as f: + for chunk in tqdm(response.iter_content(chunk_size), total=total_size, unit='B', unit_scale=True, desc=destination): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + + def get_confirm_token(response): + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + return value + return None + + URL = "https://docs.google.com/uc?export=download" + session = requests.Session() + + response = session.get(URL, params={'id': ID}, stream=True) + token = get_confirm_token(response) + + if token: + params = {'id': ID, 'confirm': token} + response = session.get(URL, params=params, stream=True) + save_response_content(response, destination) + + +def load_celebA_dataset(path='data'): + """Load CelebA dataset + + Return a list of image path. + + Parameters + ----------- + path : str + The path that the data is downloaded to, defaults is ``data/celebA/``. 
+ + """ + data_dir = 'celebA' + filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM" + save_path = os.path.join(path, filename) + image_path = os.path.join(path, data_dir) + if os.path.exists(image_path): + logging.info('[*] {} already exists'.format(save_path)) + else: + exists_or_mkdir(path) + download_file_from_google_drive(drive_id, save_path) + zip_dir = '' + with zipfile.ZipFile(save_path) as zf: + zip_dir = zf.namelist()[0] + zf.extractall(path) + os.remove(save_path) + os.rename(os.path.join(path, zip_dir), image_path) + + data_files = load_file_list(path=image_path, regx='\\.jpg', printable=False) + for i, _v in enumerate(data_files): + data_files[i] = os.path.join(image_path, data_files[i]) + return data_files + + +def load_voc_dataset(path='data', dataset='2012', contain_classes_in_person=False): + """Pascal VOC 2007/2012 Dataset. + + It has 20 objects: + aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor + and additional 3 classes : head, hand, foot for person. + + Parameters + ----------- + path : str + The path that the data is downloaded to, defaults is ``data/VOC``. + dataset : str + The VOC dataset version, `2012`, `2007`, `2007test` or `2012test`. We usually train model on `2007+2012` and test it on `2007test`. + contain_classes_in_person : boolean + Whether include head, hand and foot annotation, default is False. + + Returns + --------- + imgs_file_list : list of str + Full paths of all images. + imgs_semseg_file_list : list of str + Full paths of all maps for semantic segmentation. Note that not all images have this map! + imgs_insseg_file_list : list of str + Full paths of all maps for instance segmentation. Note that not all images have this map! + imgs_ann_file_list : list of str + Full paths of all annotations for bounding box and object class, all images have this annotations. 
+ classes : list of str + Classes in order. + classes_in_person : list of str + Classes in person. + classes_dict : dictionary + Class label to integer. + n_objs_list : list of int + Number of objects in all images in ``imgs_file_list`` in order. + objs_info_list : list of str + Darknet format for the annotation of all images in ``imgs_file_list`` in order. ``[class_id x_centre y_centre width height]`` in ratio format. + objs_info_dicts : dictionary + The annotation of all images in ``imgs_file_list``, ``{imgs_file_list : dictionary for annotation}``, + format from `TensorFlow/Models/object-detection `__. + + Examples + ---------- + >>> imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, + >>> classes, classes_in_person, classes_dict, + >>> n_objs_list, objs_info_list, objs_info_dicts = tl.files.load_voc_dataset(dataset="2012", contain_classes_in_person=False) + >>> idx = 26 + >>> print(classes) + ... ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] + >>> print(classes_dict) + ... {'sheep': 16, 'horse': 12, 'bicycle': 1, 'bottle': 4, 'cow': 9, 'sofa': 17, 'car': 6, 'dog': 11, 'cat': 7, 'person': 14, 'train': 18, 'diningtable': 10, 'aeroplane': 0, 'bus': 5, 'pottedplant': 15, 'tvmonitor': 19, 'chair': 8, 'bird': 2, 'boat': 3, 'motorbike': 13} + >>> print(imgs_file_list[idx]) + ... data/VOC/VOC2012/JPEGImages/2007_000423.jpg + >>> print(n_objs_list[idx]) + ... 2 + >>> print(imgs_ann_file_list[idx]) + ... data/VOC/VOC2012/Annotations/2007_000423.xml + >>> print(objs_info_list[idx]) + ... 14 0.173 0.461333333333 0.142 0.496 + ... 14 0.828 0.542666666667 0.188 0.594666666667 + >>> ann = tl.prepro.parse_darknet_ann_str_to_list(objs_info_list[idx]) + >>> print(ann) + ... 
[[14, 0.173, 0.461333333333, 0.142, 0.496], [14, 0.828, 0.542666666667, 0.188, 0.594666666667]] + >>> c, b = tl.prepro.parse_darknet_ann_list_to_cls_box(ann) + >>> print(c, b) + ... [14, 14] [[0.173, 0.461333333333, 0.142, 0.496], [0.828, 0.542666666667, 0.188, 0.594666666667]] + + References + ------------- + - `Pascal VOC2012 Website `__. + - `Pascal VOC2007 Website `__. + + """ + path = os.path.join(path, 'VOC') + + def _recursive_parse_xml_to_dict(xml): + """Recursively parses XML contents to python dict. + + We assume that `object` tags are the only ones that can appear + multiple times at the same level of a tree. + + Args: + xml: xml tree obtained by parsing XML file contents using lxml.etree + + Returns: + Python dictionary holding XML contents. + + """ + if not xml: + # if xml is not None: + return {xml.tag: xml.text} + result = {} + for child in xml: + child_result = _recursive_parse_xml_to_dict(child) + if child.tag != 'object': + result[child.tag] = child_result[child.tag] + else: + if child.tag not in result: + result[child.tag] = [] + result[child.tag].append(child_result[child.tag]) + return {xml.tag: result} + + from lxml import etree # pip install lxml + import xml.etree.ElementTree as ET + + if dataset == "2012": + url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/" + tar_filename = "VOCtrainval_11-May-2012.tar" + extracted_filename = "VOC2012" #"VOCdevkit/VOC2012" + logging.info(" [============= VOC 2012 =============]") + elif dataset == "2012test": + extracted_filename = "VOC2012test" #"VOCdevkit/VOC2012" + logging.info(" [============= VOC 2012 Test Set =============]") + logging.info(" \nAuthor: 2012test only have person annotation, so 2007test is highly recommended for testing !\n") + import time + time.sleep(3) + if os.path.isdir(os.path.join(path, extracted_filename)) is False: + logging.info("For VOC 2012 Test data - online registration required") + logging.info( + " Please download VOC2012test.tar from: \n register: 
http://host.robots.ox.ac.uk:8080 \n voc2012 : http://host.robots.ox.ac.uk:8080/eval/challenges/voc2012/ \ndownload: http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar" + ) + logging.info(" unzip VOC2012test.tar,rename the folder to VOC2012test and put it into %s" % path) + exit() + # # http://host.robots.ox.ac.uk:8080/eval/downloads/VOC2012test.tar + # url = "http://host.robots.ox.ac.uk:8080/eval/downloads/" + # tar_filename = "VOC2012test.tar" + elif dataset == "2007": + url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/" + tar_filename = "VOCtrainval_06-Nov-2007.tar" + extracted_filename = "VOC2007" + logging.info(" [============= VOC 2007 =============]") + elif dataset == "2007test": + # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html#testdata + # http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar + url = "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/" + tar_filename = "VOCtest_06-Nov-2007.tar" + extracted_filename = "VOC2007test" + logging.info(" [============= VOC 2007 Test Set =============]") + else: + raise Exception("Please set the dataset aug to 2012, 2012test or 2007.") + + # download dataset + if dataset != "2012test": + from sys import platform as _platform + if folder_exists(os.path.join(path, extracted_filename)) is False: + logging.info("[VOC] {} is nonexistent in {}".format(extracted_filename, path)) + maybe_download_and_extract(tar_filename, path, url, extract=True) + del_file(os.path.join(path, tar_filename)) + if dataset == "2012": + if _platform == "win32": + os.system("mv {}\VOCdevkit\VOC2012 {}\VOC2012".format(path, path)) + else: + os.system("mv {}/VOCdevkit/VOC2012 {}/VOC2012".format(path, path)) + elif dataset == "2007": + if _platform == "win32": + os.system("mv {}\VOCdevkit\VOC2007 {}\VOC2007".format(path, path)) + else: + os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007".format(path, path)) + elif dataset == "2007test": + if _platform == "win32": + os.system("mv {}\VOCdevkit\VOC2007 
{}\VOC2007test".format(path, path)) + else: + os.system("mv {}/VOCdevkit/VOC2007 {}/VOC2007test".format(path, path)) + del_folder(os.path.join(path, 'VOCdevkit')) + # object classes(labels) NOTE: YOU CAN CUSTOMIZE THIS LIST + classes = [ + "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", + "pottedplant", "sheep", "sofa", "train", "tvmonitor" + ] + if contain_classes_in_person: + classes_in_person = ["head", "hand", "foot"] + else: + classes_in_person = [] + + classes += classes_in_person # use extra 3 classes for person + + classes_dict = utils.list_string_to_dict(classes) + logging.info("[VOC] object classes {}".format(classes_dict)) + + # 1. image path list + # folder_imgs = path+"/"+extracted_filename+"/JPEGImages/" + folder_imgs = os.path.join(path, extracted_filename, "JPEGImages") + imgs_file_list = load_file_list(path=folder_imgs, regx='\\.jpg', printable=False) + logging.info("[VOC] {} images found".format(len(imgs_file_list))) + imgs_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000027.jpg --> 2007000027 + imgs_file_list = [os.path.join(folder_imgs, s) for s in imgs_file_list] + # logging.info('IM',imgs_file_list[0::3333], imgs_file_list[-1]) + if dataset != "2012test": + ##======== 2. 
semantic segmentation maps path list + # folder_semseg = path+"/"+extracted_filename+"/SegmentationClass/" + folder_semseg = os.path.join(path, extracted_filename, "SegmentationClass") + imgs_semseg_file_list = load_file_list(path=folder_semseg, regx='\\.png', printable=False) + logging.info("[VOC] {} maps for semantic segmentation found".format(len(imgs_semseg_file_list))) + imgs_semseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000032.png --> 2007000032 + imgs_semseg_file_list = [os.path.join(folder_semseg, s) for s in imgs_semseg_file_list] + # logging.info('Semantic Seg IM',imgs_semseg_file_list[0::333], imgs_semseg_file_list[-1]) + ##======== 3. instance segmentation maps path list + # folder_insseg = path+"/"+extracted_filename+"/SegmentationObject/" + folder_insseg = os.path.join(path, extracted_filename, "SegmentationObject") + imgs_insseg_file_list = load_file_list(path=folder_insseg, regx='\\.png', printable=False) + logging.info("[VOC] {} maps for instance segmentation found".format(len(imgs_semseg_file_list))) + imgs_insseg_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000032.png --> 2007000032 + imgs_insseg_file_list = [os.path.join(folder_insseg, s) for s in imgs_insseg_file_list] + # logging.info('Instance Seg IM',imgs_insseg_file_list[0::333], imgs_insseg_file_list[-1]) + else: + imgs_semseg_file_list = [] + imgs_insseg_file_list = [] + # 4. 
annotations for bounding box and object class + # folder_ann = path+"/"+extracted_filename+"/Annotations/" + folder_ann = os.path.join(path, extracted_filename, "Annotations") + imgs_ann_file_list = load_file_list(path=folder_ann, regx='\\.xml', printable=False) + logging.info("[VOC] {} XML annotation files for bounding box and object class found".format(len(imgs_ann_file_list))) + imgs_ann_file_list.sort(key=lambda s: int(s.replace('.', ' ').replace('_', '').split(' ')[-2])) # 2007_000027.xml --> 2007000027 + imgs_ann_file_list = [os.path.join(folder_ann, s) for s in imgs_ann_file_list] + # logging.info('ANN',imgs_ann_file_list[0::3333], imgs_ann_file_list[-1]) + + if dataset == "2012test": # remove unused images in JPEG folder + imgs_file_list_new = [] + for ann in imgs_ann_file_list: + ann = os.path.split(ann)[-1].split('.')[0] + for im in imgs_file_list: + if ann in im: + imgs_file_list_new.append(im) + break + imgs_file_list = imgs_file_list_new + logging.info("[VOC] keep %d images" % len(imgs_file_list_new)) + + # parse XML annotations + def convert(size, box): + dw = 1. / size[0] + dh = 1. 
/ size[1] + x = (box[0] + box[1]) / 2.0 + y = (box[2] + box[3]) / 2.0 + w = box[1] - box[0] + h = box[3] - box[2] + x = x * dw + w = w * dw + y = y * dh + h = h * dh + return x, y, w, h + + def convert_annotation(file_name): + """Given VOC2012 XML Annotations, returns number of objects and info.""" + in_file = open(file_name) + out_file = "" + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + n_objs = 0 + + for obj in root.iter('object'): + if dataset != "2012test": + difficult = obj.find('difficult').text + cls = obj.find('name').text + if cls not in classes or int(difficult) == 1: + continue + else: + cls = obj.find('name').text + if cls not in classes: + continue + cls_id = classes.index(cls) + xmlbox = obj.find('bndbox') + b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) + bb = convert((w, h), b) + + out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n' + n_objs += 1 + if cls in "person": + for part in obj.iter('part'): + cls = part.find('name').text + if cls not in classes_in_person: + continue + cls_id = classes.index(cls) + xmlbox = part.find('bndbox') + b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) + bb = convert((w, h), b) + # out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n') + out_file += str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n' + n_objs += 1 + in_file.close() + return n_objs, out_file + + logging.info("[VOC] Parsing xml annotations files") + n_objs_list = [] + objs_info_list = [] # Darknet Format list of string + objs_info_dicts = {} + for idx, ann_file in enumerate(imgs_ann_file_list): + n_objs, objs_info = convert_annotation(ann_file) + n_objs_list.append(n_objs) + objs_info_list.append(objs_info) + with 
tf.gfile.GFile(ann_file, 'r') as fid: + xml_str = fid.read() + xml = etree.fromstring(xml_str) + data = _recursive_parse_xml_to_dict(xml)['annotation'] + objs_info_dicts.update({imgs_file_list[idx]: data}) + + return imgs_file_list, imgs_semseg_file_list, imgs_insseg_file_list, imgs_ann_file_list, \ + classes, classes_in_person, classes_dict,\ + n_objs_list, objs_info_list, objs_info_dicts + + +def save_npz(save_list=None, name='model.npz', sess=None): """Input parameters and the file name, save parameters into .npz file. Use tl.utils.load_npz() to restore. Parameters ---------- - save_list : a list - Parameters want to be saved. - name : a string or None - The name of the .npz file. + save_list : list of tensor + A list of parameters (tensor) to be saved. + name : str + The name of the `.npz` file. sess : None or Session + Session may be required in some case. Examples -------- - >>> tl.files.save_npz(network.all_params, name='model_test.npz', sess=sess) - ... File saved to: model_test.npz - >>> load_params = tl.files.load_npz(name='model_test.npz') - ... Loading param0, (784, 800) - ... Loading param1, (800,) - ... Loading param2, (800, 800) - ... Loading param3, (800,) - ... Loading param4, (800, 10) - ... 
Loading param5, (10,) - >>> put parameters into a TensorLayer network, please see assign_params() + Save model to npz + + >>> tl.files.save_npz(network.all_params, name='model.npz', sess=sess) + + Load model from npz (Method 1) + + >>> load_params = tl.files.load_npz(name='model.npz') + >>> tl.files.assign_params(sess, load_params, network) + + Load model from npz (Method 2) + + >>> tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network) Notes ----- @@ -537,136 +1243,311 @@ def save_npz(save_list=[], name='model.npz', sess=None): References ---------- - - `Saving dictionary using numpy `_ + `Saving dictionary using numpy `__ + """ - ## save params into a list + if save_list is None: + save_list = [] + save_list_var = [] if sess: save_list_var = sess.run(save_list) else: try: - for k, value in enumerate(save_list): - save_list_var.append(value.eval()) - except: - print(" Fail to save model, Hint: pass the session into this function, save_npz(network.all_params, name='model.npz', sess=sess)") + save_list_var.extend([v.eval() for v in save_list]) + except Exception: + logging.info(" Fail to save model, Hint: pass the session into this function, tl.files.save_npz(network.all_params, name='model.npz', sess=sess)") np.savez(name, params=save_list_var) save_list_var = None del save_list_var - print("[*] %s saved" % name) + logging.info("[*] %s saved" % name) - ## save params into a dictionary - # rename_dict = {} - # for k, value in enumerate(save_dict): - # rename_dict.update({'param'+str(k) : value.eval()}) - # np.savez(name, **rename_dict) - # print('Model is saved to: %s' % name) def load_npz(path='', name='model.npz'): """Load the parameters of a Model saved by tl.files.save_npz(). Parameters ---------- - path : a string - Folder path to .npz file. - name : a string or None - The name of the .npz file. + path : str + Folder path to `.npz` file. + name : str + The name of the `.npz` file. 
Returns -------- - params : list + list of array A list of parameters in order. Examples -------- - - See save_npz and assign_params + - See ``tl.files.save_npz`` References ---------- - - `Saving dictionary using numpy `_ - """ - ## if save_npz save params into a dictionary - # d = np.load( path+name ) - # params = [] - # print('Load Model') - # for key, val in sorted( d.items() ): - # params.append(val) - # print('Loading %s, %s' % (key, str(val.shape))) - # return params - ## if save_npz save params into a list - d = np.load( path+name ) - # for val in sorted( d.items() ): - # params = val - # return params + - `Saving dictionary using numpy `__ + + """ + d = np.load(path + name) return d['params'] - # print(d.items()[0][1]['params']) - # exit() - # return d.items()[0][1]['params'] + def assign_params(sess, params, network): """Assign the given parameters to the TensorLayer network. Parameters ---------- - sess : TensorFlow Session - params : a list - A list of parameters in order. - network : a :class:`Layer` class - The network to be assigned + sess : Session + TensorFlow Session. + params : list of array + A list of parameters (array) in order. + network : :class:`Layer` + The network to be assigned. + + Returns + -------- + list of operations + A list of tf ops in order that assign params. Support sess.run(ops) manually. Examples -------- - >>> Save your network as follow: - >>> tl.files.save_npz(network.all_params, name='model_test.npz') - >>> network.print_params() - ... - ... 
Next time, load and assign your network as follow: - >>> sess.run(tf.initialize_all_variables()) # re-initialize, then save and assign - >>> load_params = tl.files.load_npz(name='model_test.npz') - >>> tl.files.assign_params(sess, load_params, network) - >>> network.print_params() + - See ``tl.files.save_npz`` References ---------- - - `Assign value to a TensorFlow variable `_ + - `Assign value to a TensorFlow variable `__ + """ ops = [] for idx, param in enumerate(params): ops.append(network.all_params[idx].assign(param)) - sess.run(ops) + if sess is not None: + sess.run(ops) + return ops + def load_and_assign_npz(sess=None, name=None, network=None): """Load model from npz and assign to a network. Parameters ------------- - sess : TensorFlow Session - name : string - Model path. - network : a :class:`Layer` class - The network to be assigned + sess : Session + TensorFlow Session. + name : str + The name of the `.npz` file. + network : :class:`Layer` + The network to be assigned. Returns -------- - Returns False if faild to model is not exist. + False or network + Returns False, if the model is not exist. Examples - --------- - >>> tl.files.load_and_assign_npz(sess=sess, name='net.npz', network=net) + -------- + - See ``tl.files.save_npz`` + """ - assert network is not None - assert sess is not None + if network is None: + raise ValueError("network is None.") + if sess is None: + raise ValueError("session is None.") if not os.path.exists(name): - print("[!] Load {} failed!".format(name)) + logging.info("[!] Load {} failed!".format(name)) return False else: params = load_npz(name=name) assign_params(sess, params, network) - print("[*] Load {} SUCCESS!".format(name)) + logging.info("[*] Load {} SUCCESS!".format(name)) return network -# Load and save variables -def save_any_to_npy(save_dict={}, name='file.npy'): - """Save variables to .npy file. 
+ +def save_npz_dict(save_list=None, name='model.npz', sess=None): + """Input parameters and the file name, save parameters as a dictionary into .npz file. + + Use ``tl.files.load_and_assign_npz_dict()`` to restore. + + Parameters + ---------- + save_list : list of parameters + A list of parameters (tensor) to be saved. + name : str + The name of the `.npz` file. + sess : Session + TensorFlow Session. + + """ + if sess is None: + raise ValueError("session is None.") + if save_list is None: + save_list = [] + + save_list_names = [tensor.name for tensor in save_list] + save_list_var = sess.run(save_list) + save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)} + np.savez(name, **save_var_dict) + save_list_var = None + save_var_dict = None + del save_list_var + del save_var_dict + logging.info("[*] Model saved in npz_dict %s" % name) + + +def load_and_assign_npz_dict(name='model.npz', sess=None): + """Restore the parameters saved by ``tl.files.save_npz_dict()``. + + Parameters + ---------- + name : str + The name of the `.npz` file. + sess : Session + TensorFlow Session. + + """ + if sess is None: + raise ValueError("session is None.") + + if not os.path.exists(name): + logging.info("[!] Load {} failed!".format(name)) + return False + + params = np.load(name) + if len(params.keys()) != len(set(params.keys())): + raise Exception("Duplication in model npz_dict %s" % name) + ops = list() + for key in params.keys(): + try: + # tensor = tf.get_default_graph().get_tensor_by_name(key) + # varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=key) + varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key) + if len(varlist) > 1: + raise Exception("[!] Multiple candidate variables to be assigned for name %s" % key) + elif len(varlist) == 0: + raise KeyError + else: + ops.append(varlist[0].assign(params[key])) + logging.info("[*] params restored: %s" % key) + except KeyError: + logging.info("[!] 
Warning: Tensor named %s not found in network." % key) + + sess.run(ops) + logging.info("[*] Model restored from npz_dict %s" % name) + + +def save_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, global_step=None, printable=False): + """Save parameters into `ckpt` file. + + Parameters + ------------ + sess : Session + TensorFlow Session. + mode_name : str + The name of the model, default is ``model.ckpt``. + save_dir : str + The path / file directory to the `ckpt`, default is ``checkpoint``. + var_list : list of tensor + The parameters / variables (tensor) to be saved. If empty, save all global variables (default). + global_step : int or None + Step number. + printable : boolean + Whether to print all parameters information. + + See Also + -------- + load_ckpt + + """ + if sess is None: + raise ValueError("session is None.") + if var_list is None: + var_list = [] + + ckpt_file = os.path.join(save_dir, mode_name) + if var_list == []: + var_list = tf.global_variables() + + logging.info("[*] save %s n_params: %d" % (ckpt_file, len(var_list))) + + if printable: + for idx, v in enumerate(var_list): + logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) + + saver = tf.train.Saver(var_list) + saver.save(sess, ckpt_file, global_step=global_step) + + +def load_ckpt(sess=None, mode_name='model.ckpt', save_dir='checkpoint', var_list=None, is_latest=True, printable=False): + """Load parameters from `ckpt` file. + + Parameters + ------------ + sess : Session + TensorFlow Session. + mode_name : str + The name of the model, default is ``model.ckpt``. + save_dir : str + The path / file directory to the `ckpt`, default is ``checkpoint``. + var_list : list of tensor + The parameters / variables (tensor) to be saved. If empty, save all global variables (default). + is_latest : boolean + Whether to load the latest `ckpt`, if False, load the `ckpt` with the name of ```mode_name``. 
+ printable : boolean + Whether to print all parameters information. + + Examples + ---------- + Save all global parameters. + + >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', save_dir='model', printable=True) + + Save specific parameters. + + >>> tl.files.save_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', printable=True) + + Load latest ckpt. + + >>> tl.files.load_ckpt(sess=sess, var_list=net.all_params, save_dir='model', printable=True) + + Load specific ckpt. + + >>> tl.files.load_ckpt(sess=sess, mode_name='model.ckpt', var_list=net.all_params, save_dir='model', is_latest=False, printable=True) + + """ + if sess is None: + raise ValueError("session is None.") + if var_list is None: + var_list = [] + + if is_latest: + ckpt_file = tf.train.latest_checkpoint(save_dir) + else: + ckpt_file = os.path.join(save_dir, mode_name) + + if not var_list: + var_list = tf.global_variables() + + logging.info("[*] load %s n_params: %d" % (ckpt_file, len(var_list))) + + if printable: + for idx, v in enumerate(var_list): + logging.info(" param {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) + + try: + saver = tf.train.Saver(var_list) + saver.restore(sess, ckpt_file) + except Exception as e: + logging.info(e) + logging.info("[*] load ckpt fail ...") + + +def save_any_to_npy(save_dict=None, name='file.npy'): + """Save variables to `.npy` file. + + Parameters + ------------ + save_dict : directory + The variables to be saved. + name : str + File name. Examples --------- @@ -674,90 +1555,110 @@ def save_any_to_npy(save_dict={}, name='file.npy'): >>> data = tl.files.load_npy_to_any(name='test.npy') >>> print(data) ... {'data': ['a','b']} + """ + if save_dict is None: + save_dict = {} np.save(name, save_dict) + def load_npy_to_any(path='', name='file.npy'): - """Load .npy file. + """Load `.npy` file. + + Parameters + ------------ + path : str + Path to the file (optional). + name : str + File name. 
Examples --------- - - see save_any_to_npy() + - see tl.files.save_any_to_npy() + """ file_path = os.path.join(path, name) try: - npy = np.load(file_path).item() - except: - npy = np.load(file_path) - finally: - try: - return npy - except: - print("[!] Fail to load %s" % file_path) - exit() + return np.load(file_path).item() + except Exception: + return np.load(file_path) + raise Exception("[!] Fail to load %s" % file_path) -# Visualizing npz files -def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'): - """Convert the first weight matrix of .npz file to .pdf by using tl.visualize.W(). +def file_exists(filepath): + """Check whether a file exists by given file path.""" + return os.path.isfile(filepath) + + +def folder_exists(folderpath): + """Check whether a folder exists by given folder path.""" + return os.path.isdir(folderpath) + + +def del_file(filepath): + """Delete a file by given file path.""" + os.remove(filepath) - Parameters - ---------- - path : a string or None - A folder path to npz files. - regx : a string - Regx for the file name. + +def del_folder(folderpath): + """Delete a folder by given folder path.""" + os.rmdir(folderpath) + + +def read_file(filepath): + """Read a file and return a string. Examples - -------- - >>> Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf. - >>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)') + --------- + >>> data = tl.files.read_file('data.txt') + """ - file_list = load_file_list(path=path, regx=regx) - for f in file_list: - W = load_npz(path, f)[0] - print("%s --> %s" % (f, f.split('.')[0]+'.pdf')) - visualize.W(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012) + with open(filepath, 'r') as afile: + return afile.read() -## Helper functions def load_file_list(path=None, regx='\.npz', printable=True): - """Return a file list in a folder by given a path and regular expression. + r"""Return a file list in a folder by given a path and regular expression. 
Parameters ---------- - path : a string or None - A folder path. - regx : a string + path : str or None + A folder path, if `None`, use the current directory. + regx : str The regx of file name. - printable : boolean, whether to print the files infomation. + printable : boolean + Whether to print the files infomation. Examples ---------- >>> file_list = tl.files.load_file_list(path=None, regx='w1pre_[0-9]+\.(npz)') + """ - if path == False: + if path is None: path = os.getcwd() file_list = os.listdir(path) return_list = [] - for idx, f in enumerate(file_list): + for _, f in enumerate(file_list): if re.search(regx, f): return_list.append(f) # return_list.sort() if printable: - print('Match file list = %s' % return_list) - print('Number of files = %d' % len(return_list)) + logging.info('Match file list = %s' % return_list) + logging.info('Number of files = %d' % len(return_list)) return return_list + def load_folder_list(path=""): """Return a folder list in a folder by given a folder path. Parameters ---------- - path : a string or None + path : str A folder path. + """ - return [os.path.join(path,o) for o in os.listdir(path) if os.path.isdir(os.path.join(path,o))] + return [os.path.join(path, o) for o in os.listdir(path) if os.path.isdir(os.path.join(path, o))] + def exists_or_mkdir(path, verbose=True): """Check a folder by given name, if not exist, create the folder and return False, @@ -765,94 +1666,152 @@ def exists_or_mkdir(path, verbose=True): Parameters ---------- - path : a string + path : str A folder path. verbose : boolean - If True, prints results, deaults is True + If True (default), prints results. Returns -------- - True if folder exist, otherwise, returns False and create the folder + boolean + True if folder already exist, otherwise, returns False and create the folder. Examples -------- >>> tl.files.exists_or_mkdir("checkpoints/train") + """ if not os.path.exists(path): if verbose: - print("[*] creates %s ..." 
% path) + logging.info("[*] creates %s ..." % path) os.makedirs(path) return False else: if verbose: - print("[!] %s exists ..." % path) + logging.info("[!] %s exists ..." % path) return True + def maybe_download_and_extract(filename, working_directory, url_source, extract=False, expected_bytes=None): """Checks if file exists in working_directory otherwise tries to dowload the file, and optionally also tries to extract the file if format is ".zip" or ".tar" Parameters - ---------- - filename : string + ----------- + filename : str The name of the (to be) dowloaded file. - working_directory : string + working_directory : str A folder path to search for the file in and dowload the file to - url : string + url : str The URL to download the file from - extract : bool, defaults to False - If True, tries to uncompress the dowloaded file is ".tar.gz/.tar.bz2" or ".zip" file - expected_bytes : int/None - If set tries to verify that the downloaded file is of the specified size, otherwise raises an Exception, - defaults to None which corresponds to no check being performed + extract : boolean + If True, tries to uncompress the dowloaded file is ".tar.gz/.tar.bz2" or ".zip" file, default is False. + expected_bytes : int or None + If set tries to verify that the downloaded file is of the specified size, otherwise raises an Exception, defaults is None which corresponds to no check being performed. + Returns ---------- - filepath to dowloaded (uncompressed) file + str + File path of the dowloaded (uncompressed) file. 
Examples -------- - >>> down_file = tl.files.maybe_download_and_extract(filename = 'train-images-idx3-ubyte.gz', - working_directory = 'data/', - url_source = 'http://yann.lecun.com/exdb/mnist/') - >>> tl.files.maybe_download_and_extract(filename = 'ADEChallengeData2016.zip', - working_directory = 'data/', - url_source = 'http://sceneparsing.csail.mit.edu/data/', - extract=True) + >>> down_file = tl.files.maybe_download_and_extract(filename='train-images-idx3-ubyte.gz', + ... working_directory='data/', + ... url_source='http://yann.lecun.com/exdb/mnist/') + >>> tl.files.maybe_download_and_extract(filename='ADEChallengeData2016.zip', + ... working_directory='data/', + ... url_source='http://sceneparsing.csail.mit.edu/data/', + ... extract=True) + """ + # We first define a download function, supporting both Python 2 and 3. def _download(filename, working_directory, url_source): def _dlProgress(count, blockSize, totalSize): - if(totalSize != 0): + if (totalSize != 0): percent = float(count * blockSize) / float(totalSize) * 100.0 sys.stdout.write("\r" "Downloading " + filename + "...%d%%" % percent) sys.stdout.flush() + if sys.version_info[0] == 2: from urllib import urlretrieve else: from urllib.request import urlretrieve filepath = os.path.join(working_directory, filename) - urlretrieve(url_source+filename, filepath, reporthook=_dlProgress) + urlretrieve(url_source + filename, filepath, reporthook=_dlProgress) + sys.stdout.write('\n') exists_or_mkdir(working_directory, verbose=False) filepath = os.path.join(working_directory, filename) if not os.path.exists(filepath): _download(filename, working_directory, url_source) - print() statinfo = os.stat(filepath) - print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') - if(not(expected_bytes is None) and (expected_bytes != statinfo.st_size)): + logging.info('Succesfully downloaded %s %s bytes.' 
% (filename, statinfo.st_size)) #, 'bytes.') + if (not (expected_bytes is None) and (expected_bytes != statinfo.st_size)): raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser?') - if(extract): + if (extract): if tarfile.is_tarfile(filepath): - print('Trying to extract tar file') + logging.info('Trying to extract tar file') tarfile.open(filepath, 'r').extractall(working_directory) - print('... Success!') + logging.info('... Success!') elif zipfile.is_zipfile(filepath): - print('Trying to extract zip file') + logging.info('Trying to extract zip file') with zipfile.ZipFile(filepath) as zf: zf.extractall(working_directory) - print('... Success!') + logging.info('... Success!') else: - print("Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported") + logging.info("Unknown compression_format only .tar.gz/.tar.bz2/.tar and .zip supported") return filepath + + +def natural_keys(text): + """Sort list of string with number in human order. + + Examples + ---------- + >>> l = ['im1.jpg', 'im31.jpg', 'im11.jpg', 'im21.jpg', 'im03.jpg', 'im05.jpg'] + >>> l.sort(key=tl.files.natural_keys) + ... ['im1.jpg', 'im03.jpg', 'im05', 'im11.jpg', 'im21.jpg', 'im31.jpg'] + >>> l.sort() # that is what we dont want + ... ['im03.jpg', 'im05', 'im1.jpg', 'im11.jpg', 'im21.jpg', 'im31.jpg'] + + References + ---------- + - `link `__ + + """ + + # - alist.sort(key=natural_keys) sorts in human order + # http://nedbatchelder.com/blog/200712/human_sorting.html + # (See Toothy's implementation in the comments) + def atoi(text): + return int(text) if text.isdigit() else text + + return [atoi(c) for c in re.split('(\d+)', text)] + + +# Visualizing npz files +def npz_to_W_pdf(path=None, regx='w1pre_[0-9]+\.(npz)'): + r"""Convert the first weight matrix of `.npz` file to `.pdf` by using `tl.visualize.W()`. + + Parameters + ---------- + path : str + A folder path to `npz` files. + regx : str + Regx for the file name. 
+ + Examples + --------- + Convert the first weight matrix of w1_pre...npz file to w1_pre...pdf. + + >>> tl.files.npz_to_W_pdf(path='/Users/.../npz_file/', regx='w1pre_[0-9]+\.(npz)') + + """ + file_list = load_file_list(path=path, regx=regx) + for f in file_list: + W = load_npz(path, f)[0] + logging.info("%s --> %s" % (f, f.split('.')[0] + '.pdf')) + visualize.draw_weights(W, second=10, saveable=True, name=f.split('.')[0], fig_idx=2012) diff --git a/tensorlayer/iterate.py b/tensorlayer/iterate.py index 9778fc6..777f905 100644 --- a/tensorlayer/iterate.py +++ b/tensorlayer/iterate.py @@ -1,31 +1,32 @@ #! /usr/bin/python -# -*- coding: utf8 -*- - - +# -*- coding: utf-8 -*- import numpy as np from six.moves import xrange +__all__ = [ + 'minibatches', + 'seq_minibatches', + 'seq_minibatches2', + 'ptb_iterator', +] + + def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): """Generate a generator that input a group of example in numpy.array and - their labels, return the examples and labels by the given batchsize. + their labels, return the examples and labels by the given batch size. Parameters ---------- inputs : numpy.array - (X) The input features, every row is a example. + The input features, every row is a example. targets : numpy.array - (y) The labels of inputs, every row is a example. + The labels of inputs, every row is a example. batch_size : int The batch size. shuffle : boolean Indicating whether to use a shuffling queue, shuffle the dataset before return. - Hints - ------- - - If you have two inputs, e.g. X1 (1000, 100) and X2 (1000, 80), you can ``np.hstack((X1, X2)) - into (1000, 180) and feed into ``inputs``, then you can split a batch of X1 and X2. - Examples -------- >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']]) @@ -41,6 +42,13 @@ def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): ... (array([['e', 'e'], ... ['f', 'f']], ... 
dtype='>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']]) >>> y = np.asarray([0, 1, 2, 3, 4, 5]) >>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=y, batch_size=2, seq_length=2, stride=1): @@ -77,7 +100,8 @@ def seq_minibatches(inputs, targets, batch_size, seq_length, stride=1): ... ... - - Many to One + Many to One + >>> return_last = True >>> num_steps = 2 >>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']]) @@ -96,52 +120,38 @@ def seq_minibatches(inputs, targets, batch_size, seq_length, stride=1): ... ['d' 'd'] ... ['d' 'd'] ... ['e' 'e']] [3 4] + """ assert len(inputs) == len(targets) n_loads = (batch_size * stride) + (seq_length - stride) for start_idx in range(0, len(inputs) - n_loads + 1, (batch_size * stride)): - seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[1:], - dtype=inputs.dtype) - seq_targets = np.zeros((batch_size, seq_length) + targets.shape[1:], - dtype=targets.dtype) + seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[1:], dtype=inputs.dtype) + seq_targets = np.zeros((batch_size, seq_length) + targets.shape[1:], dtype=targets.dtype) for b_idx in xrange(batch_size): start_seq_idx = start_idx + (b_idx * stride) end_seq_idx = start_seq_idx + seq_length seq_inputs[b_idx] = inputs[start_seq_idx:end_seq_idx] seq_targets[b_idx] = targets[start_seq_idx:end_seq_idx] - flatten_inputs = seq_inputs.reshape((-1,) + inputs.shape[1:]) - flatten_targets = seq_targets.reshape((-1,) + targets.shape[1:]) + flatten_inputs = seq_inputs.reshape((-1, ) + inputs.shape[1:]) + flatten_targets = seq_targets.reshape((-1, ) + targets.shape[1:]) yield flatten_inputs, flatten_targets + def seq_minibatches2(inputs, targets, batch_size, num_steps): """Generate a generator that iterates on two list of words. Yields (Returns) the source contexts and - the target context by the given batch_size and num_steps (sequence_length), - see ``PTB tutorial``. 
In TensorFlow's tutorial, this generates the batch_size pointers into the raw - PTB data, and allows minibatch iteration along these pointers. - - - Hint, if the input data are images, you can modify the code as follow. - - .. code-block:: python - - from - data = np.zeros([batch_size, batch_len) - to - data = np.zeros([batch_size, batch_len, inputs.shape[1], inputs.shape[2], inputs.shape[3]]) + the target context by the given batch_size and num_steps (sequence_length). + In TensorFlow's tutorial, this generates the `batch_size` pointers into the raw PTB data, and allows minibatch iteration along these pointers. Parameters ---------- - inputs : a list - the context in list format; note that context usually be - represented by splitting by space, and then convert to unique - word IDs. - targets : a list - the context in list format; note that context usually be - represented by splitting by space, and then convert to unique - word IDs. + inputs : list of data + The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs. + targets : list of data + The context in list format; note that context usually be represented by splitting by space, and then convert to unique word IDs. batch_size : int - the batch size. + The batch size. num_steps : int - the number of unrolls. i.e. sequence_length + The number of unrolls. i.e. sequence length Yields ------ @@ -174,16 +184,15 @@ def seq_minibatches2(inputs, targets, batch_size, num_steps): ... [[ 26. 27. 28.] ... [ 36. 37. 38.]] - Code References - --------------- - - ``tensorflow/models/rnn/ptb/reader.py`` + Notes + ----- + - Hint, if the input data are images, you can modify the source code `data = np.zeros([batch_size, batch_len)` to `data = np.zeros([batch_size, batch_len, inputs.shape[1], inputs.shape[2], inputs.shape[3]])`. 
""" assert len(inputs) == len(targets) data_len = len(inputs) batch_len = data_len // batch_size # data = np.zeros([batch_size, batch_len]) - data = np.zeros((batch_size, batch_len) + inputs.shape[1:], - dtype=inputs.dtype) + data = np.zeros((batch_size, batch_len) + inputs.shape[1:], dtype=inputs.dtype) data2 = np.zeros([batch_size, batch_len]) for i in range(batch_size): @@ -196,21 +205,16 @@ def seq_minibatches2(inputs, targets, batch_size, num_steps): raise ValueError("epoch_size == 0, decrease batch_size or num_steps") for i in range(epoch_size): - x = data[:, i*num_steps:(i+1)*num_steps] - x2 = data2[:, i*num_steps:(i+1)*num_steps] + x = data[:, i * num_steps:(i + 1) * num_steps] + x2 = data2[:, i * num_steps:(i + 1) * num_steps] yield (x, x2) def ptb_iterator(raw_data, batch_size, num_steps): - """ - Generate a generator that iterates on a list of words, see PTB tutorial. Yields (Returns) the source contexts and - the target context by the given batch_size and num_steps (sequence_length).\n - see ``PTB tutorial``. - - e.g. x = [0, 1, 2] y = [1, 2, 3] , when batch_size = 1, num_steps = 3, - raw_data = [i for i in range(100)] + """Generate a generator that iterates on a list of words, see `PTB example `__. + Yields the source contexts and the target context by the given batch_size and num_steps (sequence_length). - In TensorFlow's tutorial, this generates batch_size pointers into the raw + In TensorFlow's tutorial, this generates `batch_size` pointers into the raw PTB data, and allows minibatch iteration along these pointers. Parameters @@ -254,10 +258,6 @@ def ptb_iterator(raw_data, batch_size, num_steps): ... [16 17 18]] ... [[ 7 8 9] ... 
[17 18 19]] - - Code References - ---------------- - - ``tensorflow/models/rnn/ptb/reader.py`` """ raw_data = np.array(raw_data, dtype=np.int32) @@ -273,160 +273,6 @@ def ptb_iterator(raw_data, batch_size, num_steps): raise ValueError("epoch_size == 0, decrease batch_size or num_steps") for i in range(epoch_size): - x = data[:, i*num_steps:(i+1)*num_steps] - y = data[:, i*num_steps+1:(i+1)*num_steps+1] + x = data[:, i * num_steps:(i + 1) * num_steps] + y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1] yield (x, y) - - - -# def minibatches_for_sequence2D(inputs, targets, batch_size, sequence_length, stride=1): -# """ -# Input a group of example in 2D numpy.array and their labels. -# Return the examples and labels by the given batchsize, sequence_length. -# Use for RNN. -# -# Parameters -# ---------- -# inputs : numpy.array -# (X) The input features, every row is a example. -# targets : numpy.array -# (y) The labels of inputs, every row is a example. -# batchsize : int -# The batch size must be a multiple of sequence_length: int(batch_size % sequence_length) == 0 -# sequence_length : int -# The sequence length -# stride : int -# The stride step -# -# Examples -# -------- -# >>> sequence_length = 2 -# >>> batch_size = 4 -# >>> stride = 1 -# >>> X_train = np.asarray([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15],[16,17,18],[19,20,21],[22,23,24]]) -# >>> y_train = np.asarray(['0','1','2','3','4','5','6','7']) -# >>> print('X_train = %s' % X_train) -# >>> print('y_train = %s' % y_train) -# >>> for batch in minibatches_for_sequence2D(X_train, y_train, batch_size=batch_size, sequence_length=sequence_length, stride=stride): -# >>> inputs, targets = batch -# >>> print(inputs) -# >>> print(targets) -# ... [[ 1. 2. 3.] -# ... [ 4. 5. 6.] -# ... [ 4. 5. 6.] -# ... [ 7. 8. 9.]] -# ... [1 2] -# ... [[ 4. 5. 6.] -# ... [ 7. 8. 9.] -# ... [ 7. 8. 9.] -# ... [ 10. 11. 12.]] -# ... [2 3] -# ... ... -# ... [[ 16. 17. 18.] -# ... [ 19. 20. 21.] -# ... [ 19. 20. 21.] -# ... [ 22. 
23. 24.]] -# ... [6 7] -# """ -# print('len(targets)=%d batch_size=%d sequence_length=%d stride=%d' % (len(targets), batch_size, sequence_length, stride)) -# assert len(inputs) == len(targets), '1 feature vector have 1 target vector/value' #* sequence_length -# # assert int(batch_size % sequence_length) == 0, 'batch_size % sequence_length must == 0\ -# # batch_size is number of examples rather than number of targets' -# -# # print(inputs.shape, len(inputs), len(inputs[0])) -# -# n_targets = int(batch_size/sequence_length) -# # n_targets = int(np.ceil(batch_size/sequence_length)) -# X = np.empty(shape=(0,len(inputs[0])), dtype=np.float32) -# y = np.zeros(shape=(1, n_targets), dtype=np.int32) -# -# for idx in range(sequence_length, len(inputs), stride): # go through all example during 1 epoch -# for n in range(n_targets): # for num of target -# X = np.concatenate((X, inputs[idx-sequence_length+n:idx+n])) -# y[0][n] = targets[idx-1+n] -# # y = np.vstack((y, targets[idx-1+n])) -# yield X, y[0] -# X = np.empty(shape=(0,len(inputs[0]))) -# # y = np.empty(shape=(1,0)) -# -# -# def minibatches_for_sequence4D(inputs, targets, batch_size, sequence_length, stride=1): # -# """ -# Input a group of example in 4D numpy.array and their labels. -# Return the examples and labels by the given batchsize, sequence_length. -# Use for RNN. -# -# Parameters -# ---------- -# inputs : numpy.array -# (X) The input features, every row is a example. -# targets : numpy.array -# (y) The labels of inputs, every row is a example. 
-# batchsize : int -# The batch size must be a multiple of sequence_length: int(batch_size % sequence_length) == 0 -# sequence_length : int -# The sequence length -# stride : int -# The stride step -# -# Examples -# -------- -# >>> sequence_length = 2 -# >>> batch_size = 2 -# >>> stride = 1 -# >>> X_train = np.asarray([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15],[16,17,18],[19,20,21],[22,23,24]]) -# >>> y_train = np.asarray(['0','1','2','3','4','5','6','7']) -# >>> X_train = np.expand_dims(X_train, axis=1) -# >>> X_train = np.expand_dims(X_train, axis=3) -# >>> for batch in minibatches_for_sequence4D(X_train, y_train, batch_size=batch_size, sequence_length=sequence_length, stride=stride): -# >>> inputs, targets = batch -# >>> print(inputs) -# >>> print(targets) -# ... [[[[ 1.] -# ... [ 2.] -# ... [ 3.]]] -# ... [[[ 4.] -# ... [ 5.] -# ... [ 6.]]]] -# ... [1] -# ... [[[[ 4.] -# ... [ 5.] -# ... [ 6.]]] -# ... [[[ 7.] -# ... [ 8.] -# ... [ 9.]]]] -# ... [2] -# ... ... -# ... [[[[ 19.] -# ... [ 20.] -# ... [ 21.]]] -# ... [[[ 22.] -# ... [ 23.] -# ... [ 24.]]]] -# ... 
[7] -# """ -# print('len(targets)=%d batch_size=%d sequence_length=%d stride=%d' % (len(targets), batch_size, sequence_length, stride)) -# assert len(inputs) == len(targets), '1 feature vector have 1 target vector/value' #* sequence_length -# # assert int(batch_size % sequence_length) == 0, 'in LSTM, batch_size % sequence_length must == 0\ -# # batch_size is number of X_train rather than number of targets' -# assert stride >= 1, 'stride must be >=1, at least move 1 step for each iternation' -# -# n_example, n_channels, width, height = inputs.shape -# print('n_example=%d n_channels=%d width=%d height=%d' % (n_example, n_channels, width, height)) -# -# n_targets = int(np.ceil(batch_size/sequence_length)) # 实际为 batchsize/sequence_length + 1 -# print(n_targets) -# X = np.zeros(shape=(batch_size, n_channels, width, height), dtype=np.float32) -# # X = np.zeros(shape=(n_targets, sequence_length, n_channels, width, height), dtype=np.float32) -# y = np.zeros(shape=(1,n_targets), dtype=np.int32) -# # y = np.empty(shape=(0,1), dtype=np.float32) -# # time.sleep(2) -# for idx in range(sequence_length, n_example-n_targets+2, stride): # go through all example during 1 epoch -# for n in range(n_targets): # for num of target -# # print(idx+n, inputs[idx-sequence_length+n : idx+n].shape) -# X[n*sequence_length : (n+1)*sequence_length] = inputs[idx+n-sequence_length : idx+n] -# # X[n] = inputs[idx-sequence_length+n:idx+n] -# y[0][n] = targets[idx+n-1] -# # y = np.vstack((y, targets[idx-1+n])) -# # y = targets[idx: idx+n_targets] -# yield X, y[0] diff --git a/tensorlayer/layers.py b/tensorlayer/layers.py deleted file mode 100755 index 660df9a..0000000 --- a/tensorlayer/layers.py +++ /dev/null @@ -1,5546 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- - - - -import tensorflow as tf -import time -from . import visualize -from . import utils -from . import files -from . import cost -from . import iterate -from . 
import ops -import numpy as np -from six.moves import xrange -import random, warnings -import copy -import inspect -# __all__ = [ -# "Layer", -# "DenseLayer", -# ] - - -# set_keep = locals() -set_keep = globals() -set_keep['_layers_name_list'] =[] -set_keep['name_reuse'] = False - -try: # For TF12 and later - TF_GRAPHKEYS_VARIABLES = tf.GraphKeys.GLOBAL_VARIABLES -except: # For TF11 and before - TF_GRAPHKEYS_VARIABLES = tf.GraphKeys.VARIABLES - -## Variable Operation -def flatten_reshape(variable, name=''): - """Reshapes high-dimension input to a vector. - [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row * mask_col * n_mask] - - Parameters - ---------- - variable : a tensorflow variable - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - >>> W_conv2 = weight_variable([5, 5, 100, 32]) # 64 features for each 5x5 patch - >>> b_conv2 = bias_variable([32]) - >>> W_fc1 = weight_variable([7 * 7 * 32, 256]) - - >>> h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) - >>> h_pool2 = max_pool_2x2(h_conv2) - >>> h_pool2.get_shape()[:].as_list() = [batch_size, 7, 7, 32] - ... [batch_size, mask_row, mask_col, n_mask] - >>> h_pool2_flat = tl.layers.flatten_reshape(h_pool2) - ... [batch_size, mask_row * mask_col * n_mask] - >>> h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob) - ... - """ - dim = 1 - for d in variable.get_shape()[1:].as_list(): - dim *= d - return tf.reshape(variable, shape=[-1, dim], name=name) - -def clear_layers_name(): - """Clear all layer names in set_keep['_layers_name_list'], - enable layer name reuse. - - Examples - --------- - >>> network = tl.layers.InputLayer(x, name='input_layer') - >>> network = tl.layers.DenseLayer(network, n_units=800, name='relu1') - ... - >>> tl.layers.clear_layers_name() - >>> network2 = tl.layers.InputLayer(x, name='input_layer') - >>> network2 = tl.layers.DenseLayer(network2, n_units=800, name='relu1') - ... 
- """ - set_keep['_layers_name_list'] =[] - -def set_name_reuse(enable=True): - """Enable or disable reuse layer name. By default, each layer must has unique - name. When you want two or more input placeholder (inference) share the same - model parameters, you need to enable layer name reuse, then allow the - parameters have same name scope. - - Parameters - ------------ - enable : boolean, enable name reuse. - - Examples - ------------ - >>> def embed_seq(input_seqs, is_train, reuse): - >>> with tf.variable_scope("model", reuse=reuse): - >>> tl.layers.set_name_reuse(reuse) - >>> network = tl.layers.EmbeddingInputlayer( - ... inputs = input_seqs, - ... vocabulary_size = vocab_size, - ... embedding_size = embedding_size, - ... name = 'e_embedding') - >>> network = tl.layers.DynamicRNNLayer(network, - ... cell_fn = tf.nn.rnn_cell.BasicLSTMCell, - ... n_hidden = embedding_size, - ... dropout = (0.7 if is_train else None), - ... initializer = w_init, - ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs), - ... return_last = True, - ... name = 'e_dynamicrnn',) - >>> return network - >>> - >>> net_train = embed_seq(t_caption, is_train=True, reuse=False) - >>> net_test = embed_seq(t_caption, is_train=False, reuse=True) - - - see ``tutorial_ptb_lstm.py`` for example. - """ - set_keep['name_reuse'] = enable - -def initialize_rnn_state(state): - """Return the initialized RNN state. - The input is LSTMStateTuple or State of RNNCells. - - Parameters - ----------- - state : a RNN state. 
- """ - try: # TF1.0 - LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple - except: - LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple - - if isinstance(state, LSTMStateTuple): - c = state.c.eval() - h = state.h.eval() - return (c, h) - else: - new_state = state.eval() - return new_state - -def print_all_variables(train_only=False): - """Print all trainable and non-trainable variables - without tl.layers.initialize_global_variables(sess) - - Parameters - ---------- - train_only : boolean - If True, only print the trainable variables, otherwise, print all variables. - """ - # tvar = tf.trainable_variables() if train_only else tf.all_variables() - if train_only: - t_vars = tf.trainable_variables() - print(" [*] printing trainable variables") - else: - try: # TF1.0 - t_vars = tf.global_variables() - except: # TF0.12 - t_vars = tf.all_variables() - print(" [*] printing global variables") - for idx, v in enumerate(t_vars): - print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) - -def get_variables_with_name(name, train_only=True, printable=False): - """Get variable list by a given name scope. - - Examples - --------- - >>> dense_vars = tl.layers.get_variable_with_name('dense', True, True) - """ - print(" [*] geting variables with %s" % name) - # tvar = tf.trainable_variables() if train_only else tf.all_variables() - if train_only: - t_vars = tf.trainable_variables() - else: - try: # TF1.0 - t_vars = tf.global_variables() - except: # TF0.12 - t_vars = tf.all_variables() - - d_vars = [var for var in t_vars if name in var.name] - if printable: - for idx, v in enumerate(d_vars): - print(" got {:3}: {:15} {}".format(idx, v.name, str(v.get_shape()))) - return d_vars - -def get_layers_with_name(network=None, name="", printable=False): - """Get layer list in a network by a given name scope. 
- - Examples - --------- - >>> layers = tl.layers.get_layers_with_name(network, "CNN", True) - """ - assert network is not None - print(" [*] geting layers with %s" % name) - - layers = [] - i = 0 - for layer in network.all_layers: - # print(type(layer.name)) - if name in layer.name: - layers.append(layer) - if printable: - # print(layer.name) - print(" got {:3}: {:15} {}".format(i, layer.name, str(layer.get_shape()))) - i = i + 1 - return layers - -def list_remove_repeat(l=None): - """Remove the repeated items in a list, and return the processed list. - You may need it to create merged layer like Concat, Elementwise and etc. - - Parameters - ---------- - l : a list - - Examples - --------- - >>> l = [2, 3, 4, 2, 3] - >>> l = list_remove_repeat(l) - ... [2, 3, 4] - """ - l2 = [] - [l2.append(i) for i in l if not i in l2] - return l2 - -def initialize_global_variables(sess=None): - """Excute ``sess.run(tf.global_variables_initializer())`` for TF12+ or - sess.run(tf.initialize_all_variables()) for TF11. - - Parameters - ---------- - sess : a Session - """ - assert sess is not None - try: # TF12 - sess.run(tf.global_variables_initializer()) - except: # TF11 - sess.run(tf.initialize_all_variables()) - - -## Basic layer -class Layer(object): - """ - The :class:`Layer` class represents a single layer of a neural network. It - should be subclassed when implementing new types of layers. - Because each layer can keep track of the layer(s) feeding into it, a - network's output :class:`Layer` instance can double as a handle to the full - network. - - Parameters - ---------- - inputs : a :class:`Layer` instance - The `Layer` class feeding into this layer. - name : a string or None - An optional name to attach to this layer. 
- """ - def __init__( - self, - inputs = None, - name ='layer' - ): - self.inputs = inputs - scope_name=tf.get_variable_scope().name - if scope_name: - name = scope_name + '/' + name - if (name in set_keep['_layers_name_list']) and name_reuse == False: - raise Exception("Layer '%s' already exists, please choice other 'name' or reuse this layer\ - \nHint : Use different name for different 'Layer' (The name is used to control parameter sharing)" % name) - else: - self.name = name - if name not in ['', None, False]: - set_keep['_layers_name_list'].append(name) - - - def print_params(self, details=True): - ''' Print all info of parameters in the network''' - for i, p in enumerate(self.all_params): - if details: - try: - print(" param {:3}: {:15} (mean: {:<18}, median: {:<18}, std: {:<18}) {}".format(i, str(p.eval().shape), p.eval().mean(), np.median(p.eval()), p.eval().std(), p.name)) - except Exception as e: - print(str(e)) - raise Exception("Hint: print params details after tl.layers.initialize_global_variables(sess) or use network.print_params(False).") - else: - print(" param {:3}: {:15} {}".format(i, str(p.get_shape()), p.name)) - print(" num of params: %d" % self.count_params()) - - def print_layers(self): - ''' Print all info of layers in the network ''' - for i, p in enumerate(self.all_layers): - print(" layer %d: %s" % (i, str(p))) - - def count_params(self): - ''' Return the number of parameters in the network ''' - n_params = 0 - for i, p in enumerate(self.all_params): - n = 1 - # for s in p.eval().shape: - for s in p.get_shape(): - try: - s = int(s) - except: - s = 1 - if s: - n = n * s - n_params = n_params + n - return n_params - - def __str__(self): - # print("\nIt is a Layer class") - # self.print_params(False) - # self.print_layers() - return " Last layer is: %s" % self.__class__.__name__ - -## Input layer -class InputLayer(Layer): - """ - The :class:`InputLayer` class is the starting layer of a neural network. 
- - Parameters - ---------- - inputs : a placeholder or tensor - The input tensor data. - name : a string or None - An optional name to attach to this layer. - """ - def __init__( - self, - inputs = None, - name ='input_layer' - ): - Layer.__init__(self, inputs=inputs, name=name) - print(" [TL] InputLayer %s: %s" % (self.name, inputs.get_shape())) - self.outputs = inputs - self.all_layers = [] - self.all_params = [] - self.all_drop = {} - -## OneHot layer -class OneHotInputLayer(Layer): - """ - The :class:`OneHotInputLayer` class is the starting layer of a neural network, see ``tf.one_hot``. - - Parameters - ---------- - inputs : a placeholder or tensor - The input tensor data. - name : a string or None - An optional name to attach to this layer. - depth : If the input indices is rank N, the output will have rank N+1. The new axis is created at dimension axis (default: the new axis is appended at the end). - on_value : If on_value is not provided, it will default to the value 1 with type dtype. - default, None - off_value : If off_value is not provided, it will default to the value 0 with type dtype. - default, None - axis : default, None - dtype : default, None - """ - def __init__( - self, - inputs = None, - depth = None, - on_value = None, - off_value = None, - axis = None, - dtype=None, - name ='input_layer' - ): - Layer.__init__(self, inputs=inputs, name=name) - assert depth != None, "depth is not given" - print(" [TL]:Instantiate OneHotInputLayer %s: %s" % (self.name, inputs.get_shape())) - self.outputs = tf.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype) - self.all_layers = [] - self.all_params = [] - self.all_drop = {} - -## Word Embedding Input layer -class Word2vecEmbeddingInputlayer(Layer): - """ - The :class:`Word2vecEmbeddingInputlayer` class is a fully connected layer, - for Word Embedding. Words are input as integer index. - The output is the embedded word vector. 
- - Parameters - ---------- - inputs : placeholder - For word inputs. integer index format. - train_labels : placeholder - For word labels. integer index format. - vocabulary_size : int - The size of vocabulary, number of words. - embedding_size : int - The number of embedding dimensions. - num_sampled : int - The Number of negative examples for NCE loss. - nce_loss_args : a dictionary - The arguments for tf.nn.nce_loss() - E_init : embedding initializer - The initializer for initializing the embedding matrix. - E_init_args : a dictionary - The arguments for embedding initializer - nce_W_init : NCE decoder biases initializer - The initializer for initializing the nce decoder weight matrix. - nce_W_init_args : a dictionary - The arguments for initializing the nce decoder weight matrix. - nce_b_init : NCE decoder biases initializer - The initializer for tf.get_variable() of the nce decoder bias vector. - nce_b_init_args : a dictionary - The arguments for tf.get_variable() of the nce decoder bias vector. - name : a string or None - An optional name to attach to this layer. - - Variables - -------------- - nce_cost : a tensor - The NCE loss. - outputs : a tensor - The outputs of embedding layer. - normalized_embeddings : tensor - Normalized embedding matrix - - Examples - -------- - - Without TensorLayer : see tensorflow/examples/tutorials/word2vec/word2vec_basic.py - >>> train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) - >>> train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) - >>> embeddings = tf.Variable( - ... tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) - >>> embed = tf.nn.embedding_lookup(embeddings, train_inputs) - >>> nce_weights = tf.Variable( - ... tf.truncated_normal([vocabulary_size, embedding_size], - ... stddev=1.0 / math.sqrt(embedding_size))) - >>> nce_biases = tf.Variable(tf.zeros([vocabulary_size])) - >>> cost = tf.reduce_mean( - ... tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, - ... 
inputs=embed, labels=train_labels, - ... num_sampled=num_sampled, num_classes=vocabulary_size, - ... num_true=1)) - - - With TensorLayer : see tutorial_word2vec_basic.py - >>> train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) - >>> train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) - >>> emb_net = tl.layers.Word2vecEmbeddingInputlayer( - ... inputs = train_inputs, - ... train_labels = train_labels, - ... vocabulary_size = vocabulary_size, - ... embedding_size = embedding_size, - ... num_sampled = num_sampled, - ... name ='word2vec_layer', - ... ) - >>> cost = emb_net.nce_cost - >>> train_params = emb_net.all_params - >>> train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize( - ... cost, var_list=train_params) - >>> normalized_embeddings = emb_net.normalized_embeddings - - References - ---------- - - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `_ - """ - def __init__( - self, - inputs = None, - train_labels = None, - vocabulary_size = 80000, - embedding_size = 200, - num_sampled = 64, - nce_loss_args = {}, - E_init = tf.random_uniform_initializer(minval=-1.0, maxval=1.0), - E_init_args = {}, - nce_W_init = tf.truncated_normal_initializer(stddev=0.03), - nce_W_init_args = {}, - nce_b_init = tf.constant_initializer(value=0.0), - nce_b_init_args = {}, - name ='word2vec_layer', - ): - Layer.__init__(self, name=name) - self.inputs = inputs - print(" [TL] Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size)) - # Look up embeddings for inputs. - # Note: a row of 'embeddings' is the vector representation of a word. - # for the sake of speed, it is better to slice the embedding matrix - # instead of transfering a word id to one-hot-format vector and then - # multiply by the embedding matrix. - # embed is the outputs of the hidden layer (embedding layer), it is a - # row vector with 'embedding_size' values. 
class EmbeddingInputlayer(Layer):
    """A fully connected word-embedding input layer.

    Words enter as integer indices and leave as embedded word vectors.
    Assign a pre-trained matrix into this layer (e.g. via
    ``tl.files.assign_params``); to *train* an embedding matrix use
    :class:`Word2vecEmbeddingInputlayer` instead. Note that this
    embedding matrix is not meant to be updated during training.

    Parameters
    ----------
    inputs : placeholder
        Word inputs in integer-index format; a 2D tensor
        ``[batch_size, num_steps(num_words)]``.
    vocabulary_size : int
        The size of the vocabulary (number of words).
    embedding_size : int
        The number of embedding dimensions.
    E_init : initializer
        The initializer for the embedding matrix.
    E_init_args : dict or None
        Keyword arguments for ``tf.get_variable`` of the embedding matrix.
        Defaults to an empty dict.
    name : str or None
        An optional name to attach to this layer.

    Variables
    ---------
    outputs : tensor
        The embedded words; a 3D tensor
        ``[batch_size, num_steps(num_words), embedding_size]``.
    """
    def __init__(
            self,
            inputs=None,
            vocabulary_size=80000,
            embedding_size=200,
            E_init=tf.random_uniform_initializer(-0.1, 0.1),
            E_init_args=None,
            name='embedding_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = inputs
        # Fresh dict per call: a `{}` default would be shared across instances
        # (classic mutable-default pitfall).
        if E_init_args is None:
            E_init_args = {}
        print(" [TL] EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

        with tf.variable_scope(name):
            embeddings = tf.get_variable(
                name='embeddings',
                shape=(vocabulary_size, embedding_size),
                initializer=E_init,
                **E_init_args)
            # Slicing rows of the embedding matrix is far cheaper than a
            # one-hot encoding followed by a matmul.
            embed = tf.nn.embedding_lookup(embeddings, self.inputs)

        self.outputs = embed

        self.all_layers = [self.outputs]
        self.all_params = [embeddings]
        self.all_drop = {}
tf.random_uniform([n_in, n_units], -1.0, 1.0), name='W') - >>> b = tf.Variable(tf.zeros(shape=[n_units]), name='b') - >>> y = tf.nn.relu(tf.matmul(inputs, W) + b) - - Notes - ----- - If the input to this layer has more than two axes, it need to flatten the - input by using :class:`FlattenLayer` in this case. - """ - def __init__( - self, - layer = None, - n_units = 100, - act = tf.identity, - W_init = tf.truncated_normal_initializer(stddev=0.1), - b_init = tf.constant_initializer(value=0.0), - W_init_args = {}, - b_init_args = {}, - name ='dense_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - if self.inputs.get_shape().ndims != 2: - raise Exception("The input dimension must be rank 2, please reshape or flatten it") - - n_in = int(self.inputs.get_shape()[-1]) - self.n_units = n_units - print(" [TL] DenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) - with tf.variable_scope(name) as vs: - W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, **W_init_args ) - if b_init: - b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, **b_init_args ) - self.outputs = act(tf.matmul(self.inputs, W) + b) - else: - self.outputs = act(tf.matmul(self.inputs, W)) - - # Hint : list(), dict() is pass by value (shallow), without them, it is - # pass by reference. - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - if b_init: - self.all_params.extend( [W, b] ) - else: - self.all_params.extend( [W] ) - -class ReconLayer(DenseLayer): - """ - The :class:`ReconLayer` class is a reconstruction layer `DenseLayer` which - use to pre-train a `DenseLayer`. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - x_recon : tensorflow variable - The variables used for reconstruction. - name : a string or None - An optional name to attach to this layer. 
class ReconLayer(DenseLayer):
    """A reconstruction `DenseLayer`, used to pre-train a `DenseLayer`.

    Parameters
    ----------
    layer : :class:`Layer`
        The layer feeding into this one.
    x_recon : tensor
        The variables used for reconstruction.
    name : str or None
        An optional name to attach to this layer.
    n_units : int
        Number of units; should match the dimensionality of ``x_recon``.
    act : callable
        Activation of the reconstruction layer. Conventionally sigmoid for a
        sigmoid encoder, softplus for a rectifying encoder.

    Examples
    --------
    >>> network = tl.layers.InputLayer(x, name='input_layer')
    >>> network = tl.layers.DenseLayer(network, n_units=196,
    ...     act=tf.nn.sigmoid, name='sigmoid1')
    >>> recon_layer1 = tl.layers.ReconLayer(network, x_recon=x, n_units=784,
    ...     act=tf.nn.sigmoid, name='recon_layer1')
    >>> recon_layer1.pretrain(sess, x=x, X_train=X_train, X_val=X_val,
    ...     denoise_name=None, n_epoch=1200, batch_size=128,
    ...     print_freq=10, save=True, save_name='w1pre_')

    Notes
    -----
    The input layer should be a `DenseLayer` (or any layer with a single
    axis). Modify ``__init__`` to define your own cost function.
    """
    def __init__(
            self,
            layer=None,
            x_recon=None,
            name='recon_layer',
            n_units=784,
            act=tf.nn.softplus,
    ):
        DenseLayer.__init__(self, layer=layer, n_units=n_units, act=act, name=name)
        print(" [TL] %s is a ReconLayer" % self.name)

        # Reconstruction output; the trainable parameters are the last four:
        # [W_encoder, b_encoder, W_decoder, b_decoder].
        recon = self.outputs
        self.train_params = self.all_params[-4:]

        # =====================================================================
        # Modify the cost function and optimizer below to implement your own
        # pre-train method.
        # =====================================================================
        lambda_l2_w = 0.004
        learning_rate = 0.0001
        print(" lambda_l2_w: %f" % lambda_l2_w)
        print(" learning_rate: %f" % learning_rate)

        # Mean-squared-error (quadratic cost): sum over features, mean over
        # the batch — theano: ((y - x) ** 2).sum(axis=1).mean()
        mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(recon, x_recon), 1))

        # L2 weight decay on encoder and decoder weight matrices.
        l2 = tf.contrib.layers.l2_regularizer(lambda_l2_w)
        L2_w = l2(self.train_params[0]) + l2(self.train_params[2])

        # DropNeuro penalties (kept available; unused by the default costs).
        P_o = cost.lo_regularizer(0.03)(self.train_params[0])
        P_i = cost.li_regularizer(0.03)(self.train_params[0])

        # L1 sparsity on the hidden-layer activations.
        hidden_out = self.all_layers[-2]
        L1_a = 0.001 * tf.reduce_mean(hidden_out)

        # KL-divergence sparsity penalty toward target activation `rho`.
        beta = 4
        rho = 0.15
        p_hat = tf.reduce_mean(hidden_out, 0)
        try:  # TF >= 1.0 API
            KLD = beta * tf.reduce_sum(rho * tf.log(tf.divide(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.subtract(float(1), p_hat))))
        except:  # TF 0.12 fallback
            KLD = beta * tf.reduce_sum(rho * tf.log(tf.div(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.sub(float(1), p_hat))))

        # Total cost depends on the reconstruction activation.
        if act == tf.nn.softplus:
            print(' use: mse, L2_w, L1_a')
            self.cost = mse + L1_a + L2_w
        elif act == tf.nn.sigmoid:
            print(' use: mse, L2_w, KLD')
            self.cost = mse + L2_w + KLD
        else:
            raise Exception("Don't support the given reconstruct activation function")

        self.train_op = tf.train.AdamOptimizer(
            learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
            use_locking=False).minimize(self.cost, var_list=self.train_params)

    def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10,
                 save=True, save_name='w1pre_'):
        """Pre-train the parameters of the previous DenseLayer.

        Modify the cost function in ``__init__`` to get your own
        pre-train method.
        """
        print(" [*] %s start pretrain" % self.name)
        print(" batch_size: %d" % batch_size)
        if denoise_name:
            print(" denoising layer keep: %f" % self.all_drop[set_keep[denoise_name]])
            dp_denoise = self.all_drop[set_keep[denoise_name]]
        else:
            print(" no denoising layer")

        for epoch in range(n_epoch):
            start_time = time.time()
            # Training pass: dropout disabled except the denoising layer.
            for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True):
                dp_dict = utils.dict_to_one(self.all_drop)
                if denoise_name:
                    dp_dict[set_keep[denoise_name]] = dp_denoise
                feed_dict = {x: X_train_a}
                feed_dict.update(dp_dict)
                sess.run(self.train_op, feed_dict=feed_dict)

            if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
                print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
                # Evaluation passes with all dropout disabled.
                train_loss, n_batch = 0, 0
                for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True):
                    dp_dict = utils.dict_to_one(self.all_drop)
                    feed_dict = {x: X_train_a}
                    feed_dict.update(dp_dict)
                    train_loss += sess.run(self.cost, feed_dict=feed_dict)
                    n_batch += 1
                print(" train loss: %f" % (train_loss / n_batch))
                val_loss, n_batch = 0, 0
                for X_val_a, _ in iterate.minibatches(X_val, X_val, batch_size, shuffle=True):
                    dp_dict = utils.dict_to_one(self.all_drop)
                    feed_dict = {x: X_val_a}
                    feed_dict.update(dp_dict)
                    val_loss += sess.run(self.cost, feed_dict=feed_dict)
                    n_batch += 1
                print(" val loss: %f" % (val_loss / n_batch))
                if save:
                    try:
                        # NOTE(review): shape=[28, 28] assumes MNIST-like
                        # features — adjust for other datasets.
                        visualize.W(self.train_params[0].eval(), second=10, saveable=True, shape=[28, 28], name=save_name + str(epoch + 1), fig_idx=2012)
                        files.save_npz([self.all_params[0]], name=save_name + str(epoch + 1) + '.npz')
                    except:
                        raise Exception("You should change the visualize.W() in ReconLayer.pretrain(), if you want to save the feature images for different dataset")
class DropoutLayer(Layer):
    """A noise layer that randomly zeroes values with a given keep probability.

    Parameters
    ----------
    layer : :class:`Layer`
        The layer feeding into this one.
    keep : float
        The keeping probability; the lower, the more values are zeroed.
    is_fix : bool
        If True the keep probability is fixed in the graph and cannot be
        changed via ``feed_dict``. Default False.
    is_train : bool
        If False, this layer is skipped entirely. Default True.
    seed : int or None
        Random seed.
    name : str or None
        An optional name to attach to this layer.

    Examples
    --------
    >>> network = tl.layers.InputLayer(x, name='input_layer')
    >>> network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
    >>> # training: feed_dict.update(network.all_drop) to enable noise
    >>> # testing:  feed_dict.update(tl.utils.dict_to_one(network.all_drop))

    Notes
    -----
    Unlike :class:`BatchNormLayer`, dropout is controlled through
    ``feed_dict`` so one inference graph serves both training and testing;
    set ``is_fix=True`` to hard-wire the keep probability instead.
    """
    def __init__(
            self,
            layer=None,
            keep=0.5,
            is_fix=False,
            is_train=True,
            seed=None,
            name='dropout_layer',
    ):
        Layer.__init__(self, name=name)

        if not is_train:
            # Inference-only graph: pass the previous layer straight through.
            print(" [TL] skip DropoutLayer")
            self.outputs = layer.outputs
            self.all_layers = list(layer.all_layers)
            self.all_params = list(layer.all_params)
            self.all_drop = dict(layer.all_drop)
            return

        self.inputs = layer.outputs
        print(" [TL] DropoutLayer %s: keep:%f is_fix:%s" % (self.name, keep, is_fix))

        # The keep_prob placeholder (when not fixed) is registered in the
        # module-level `set_keep` dict under this layer's name, so callers
        # can toggle dropout through feed_dict.
        if is_fix:
            self.outputs = tf.nn.dropout(self.inputs, keep, seed=seed, name=name)
        else:
            set_keep[name] = tf.placeholder(tf.float32)
            self.outputs = tf.nn.dropout(self.inputs, set_keep[name], seed=seed, name=name)

        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        if not is_fix:
            self.all_drop.update({set_keep[name]: keep})
        self.all_layers.extend([self.outputs])
- """ - def __init__( - self, - layer = None, - mean = 0.0, - stddev = 1.0, - is_train = True, - seed = None, - name = 'gaussian_noise_layer', - ): - Layer.__init__(self, name=name) - if is_train is False: - print(" [TL] skip GaussianNoiseLayer") - self.outputs = layer.outputs - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - else: - self.inputs = layer.outputs - print(" [TL] GaussianNoiseLayer %s: mean:%f stddev:%f" % (self.name, mean, stddev)) - with tf.variable_scope(name) as vs: - # noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape()) - noise = tf.random_normal(shape = self.inputs.get_shape(), mean=mean, stddev=stddev, seed=seed) - self.outputs = self.inputs + noise - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - -class DropconnectDenseLayer(Layer): - """ - The :class:`DropconnectDenseLayer` class is ``DenseLayer`` with DropConnect - behaviour which randomly remove connection between this layer to previous - layer by a given keeping probability. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - keep : float - The keeping probability, the lower more values will be set to zero. - n_units : int - The number of units of the layer. - act : activation function - The function that is applied to the layer activations. - W_init : weights initializer - The initializer for initializing the weight matrix. - b_init : biases initializer - The initializer for initializing the bias vector. - W_init_args : dictionary - The arguments for the weights tf.get_variable(). - b_init_args : dictionary - The arguments for the biases tf.get_variable(). - name : a string or None - An optional name to attach to this layer. 
- - Examples - -------- - >>> network = tl.layers.InputLayer(x, name='input_layer') - >>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.8, - ... n_units=800, act = tf.nn.relu, name='dropconnect_relu1') - >>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.5, - ... n_units=800, act = tf.nn.relu, name='dropconnect_relu2') - >>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.5, - ... n_units=10, act = tl.activation.identity, name='output_layer') - - References - ---------- - - `Wan, L. (2013). Regularization of neural networks using dropconnect `_ - """ - def __init__( - self, - layer = None, - keep = 0.5, - n_units = 100, - act = tf.identity, - W_init = tf.truncated_normal_initializer(stddev=0.1), - b_init = tf.constant_initializer(value=0.0), - W_init_args = {}, - b_init_args = {}, - name ='dropconnect_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - if self.inputs.get_shape().ndims != 2: - raise Exception("The input dimension must be rank 2") - n_in = int(self.inputs.get_shape()[-1]) - self.n_units = n_units - print(" [TL] DropconnectDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) - - with tf.variable_scope(name) as vs: - W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, **W_init_args ) - b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, **b_init_args ) - self.outputs = act(tf.matmul(self.inputs, W) + b)#, name=name) # 1.2 - - set_keep[name] = tf.placeholder(tf.float32) - W_dropcon = tf.nn.dropout(W, set_keep[name]) - self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_drop.update( {set_keep[name]: keep} ) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( [W, b] ) - -## Convolutional layer (Pro) - -class Conv1dLayer(Layer): - """ - The :class:`Conv1dLayer` class is a 1D CNN layer, 
class Conv1dLayer(Layer):
    """A 1D CNN layer; see ``tf.nn.conv1d``.

    Parameters
    ----------
    layer : :class:`Layer`
        The layer feeding into this one, shaped [batch, in_width, in_channels].
    act : callable or None
        Activation function; None means identity.
    shape : list of int
        Filter shape [filter_length, in_channels, out_channels].
    stride : int
        How far the filter moves right at each step.
    padding : str
        "SAME" or "VALID".
    use_cudnn_on_gpu : bool or None
        Defaults to True (passed through to ``tf.nn.conv1d``).
    data_format : str or None
        "NHWC" (default, [batch, in_width, in_channels]) or "NCHW".
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, biases are skipped.
    W_init_args : dict or None
        Keyword arguments for the weight ``tf.get_variable``.
    b_init_args : dict or None
        Keyword arguments for the bias ``tf.get_variable``.
    name : str or None
        An optional name to attach to this layer.
    """
    def __init__(
            self,
            layer=None,
            act=tf.identity,
            shape=[5, 1, 5],
            stride=1,
            padding='SAME',
            use_cudnn_on_gpu=None,
            data_format=None,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='cnn_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs
        # Fresh dicts per call: `{}` defaults would be shared across instances.
        W_init_args = {} if W_init_args is None else W_init_args
        b_init_args = {} if b_init_args is None else b_init_args
        print(" [TL] Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" %
              (self.name, str(shape), str(stride), padding, act.__name__))
        if act is None:
            act = tf.identity
        with tf.variable_scope(name):
            W = tf.get_variable(name='W_conv1d', shape=shape, initializer=W_init, **W_init_args)
            if b_init:
                b = tf.get_variable(name='b_conv1d', shape=(shape[-1],), initializer=b_init, **b_init_args)
                self.outputs = act(tf.nn.conv1d(self.inputs, W, stride=stride, padding=padding,
                                                use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format) + b)
            else:
                self.outputs = act(tf.nn.conv1d(self.inputs, W, stride=stride, padding=padding,
                                                use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format))

        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
        if b_init:
            self.all_params.extend([W, b])
        else:
            self.all_params.extend([W])
class Conv2dLayer(Layer):
    """A 2D CNN layer; see ``tf.nn.conv2d``.

    Parameters
    ----------
    layer : :class:`Layer`
        The layer feeding into this one.
    act : callable
        The activation function.
    shape : list of int
        Filter shape [filter_height, filter_width, in_channels, out_channels].
    strides : list of int
        Sliding-window stride for each input dimension, in the order given
        by ``data_format``.
    padding : str
        "SAME" or "VALID".
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, biases are skipped.
    W_init_args : dict or None
        Keyword arguments for the weight ``tf.get_variable``.
    b_init_args : dict or None
        Keyword arguments for the bias ``tf.get_variable``.
    use_cudnn_on_gpu : bool or None
        Defaults to True (passed through to ``tf.nn.conv2d``).
    data_format : str or None
        "NHWC" (default) or "NCHW".
    name : str or None
        An optional name to attach to this layer.

    Notes
    -----
    - shape = [h, w, output channels of the previous layer, output channels]
    - the number of output channels of a layer is its last dimension.

    Examples
    --------
    >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    >>> network = tl.layers.InputLayer(x, name='input_layer')
    >>> network = tl.layers.Conv2dLayer(network,
    ...     act=tf.nn.relu,
    ...     shape=[5, 5, 1, 32],      # 32 features for each 5x5 patch
    ...     strides=[1, 1, 1, 1],
    ...     padding='SAME',
    ...     W_init=tf.truncated_normal_initializer(stddev=5e-2),
    ...     b_init=tf.constant_initializer(value=0.0),
    ...     name='cnn_layer1')         # output: (?, 28, 28, 32)
    """
    def __init__(
            self,
            layer=None,
            act=tf.identity,
            shape=[5, 5, 1, 100],
            strides=[1, 1, 1, 1],
            padding='SAME',
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            use_cudnn_on_gpu=None,
            data_format=None,
            name='cnn_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs
        # Fresh dicts per call: `{}` defaults would be shared across instances.
        W_init_args = {} if W_init_args is None else W_init_args
        b_init_args = {} if b_init_args is None else b_init_args
        print(" [TL] Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" %
              (self.name, str(shape), str(strides), padding, act.__name__))

        with tf.variable_scope(name):
            W = tf.get_variable(name='W_conv2d', shape=shape, initializer=W_init, **W_init_args)
            if b_init:
                b = tf.get_variable(name='b_conv2d', shape=(shape[-1],), initializer=b_init, **b_init_args)
                self.outputs = act(tf.nn.conv2d(self.inputs, W, strides=strides, padding=padding,
                                                use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format) + b)
            else:
                self.outputs = act(tf.nn.conv2d(self.inputs, W, strides=strides, padding=padding,
                                                use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format))

        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
        if b_init:
            self.all_params.extend([W, b])
        else:
            self.all_params.extend([W])
class DeConv2dLayer(Layer):
    """A 2D deconvolutional (transposed convolution) layer; see
    ``tf.nn.conv2d_transpose``.

    Parameters
    ----------
    layer : :class:`Layer`
        The layer feeding into this one.
    act : callable
        The activation function.
    shape : list of int
        Filter shape [height, width, output_channels, in_channels]; the
        filter's in_channels must match that of the input.
    output_shape : list of int
        The output shape of the deconvolution op.
    strides : list of int
        Sliding-window stride for each dimension of the input tensor.
    padding : str
        "SAME" or "VALID".
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, biases are skipped.
    W_init_args : dict or None
        Keyword arguments for the weight ``tf.get_variable``.
    b_init_args : dict or None
        Keyword arguments for the bias ``tf.get_variable``.
    name : str or None
        An optional name to attach to this layer.

    Notes
    -----
    - shape = [h, w, output channels of this layer, output channels of the
      previous layer]  (note the transposed channel order)
    - output_shape = [batch_size, any, any, output channels of this layer]

    Examples
    --------
    >>> net_h1 = tl.layers.DeConv2dLayer(net_h0,
    ...     shape=[5, 5, 256, 512],
    ...     output_shape=[batch_size, 8, 8, 256],
    ...     strides=[1, 2, 2, 1],
    ...     act=tf.identity, name='g/h1/decon2d')
    """
    def __init__(
            self,
            layer=None,
            act=tf.identity,
            shape=[3, 3, 128, 256],
            output_shape=[1, 256, 256, 128],
            strides=[1, 2, 2, 1],
            padding='SAME',
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='decnn2d_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs
        # Fresh dicts per call: `{}` defaults would be shared across instances.
        W_init_args = {} if W_init_args is None else W_init_args
        b_init_args = {} if b_init_args is None else b_init_args
        print(" [TL] DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" %
              (self.name, str(shape), str(output_shape), str(strides), padding, act.__name__))

        with tf.variable_scope(name):
            W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, **W_init_args)
            if b_init:
                # shape[-2] is this layer's output-channel count for a
                # transposed convolution (not shape[-1]).
                b = tf.get_variable(name='b_deconv2d', shape=(shape[-2],), initializer=b_init, **b_init_args)
                self.outputs = act(tf.nn.conv2d_transpose(self.inputs, W, output_shape=output_shape,
                                                          strides=strides, padding=padding) + b)
            else:
                self.outputs = act(tf.nn.conv2d_transpose(self.inputs, W, output_shape=output_shape,
                                                          strides=strides, padding=padding))

        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend([self.outputs])
        if b_init:
            self.all_params.extend([W, b])
        else:
            self.all_params.extend([W])
    act : activation function
        The function that is applied to the layer activations.
    shape : list of shape
        shape of the filters, [filter_depth, filter_height, filter_width, in_channels, out_channels].
    strides : a list of ints. 1-D of length 4.
        The stride of the sliding window for each dimension of input. Must be in the same order as the dimension specified with format.
    padding : a string from: "SAME", "VALID".
        The type of padding algorithm to use.
    W_init : weights initializer
        The initializer for initializing the weight matrix.
    b_init : biases initializer
        The initializer for initializing the bias vector.
    W_init_args : dictionary
        The arguments for the weights initializer.
    b_init_args : dictionary
        The arguments for the biases initializer.
    name : a string or None
        An optional name to attach to this layer.
    """
    def __init__(
        self,
        layer = None,
        act = tf.identity,
        shape = [2, 2, 2, 64, 128],
        strides=[1, 2, 2, 2, 1],
        padding='SAME',
        W_init = tf.truncated_normal_initializer(stddev=0.02),
        b_init = tf.constant_initializer(value=0.0),
        W_init_args = {},
        b_init_args = {},
        name ='cnn3d_layer',
    ):
        Layer.__init__(self, name=name)
        self.inputs = layer.outputs
        print(" [TL] Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))

        with tf.variable_scope(name) as vs:
            # W = tf.Variable(W_init(shape=shape, **W_init_args), name='W_conv')
            # b = tf.Variable(b_init(shape=[shape[-1]], **b_init_args), name='b_conv')
            # W: 5-D filter bank; b: one bias per output channel (shape[-1]).
            W = tf.get_variable(name='W_conv3d', shape=shape, initializer=W_init, **W_init_args )
            b = tf.get_variable(name='b_conv3d', shape=(shape[-1]), initializer=b_init, **b_init_args )
            self.outputs = act( tf.nn.conv3d(self.inputs, W, strides=strides, padding=padding, name=None) + b )

        # self.outputs = act( tf.nn.conv3d(self.inputs, W, strides=strides, padding=padding, name=None) + b )

        # Inherit graph lists from the previous layer and register this layer.
        self.all_layers = list(layer.all_layers)
        self.all_params = list(layer.all_params)
        self.all_drop = dict(layer.all_drop)
        self.all_layers.extend( [self.outputs] )
        self.all_params.extend( [W, b] )

class DeConv3dLayer(Layer):
    """The :class:`DeConv3dLayer` class is deconvolutional 3D layer, see `tf.nn.conv3d_transpose `_.

    Parameters
    ----------
    layer : a :class:`Layer` instance
        The `Layer` class feeding into this layer.
    act : activation function
        The function that is applied to the layer activations.
    shape : list of shape
        shape of the filters, [depth, height, width, output_channels, in_channels], filter's in_channels dimension must match that of value.
    output_shape : list of output shape
        representing the output shape of the deconvolution op.
    strides : a list of ints.
        The stride of the sliding window for each dimension of the input tensor.
    padding : a string from: "SAME", "VALID".
        The type of padding algorithm to use.
    W_init : weights initializer
        The initializer for initializing the weight matrix.
    b_init : biases initializer
        The initializer for initializing the bias vector.
    W_init_args : dictionary
        The arguments for the weights initializer.
    b_init_args : dictionary
        The arguments for the biases initializer.
    name : a string or None
        An optional name to attach to this layer.
- """ - def __init__( - self, - layer = None, - act = tf.identity, - shape = [2, 2, 2, 128, 256], - output_shape = [1, 12, 32, 32, 128], - strides = [1, 2, 2, 2, 1], - padding = 'SAME', - W_init = tf.truncated_normal_initializer(stddev=0.02), - b_init = tf.constant_initializer(value=0.0), - W_init_args = {}, - b_init_args = {}, - name ='decnn3d_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % - (self.name, str(shape), str(output_shape), str(strides), padding, act.__name__)) - - with tf.variable_scope(name) as vs: - W = tf.get_variable(name='W_deconv3d', shape=shape, initializer=W_init, **W_init_args ) - b = tf.get_variable(name='b_deconv3d', shape=(shape[-2]), initializer=b_init, **b_init_args ) - - self.outputs = act( tf.nn.conv3d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding) + b ) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( [W, b] ) - -class UpSampling2dLayer(Layer): - """The :class:`UpSampling2dLayer` class is upSampling 2d layer, see `tf.image.resize_images `_. - - Parameters - ----------- - layer : a layer class with 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. - size : a tupe of int or float. - (height, width) scale factor or new size of height and width. - is_scale : boolean, if True (default), size is scale factor, otherwise, size is number of pixels of height and width. - method : 0, 1, 2, 3. ResizeMethod. Defaults to ResizeMethod.BILINEAR. - - ResizeMethod.BILINEAR, Bilinear interpolation. - - ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation. - - ResizeMethod.BICUBIC, Bicubic interpolation. - - ResizeMethod.AREA, Area interpolation. - align_corners : bool. 
If true, exactly align all 4 corners of the input and output. Defaults to false. - name : a string or None - An optional name to attach to this layer. - """ - def __init__( - self, - layer = None, - size = [], - is_scale = True, - method = 0, - align_corners = False, - name ='upsample2d_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - if len(self.inputs.get_shape()) == 3: - if is_scale: - size_h = size[0] * int(self.inputs.get_shape()[0]) - size_w = size[1] * int(self.inputs.get_shape()[1]) - size = [size_h, size_w] - elif len(self.inputs.get_shape()) == 4: - if is_scale: - size_h = size[0] * int(self.inputs.get_shape()[1]) - size_w = size[1] * int(self.inputs.get_shape()[2]) - size = [size_h, size_w] - else: - raise Exception("Donot support shape %s" % self.inputs.get_shape()) - print(" [TL] UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % - (name, is_scale, size, method, align_corners)) - with tf.variable_scope(name) as vs: - try: - self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) - except: # for TF 0.10 - self.outputs = tf.image.resize_images(self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -class DownSampling2dLayer(Layer): - """The :class:`DownSampling2dLayer` class is downSampling 2d layer, see `tf.image.resize_images `_. - - Parameters - ----------- - layer : a layer class with 4-D Tensor of shape [batch, height, width, channels] or 3-D Tensor of shape [height, width, channels]. - size : a tupe of int or float. - (height, width) scale factor or new size of height and width. - is_scale : boolean, if True (default), size is scale factor, otherwise, size is number of pixels of height and width. - method : 0, 1, 2, 3. ResizeMethod. 
Defaults to ResizeMethod.BILINEAR. - - ResizeMethod.BILINEAR, Bilinear interpolation. - - ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation. - - ResizeMethod.BICUBIC, Bicubic interpolation. - - ResizeMethod.AREA, Area interpolation. - align_corners : bool. If true, exactly align all 4 corners of the input and output. Defaults to false. - name : a string or None - An optional name to attach to this layer. - """ - def __init__( - self, - layer = None, - size = [], - is_scale = True, - method = 0, - align_corners = False, - name ='downsample2d_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - if len(self.inputs.get_shape()) == 3: - if is_scale: - size_h = size[0] * int(self.inputs.get_shape()[0]) - size_w = size[1] * int(self.inputs.get_shape()[1]) - size = [size_h, size_w] - elif len(self.inputs.get_shape()) == 4: - if is_scale: - size_h = size[0] * int(self.inputs.get_shape()[1]) - size_w = size[1] * int(self.inputs.get_shape()[2]) - size = [size_h, size_w] - else: - raise Exception("Donot support shape %s" % self.inputs.get_shape()) - print(" [TL] DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % - (name, is_scale, size, method, align_corners)) - with tf.variable_scope(name) as vs: - try: - self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) - except: # for TF 0.10 - self.outputs = tf.image.resize_images(self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -class AtrousConv2dLayer(Layer): - """The :class:`AtrousConv2dLayer` class is Atrous convolution (a.k.a. convolution with holes or dilated convolution) 2D layer, see `tf.nn.atrous_conv2d `_. 
- - Parameters - ----------- - layer : a layer class with 4-D Tensor of shape [batch, height, width, channels]. - filters : A 4-D Tensor with the same type as value and shape [filter_height, filter_width, in_channels, out_channels]. filters' in_channels dimension must match that of value. Atrous convolution is equivalent to standard convolution with upsampled filters with effective height filter_height + (filter_height - 1) * (rate - 1) and effective width filter_width + (filter_width - 1) * (rate - 1), produced by inserting rate - 1 zeros along consecutive elements across the filters' spatial dimensions. - n_filter : number of filter. - filter_size : tuple (height, width) for filter size. - rate : A positive int32. The stride with which we sample input values across the height and width dimensions. Equivalently, the rate by which we upsample the filter values by inserting zeros across the height and width dimensions. In the literature, the same parameter is sometimes called input stride or dilation. - act : activation function, None for linear. - padding : A string, either 'VALID' or 'SAME'. The padding algorithm. - W_init : weights initializer. The initializer for initializing the weight matrix. - b_init : biases initializer or None. The initializer for initializing the bias vector. If None, skip biases. - W_init_args : dictionary. The arguments for the weights tf.get_variable(). - b_init_args : dictionary. The arguments for the biases tf.get_variable(). - name : a string or None, an optional name to attach to this layer. 
- """ - def __init__( - self, - layer = None, - n_filter = 32, - filter_size = (3,3), - rate = 2, - act = None, - padding = 'SAME', - W_init = tf.truncated_normal_initializer(stddev=0.02), - b_init = tf.constant_initializer(value=0.0), - W_init_args = {}, - b_init_args = {}, - name = 'atrou2d' - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % - (self.name, n_filter, filter_size, rate, padding, act.__name__)) - if act is None: - act = tf.identity - with tf.variable_scope(name) as vs: - shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter] - filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, **W_init_args ) - if b_init: - b = tf.get_variable(name='b', shape=(n_filter), initializer=b_init, **b_init_args ) - self.outputs = act(tf.nn.atrous_conv2d(self.inputs, filters, rate, padding) + b) - else: - self.outputs = act(tf.nn.atrous_conv2d(self.inputs, filters, rate, padding)) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - if b_init: - self.all_params.extend( [filters, b] ) - else: - self.all_params.extend( [filters] ) - -class SeparableConv2dLayer(Layer):# Untested - """The :class:`SeparableConv2dLayer` class is 2-D convolution with separable filters, see `tf.layers.separable_conv2d `_. - - Parameters - ----------- - layer : a layer class - filters : integer, the dimensionality of the output space (i.e. the number output of filters in the convolution). - kernel_size : a tuple or list of N positive integers specifying the spatial dimensions of of the filters. Can be a single integer to specify the same value for all spatial dimensions. - strides : a tuple or list of N positive integers specifying the strides of the convolution. 
Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1. - padding : one of "valid" or "same" (case-insensitive). - data_format : A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shapedata_format = 'NWHC' (batch, width, height, channels) while channels_first corresponds to inputs with shape (batch, channels, width, height). - dilation_rate : an integer or tuple/list of 2 integers, specifying the dilation rate to use for dilated convolution. Can be a single integer to specify the same value for all spatial dimensions. Currently, specifying any dilation_rate value != 1 is incompatible with specifying any stride value != 1. - depth_multiplier : The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to num_filters_in * depth_multiplier. - act (activation) : Activation function. Set it to None to maintain a linear activation. - use_bias : Boolean, whether the layer uses a bias. - depthwise_initializer : An initializer for the depthwise convolution kernel. - pointwise_initializer : An initializer for the pointwise convolution kernel. - bias_initializer : An initializer for the bias vector. If None, no bias will be applied. - depthwise_regularizer : Optional regularizer for the depthwise convolution kernel. - pointwise_regularizer : Optional regularizer for the pointwise convolution kernel. - bias_regularizer : Optional regularizer for the bias vector. - activity_regularizer : Regularizer function for the output. - name : a string or None, an optional name to attach to this layer. 
- """ - def __init__( - self, - layer = None, - filters = None, - kernel_size=5, - strides=(1, 1), - padding='valid', - data_format='channels_last', - dilation_rate=(1, 1), - depth_multiplier=1, - act=None, - use_bias=True, - depthwise_initializer=None, - pointwise_initializer=None, - bias_initializer=tf.zeros_initializer, - depthwise_regularizer=None, - pointwise_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - name = 'atrou2d' - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - assert filters is not None - assert tf.__version__ > "0.12.1", "This layer only supports for TF 1.0+" - if act is None: - act = tf.identity - - bias_initializer = bias_initializer() - - print(" [TL] SeparableConv2dLayer %s: filters:%s kernel_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" % - (self.name, str(filters), str(kernel_size), str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__)) - - with tf.variable_scope(name) as vs: - self.outputs = tf.layers.separable_conv2d(self.inputs, filters, kernel_size, - strides=strides, padding=padding, data_format=data_format, - dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=act, - use_bias=use_bias, depthwise_initializer=depthwise_initializer, pointwise_initializer=pointwise_initializer, - bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, - pointwise_regularizer=pointwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer,) - #trainable=True, name=None, reuse=None) - - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( variables ) - -## Initializers for Convuolutional Layers -def deconv2d_bilinear_upsampling_initializer(shape): - """Returns 
initializer that can be passed to DeConv2dLayer to initalize the - weights to correspond to channel wise bilinear upsampling. - Used in some segmantic segmentation approches such as [FCN](https://arxiv.org/abs/1605.06211) - - Parameters - ---------- - shape : list of shape - shape of the filters, [height, width, output_channels, in_channels], must match that passed to DeConv2dLayer - - Returns - ---------- - tf.constant_initializer - with weights set to correspond to per channel bilinear upsampling when passed as W_int in DeConv2dLayer - - Examples - -------- - >>> rescale_factor = 2 #upsampling by a factor of 2, ie e.g 100->200 - >>> filter_size = (2 * rescale_factor - rescale_factor % 2) #Corresponding bilinear filter size - >>> num_in_channels = 3 - >>> num_out_channels = 3 - >>> deconv_filter_shape = [filter_size, filter_size, num_out_channels, num_in_channels] - >>> x = tf.placeholder(tf.float32, [1, imsize, imsize, num_channels]) - >>> network = tl.layers.InputLayer(x, name='input_layer') - >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape) - >>> network = tl.layers.DeConv2dLayer(network, - shape = filter_shape, - output_shape = [1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels], - strides=[1, rescale_factor, rescale_factor, 1], - W_init=bilinear_init, - padding='SAME', - act=tf.identity, name='g/h1/decon2d') - """ - if shape[0] != shape[1]: - raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes') - if shape[3] < shape [2]: - raise Exception('deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels ') - - filter_size = shape[0] - num_out_channels = shape[2] - num_in_channels = shape[3] - - #Create bilinear filter kernel as numpy array - bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32) - scale_factor = (filter_size + 1) // 2 - if filter_size % 2 == 1: - center = scale_factor - 1 - else: - center = 
scale_factor - 0.5 - for x in range(filter_size): - for y in range(filter_size): - bilinear_kernel[x,y] = (1 - abs(x - center) / scale_factor) * \ - (1 - abs(y - center) / scale_factor) - weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels)) - for i in range(num_out_channels): - weights[:, :, i, i] = bilinear_kernel - - #assign numpy array to constant_initalizer and pass to get_variable - bilinear_weights_init = tf.constant_initializer(value=weights, dtype=tf.float32) - return bilinear_weights_init - -## Convolutional layer (Simplified) -def Conv1d(net, n_filter=32, filter_size=5, stride=1, act=None, - padding='SAME', use_cudnn_on_gpu=None,data_format=None, - W_init = tf.truncated_normal_initializer(stddev=0.02), - b_init = tf.constant_initializer(value=0.0), - W_init_args = {}, b_init_args = {}, name ='conv1d',): - """Wrapper for :class:`Conv1dLayer`, if you don't understand how to use :class:`Conv1dLayer`, this function may be easier. - - Parameters - ---------- - net : TensorLayer layer. - n_filter : number of filter. - filter_size : an int. - stride : an int. - act : None or activation function. - others : see :class:`Conv1dLayer`. - """ - if act is None: - act = tf.identity - net = Conv1dLayer(layer = net, - act = act, - shape = [filter_size, int(net.outputs.get_shape()[-1]), n_filter], - stride = stride, - padding = padding, - use_cudnn_on_gpu = use_cudnn_on_gpu, - data_format = data_format, - W_init = W_init, - b_init = b_init, - W_init_args = W_init_args, - b_init_args = b_init_args, - name = name, - ) - return net - -def Conv2d(net, n_filter=32, filter_size=(3, 3), strides=(1, 1), act = None, - padding='SAME', W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0), - W_init_args = {}, b_init_args = {}, use_cudnn_on_gpu = None, data_format = None,name ='conv2d',): - """Wrapper for :class:`Conv2dLayer`, if you don't understand how to use :class:`Conv2dLayer`, this function may be easier. 

    Parameters
    ----------
    net : TensorLayer layer.
    n_filter : number of filter.
    filter_size : tuple (height, width) for filter size.
    strides : tuple (height, width) for strides.
    act : None or activation function.
    others : see :class:`Conv2dLayer`.

    Examples
    --------
    >>> w_init = tf.truncated_normal_initializer(stddev=0.01)
    >>> b_init = tf.constant_initializer(value=0.0)
    >>> inputs = InputLayer(x, name='inputs')
    >>> conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    >>> conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    >>> pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')
    >>> conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    >>> conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    >>> pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')
    """
    assert len(strides) == 2, "len(strides) should be 2, Conv2d and Conv2dLayer are different."
    if act is None:
        act = tf.identity
    # Translate the simplified (h, w) arguments into the 4-D shape/strides
    # expected by Conv2dLayer; in_channels comes from the incoming tensor.
    net = Conv2dLayer(net,
                    act = act,
                    shape = [filter_size[0], filter_size[1], int(net.outputs.get_shape()[-1]), n_filter], # 32 features for each 5x5 patch
                    strides = [1, strides[0], strides[1], 1],
                    padding = padding,
                    W_init = W_init,
                    W_init_args = W_init_args,
                    b_init = b_init,
                    b_init_args = b_init_args,
                    use_cudnn_on_gpu = use_cudnn_on_gpu,
                    data_format = data_format,
                    name = name)
    return net

def DeConv2d(net, n_out_channel = 32, filter_size=(3, 3),
        out_size = (30, 30), strides = (2, 2), padding = 'SAME', batch_size = None, act = None,
        W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0),
        W_init_args = {}, b_init_args = {}, name ='decnn2d'):
    """Wrapper for :class:`DeConv2dLayer`, if you don't understand how to use :class:`DeConv2dLayer`, this function may be easier.

    Parameters
    ----------
    net : TensorLayer layer.
    n_out_channel : int, number of output channel.
    filter_size : tuple of (height, width) for filter size.
    out_size : tuple of (height, width) of output.
    batch_size : int or None, batch_size. If None, try to find the batch_size from the first dim of net.outputs (you should tell the batch_size when define the input placeholder).
    strides : tuple of (height, width) for strides.
    act : None or activation function.
    others : see :class:`DeConv2dLayer`.
    """
    assert len(strides) == 2, "len(strides) should be 2, DeConv2d and DeConv2dLayer are different."
    if act is None:
        act = tf.identity
    if batch_size is None:
        # Infer the batch size dynamically from the incoming tensor.
        batch_size = tf.shape(net.outputs)[0]
    net = DeConv2dLayer(layer = net,
                    act = act,
                    shape = [filter_size[0], filter_size[1], n_out_channel, int(net.outputs.get_shape()[-1])],
                    output_shape = [batch_size, int(out_size[0]), int(out_size[1]), n_out_channel],
                    strides = [1, strides[0], strides[1], 1],
                    padding = padding,
                    W_init = W_init,
                    b_init = b_init,
                    W_init_args = W_init_args,
                    b_init_args = b_init_args,
                    name = name)
    return net

def MaxPool1d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested
    """Wrapper for `tf.layers.max_pooling1d `_ .

    Parameters
    ------------
    net : TensorLayer layer, the tensor over which to pool. Must have rank 3.
    filter_size (pool_size) : An integer or tuple/list of a single integer, representing the size of the pooling window.
    strides : An integer or tuple/list of a single integer, specifying the strides of the pooling operation.
    padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive.
    data_format : A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, length, channels) while channels_first corresponds to inputs with shape (batch, channels, length).
    name : A string, the name of the layer.

    Returns
    --------
    - A :class:`Layer` which the output tensor, of rank 3.
    """
    print(" [TL] MaxPool1d %s: filter_size:%s strides:%s padding:%s" %
          (name, str(filter_size), str(strides), str(padding)))
    outputs = tf.layers.max_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name)

    # NOTE(review): copy.copy is a SHALLOW copy, so net_new.all_layers is the
    # SAME list object as net.all_layers; the extend below therefore also
    # mutates the incoming layer's list — confirm this is intended.
    net_new = copy.copy(net)
    net_new.outputs = outputs
    net_new.all_layers.extend( [outputs] )
    return net_new

def MeanPool1d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested
    """Wrapper for `tf.layers.average_pooling1d `_ .

    Parameters
    ------------
    net : TensorLayer layer, the tensor over which to pool. Must have rank 3.
    filter_size (pool_size) : An integer or tuple/list of a single integer, representing the size of the pooling window.
    strides : An integer or tuple/list of a single integer, specifying the strides of the pooling operation.
    padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive.
    data_format : A string, one of channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch, length, channels) while channels_first corresponds to inputs with shape (batch, channels, length).
    name : A string, the name of the layer.

    Returns
    --------
    - A :class:`Layer` which the output tensor, of rank 3.
    """
    print(" [TL] MeanPool1d %s: filter_size:%s strides:%s padding:%s" %
          (name, str(filter_size), str(strides), str(padding)))
    outputs = tf.layers.average_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name)

    # NOTE(review): shallow copy shares all_layers with `net` (see MaxPool1d).
    net_new = copy.copy(net)
    net_new.outputs = outputs
    net_new.all_layers.extend( [outputs] )
    return net_new

def MaxPool2d(net, filter_size=(2, 2), strides=None, padding='SAME', name='maxpool'):
    """Wrapper for :class:`PoolLayer`.

    Parameters
    -----------
    net : TensorLayer layer.
    filter_size : tuple of (height, width) for filter size.
    strides : tuple of (height, width). Default is the same with filter_size.
    others : see :class:`PoolLayer`.
    """
    # Default: non-overlapping pooling windows.
    if strides is None:
        strides = filter_size
    assert len(strides) == 2, "len(strides) should be 2, MaxPool2d and PoolLayer are different."
    net = PoolLayer(net, ksize=[1, filter_size[0], filter_size[1], 1],
            strides=[1, strides[0], strides[1], 1],
            padding=padding,
            pool = tf.nn.max_pool,
            name = name)
    return net

def MeanPool2d(net, filter_size=(2, 2), strides=None, padding='SAME', name='meanpool'):
    """Wrapper for :class:`PoolLayer`.

    Parameters
    -----------
    net : TensorLayer layer.
    filter_size : tuple of (height, width) for filter size.
    strides : tuple of (height, width). Default is the same with filter_size.
    others : see :class:`PoolLayer`.
    """
    # Default: non-overlapping pooling windows.
    if strides is None:
        strides = filter_size
    assert len(strides) == 2, "len(strides) should be 2, MeanPool2d and PoolLayer are different."
    net = PoolLayer(net, ksize=[1, filter_size[0], filter_size[1], 1],
            strides=[1, strides[0], strides[1], 1],
            padding=padding,
            pool = tf.nn.avg_pool,
            name = name)
    return net

def MaxPool3d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested
    """Wrapper for `tf.layers.max_pooling3d `_ .

    Parameters
    ------------
    net : TensorLayer layer, the tensor over which to pool. Must have rank 5.
    filter_size (pool_size) : An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions.
    strides : An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions.
    padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive.
    data_format : A string. The ordering of the dimensions in the inputs.
        channels_last (default) and channels_first are supported. channels_last corresponds to inputs with shape (batch, depth, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, depth, height, width).
    name : A string, the name of the layer.
    """
    print(" [TL] MaxPool3d %s: filter_size:%s strides:%s padding:%s" %
          (name, str(filter_size), str(strides), str(padding)))
    outputs = tf.layers.max_pooling3d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name)

    # NOTE(review): shallow copy shares all_layers with `net`; extending it
    # also mutates the incoming layer's list — confirm this is intended.
    net_new = copy.copy(net)
    net_new.outputs = outputs
    net_new.all_layers.extend( [outputs] )
    return net_new

def MeanPool3d(net, filter_size, strides, padding='valid', data_format='channels_last', name=None): #Untested
    """Wrapper for `tf.layers.average_pooling3d `_

    Parameters
    ------------
    net : TensorLayer layer, the tensor over which to pool. Must have rank 5.
    filter_size (pool_size) : An integer or tuple/list of 3 integers: (pool_depth, pool_height, pool_width) specifying the size of the pooling window. Can be a single integer to specify the same value for all spatial dimensions.
    strides : An integer or tuple/list of 3 integers, specifying the strides of the pooling operation. Can be a single integer to specify the same value for all spatial dimensions.
    padding : A string. The padding method, either 'valid' or 'same'. Case-insensitive.
    data_format : A string. The ordering of the dimensions in the inputs. channels_last (default) and channels_first are supported. channels_last corresponds to inputs with shape (batch, depth, height, width, channels) while channels_first corresponds to inputs with shape (batch, channels, depth, height, width).
    name : A string, the name of the layer.
- """ - print(" [TL] MeanPool3d %s: filter_size:%s strides:%s padding:%s" % - (name, str(filter_size), str(strides), str(padding))) - outputs = tf.layers.average_pooling3d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) - - net_new = copy.copy(net) - net_new.outputs = outputs - net_new.all_layers.extend( [outputs] ) - return net_new - -## Super resolution -def SubpixelConv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): - """The :class:`SubpixelConv2d` class is a sub-pixel 2d convolutional ayer, usually be used - for super-resolution application. - - Parameters - ------------ - net : TensorLayer layer. - scale : int, upscaling ratio, a wrong setting will lead to Dimension size error. - n_out_channel : int or None, the number of output channels. - Note that, the number of input channels == (scale x scale) x The number of output channels. - If None, automatically set n_out_channel == the number of input channels / (scale x scale). - act : activation function. - name : string. - An optional name to attach to this layer. - - Examples - --------- - >>> # examples here just want to tell you how to set the n_out_channel. - >>> x = np.random.rand(2, 16, 16, 4) - >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4), name="X") - >>> net = InputLayer(X, name='input') - >>> net = SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d') - >>> y = sess.run(net.outputs, feed_dict={X: x}) - >>> print(x.shape, y.shape) - ... (2, 16, 16, 4) (2, 32, 32, 1) - >>> - >>> x = np.random.rand(2, 16, 16, 4*10) - >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4*10), name="X") - >>> net = InputLayer(X, name='input2') - >>> net = SubpixelConv2d(net, scale=2, n_out_channel=10, name='subpixel_conv2d2') - >>> y = sess.run(net.outputs, feed_dict={X: x}) - >>> print(x.shape, y.shape) - ... 
(2, 16, 16, 40) (2, 32, 32, 10) - >>> - >>> x = np.random.rand(2, 16, 16, 25*10) - >>> X = tf.placeholder("float32", shape=(2, 16, 16, 25*10), name="X") - >>> net = InputLayer(X, name='input3') - >>> net = SubpixelConv2d(net, scale=5, n_out_channel=None, name='subpixel_conv2d3') - >>> y = sess.run(net.outputs, feed_dict={X: x}) - >>> print(x.shape, y.shape) - ... (2, 16, 16, 250) (2, 80, 80, 10) - - References - ------------ - - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network `_ - """ - # github/Tetrachrome/subpixel https://github.com/Tetrachrome/subpixel/blob/master/subpixel.py - - _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels" - - scope_name = tf.get_variable_scope().name - if scope_name: - name = scope_name + '/' + name - - def _phase_shift(I, r): - if tf.__version__ < '1.0': - raise Exception("Only support TF1.0+") - bsize, a, b, c = I.get_shape().as_list() - bsize = tf.shape(I)[0] # Handling Dimension(None) type for undefined batch dim - X = tf.reshape(I, (bsize, a, b, r, r)) - X = tf.transpose(X, (0, 1, 2, 4, 3)) # bsize, a, b, 1, 1 # tf 0.12 - # X = tf.split(1, a, X) # a, [bsize, b, r, r] # tf 0.12 - X = tf.split(X, a, 1) - # X = tf.concat(2, [tf.squeeze(x, axis=1) for x in X]) # bsize, b, a*r, r # tf 0.12 - X = tf.concat([tf.squeeze(x, axis=1) for x in X], 2) - # X = tf.split(1, b, X) # b, [bsize, a*r, r] # tf 0.12 - X = tf.split(X, b, 1) - # X = tf.concat(2, [tf.squeeze(x, axis=1) for x in X]) # bsize, a*r, b*r # tf 0.12 - X = tf.concat([tf.squeeze(x, axis=1) for x in X], 2) - return tf.reshape(X, (bsize, a*r, b*r, 1)) - - def _PS(X, r, n_out_channel): - if n_out_channel > 1: - assert int(X.get_shape()[-1]) == (r ** 2) * n_out_channel, _err_log - Xc = tf.split(X, n_out_channel, 3) - X = tf.concat([_phase_shift(x, r) for x in Xc], 3) - elif n_out_channel == 1: - assert int(X.get_shape()[-1]) == (r ** 2), _err_log - X = _phase_shift(X, 
r) - else: - print(_err_log) - return X - - inputs = net.outputs - - if n_out_channel is None: - assert int(inputs.get_shape()[-1])/ (scale ** 2) % 1 == 0, _err_log - n_out_channel = int(int(inputs.get_shape()[-1])/ (scale ** 2)) - - print(" [TL] SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (name, scale, n_out_channel, act.__name__)) - - net_new = Layer(inputs, name=name) - # with tf.name_scope(name): - with tf.variable_scope(name) as vs: - net_new.outputs = act(_PS(inputs, r=scale, n_out_channel=n_out_channel)) - - net_new.all_layers = list(net.all_layers) - net_new.all_params = list(net.all_params) - net_new.all_drop = dict(net.all_drop) - net_new.all_layers.extend( [net_new.outputs] ) - return net_new - - -# ## Normalization layer -class LocalResponseNormLayer(Layer): - """The :class:`LocalResponseNormLayer` class is for Local Response Normalization, see ``tf.nn.local_response_normalization`` or ``tf.nn.lrn`` for new TF version. - The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. - Within a given vector, each component is divided by the weighted, squared sum of inputs within depth_radius. - - Parameters - ----------- - layer : a layer class. Must be one of the following types: float32, half. 4-D. - depth_radius : An optional int. Defaults to 5. 0-D. Half-width of the 1-D normalization window. - bias : An optional float. Defaults to 1. An offset (usually positive to avoid dividing by 0). - alpha : An optional float. Defaults to 1. A scale factor, usually positive. - beta : An optional float. Defaults to 0.5. An exponent. - name : A string or None, an optional name to attach to this layer. 
- """ - def __init__( - self, - layer = None, - depth_radius = None, - bias = None, - alpha = None, - beta = None, - name ='lrn_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] LocalResponseNormLayer %s: depth_radius: %d, bias: %f, alpha: %f, beta: %f" % - (self.name, depth_radius, bias, alpha, beta)) - with tf.variable_scope(name) as vs: - self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -class BatchNormLayer(Layer): - """ - The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. - - Batch normalization on fully-connected or convolutional maps. - - Parameters - ----------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - decay : float, default is 0.9. - A decay factor for ExponentialMovingAverage, use larger value for large dataset. - epsilon : float - A small float number to avoid dividing by 0. - act : activation function. - is_train : boolean - Whether train or inference. - beta_init : beta initializer - The initializer for initializing beta - gamma_init : gamma initializer - The initializer for initializing gamma - name : a string or None - An optional name to attach to this layer. 
- - References - ---------- - - `Source `_ - - `stackoverflow `_ - """ - def __init__( - self, - layer = None, - decay = 0.9, - epsilon = 0.00001, - act = tf.identity, - is_train = False, - beta_init = tf.zeros_initializer, - gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), # tf.ones_initializer, - name ='batchnorm_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % - (self.name, decay, epsilon, act.__name__, is_train)) - x_shape = self.inputs.get_shape() - params_shape = x_shape[-1:] - - from tensorflow.python.training import moving_averages - from tensorflow.python.ops import control_flow_ops - - with tf.variable_scope(name) as vs: - axis = list(range(len(x_shape) - 1)) - - ## 1. beta, gamma - if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer: - beta_init = beta_init() - beta = tf.get_variable('beta', shape=params_shape, - initializer=beta_init, - trainable=is_train)#, restore=restore) - - gamma = tf.get_variable('gamma', shape=params_shape, - initializer=gamma_init, trainable=is_train, - )#restore=restore) - - ## 2. - if tf.__version__ > '0.12.1': - moving_mean_init = tf.zeros_initializer() - else: - moving_mean_init = tf.zeros_initializer - moving_mean = tf.get_variable('moving_mean', - params_shape, - initializer=moving_mean_init, - trainable=False,)# restore=restore) - moving_variance = tf.get_variable('moving_variance', - params_shape, - initializer=tf.constant_initializer(1.), - trainable=False,)# restore=restore) - - ## 3. - # These ops will only be preformed when training. 
- mean, variance = tf.nn.moments(self.inputs, axis) - try: # TF12 - update_moving_mean = moving_averages.assign_moving_average( - moving_mean, mean, decay, zero_debias=False) # if zero_debias=True, has bias - update_moving_variance = moving_averages.assign_moving_average( - moving_variance, variance, decay, zero_debias=False) # if zero_debias=True, has bias - # print("TF12 moving") - except Exception as e: # TF11 - update_moving_mean = moving_averages.assign_moving_average( - moving_mean, mean, decay) - update_moving_variance = moving_averages.assign_moving_average( - moving_variance, variance, decay) - # print("TF11 moving") - - def mean_var_with_update(): - with tf.control_dependencies([update_moving_mean, update_moving_variance]): - return tf.identity(mean), tf.identity(variance) - - if is_train: - mean, var = mean_var_with_update() - self.outputs = act( tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon) ) - else: - self.outputs = act( tf.nn.batch_normalization(self.inputs, moving_mean, moving_variance, beta, gamma, epsilon) ) - - variables = [beta, gamma, moving_mean, moving_variance] - - # print(len(variables)) - # for idx, v in enumerate(variables): - # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v)) - # exit() - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( variables ) - -# class BatchNormLayer_TF(Layer): # Work well TF contrib https://github.com/tensorflow/tensorflow/blob/b826b79718e3e93148c3545e7aa3f90891744cc0/tensorflow/contrib/layers/python/layers/layers.py#L100 -# """ -# The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. -# -# Batch normalization on fully-connected or convolutional maps. -# -# Parameters -# ----------- -# layer : a :class:`Layer` instance -# The `Layer` class feeding into this layer. 
-# decay : float -# A decay factor for ExponentialMovingAverage. -# center: If True, subtract `beta`. If False, `beta` is ignored. -# scale: If True, multiply by `gamma`. If False, `gamma` is -# not used. When the next layer is linear (also e.g. `nn.relu`), this can be -# disabled since the scaling can be done by the next layer. -# epsilon : float -# A small float number to avoid dividing by 0. -# act : activation function. -# is_train : boolean -# Whether train or inference. -# beta_init : beta initializer -# The initializer for initializing beta -# gamma_init : gamma initializer -# The initializer for initializing gamma -# name : a string or None -# An optional name to attach to this layer. -# -# References -# ---------- -# - `Source `_ -# - `stackoverflow `_ -# """ -# def __init__( -# self, -# layer = None, -# decay = 0.95,#.999, -# center = True, -# scale = True, -# epsilon = 0.00001, -# act = tf.identity, -# is_train = False, -# beta_init = tf.zeros_initializer, -# # gamma_init = tf.ones_initializer, -# gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), -# name ='batchnorm_layer', -# ): -# Layer.__init__(self, name=name) -# self.inputs = layer.outputs -# print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % -# (self.name, decay, epsilon, act.__name__, is_train)) -# from tensorflow.contrib.layers.python.layers import utils -# from tensorflow.contrib.framework.python.ops import variables -# from tensorflow.python.ops import init_ops -# from tensorflow.python.ops import nn -# from tensorflow.python.training import moving_averages -# from tensorflow.python.framework import ops -# from tensorflow.python.ops import variable_scope -# variables_collections = None -# outputs_collections=None -# updates_collections=None#ops.GraphKeys.UPDATE_OPS -# # with variable_scope.variable_op_scope([inputs], -# # scope, 'BatchNorm', reuse=reuse) as sc: -# # with variable_scope.variable_op_scope([self.inputs], None, name) as vs: -# with 
tf.variable_scope(name) as vs: -# inputs_shape = self.inputs.get_shape() -# dtype = self.inputs.dtype.base_dtype -# axis = list(range(len(inputs_shape) - 1)) # [0, 1, 2] -# params_shape = inputs_shape[-1:] -# # Allocate parameters for the beta and gamma of the normalization. -# beta, gamma = None, None -# if center: -# beta_collections = utils.get_variable_collections(variables_collections, -# 'beta') -# beta = variables.model_variable('beta', -# shape=params_shape, -# dtype=dtype, -# # initializer=init_ops.zeros_initializer, -# initializer=beta_init, -# collections=beta_collections,) -# # trainable=trainable) -# if scale: -# gamma_collections = utils.get_variable_collections(variables_collections, -# 'gamma') -# gamma = variables.model_variable('gamma', -# shape=params_shape, -# dtype=dtype, -# # initializer=init_ops.ones_initializer, -# initializer=gamma_init, -# collections=gamma_collections,) -# # trainable=trainable) -# # Create moving_mean and moving_variance variables and add them to the -# # appropiate collections. -# moving_mean_collections = utils.get_variable_collections( -# variables_collections, -# 'moving_mean') -# moving_mean = variables.model_variable( -# 'moving_mean', -# shape=params_shape, -# dtype=dtype, -# # initializer=init_ops.zeros_initializer, -# initializer=tf.zeros_initializer, -# trainable=False, -# collections=moving_mean_collections) -# moving_variance_collections = utils.get_variable_collections( -# variables_collections, -# 'moving_variance') -# moving_variance = variables.model_variable( -# 'moving_variance', -# shape=params_shape, -# dtype=dtype, -# # initializer=init_ops.ones_initializer, -# initializer=tf.constant_initializer(1.), -# trainable=False, -# collections=moving_variance_collections) -# if is_train: -# # Calculate the moments based on the individual batch. -# mean, variance = nn.moments(self.inputs, axis, shift=moving_mean) -# # Update the moving_mean and moving_variance moments. 
-# # update_moving_mean = moving_averages.assign_moving_average( -# # moving_mean, mean, decay) -# # update_moving_variance = moving_averages.assign_moving_average( -# # moving_variance, variance, decay) -# # if updates_collections is None: -# # # Make sure the updates are computed here. -# # with ops.control_dependencies([update_moving_mean, -# # update_moving_variance]): -# # outputs = nn.batch_normalization( -# # self.inputs, mean, variance, beta, gamma, epsilon) -# -# update_moving_mean = tf.assign(moving_mean, -# moving_mean * decay + mean * (1 - decay)) -# update_moving_variance = tf.assign(moving_variance, -# moving_variance * decay + variance * (1 - decay)) -# with tf.control_dependencies([update_moving_mean, update_moving_variance]): -# outputs = nn.batch_normalization( -# self.inputs, mean, variance, beta, gamma, epsilon) -# # else: -# # # Collect the updates to be computed later. -# # ops.add_to_collections(updates_collections, update_moving_mean) -# # ops.add_to_collections(updates_collections, update_moving_variance) -# # outputs = nn.batch_normalization( -# # self.inputs, mean, variance, beta, gamma, epsilon) -# else: -# # mean, variance = nn.moments(self.inputs, axis, shift=moving_mean) -# outputs = nn.batch_normalization( -# self.inputs, moving_mean, moving_variance, beta, gamma, epsilon) -# # self.inputs, mean, variance, beta, gamma, epsilon) -# outputs.set_shape(self.inputs.get_shape()) -# # if activation_fn: -# self.outputs = act(outputs) -# -# # variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) -# # return utils.collect_named_outputs(outputs_collections, sc.name, outputs) -# variables = [beta, gamma, moving_mean, moving_variance] -# -# mean, variance = nn.moments(self.inputs, axis, shift=moving_mean) -# self.check_mean = mean -# self.check_variance = variance -# -# self.all_layers = list(layer.all_layers) -# self.all_params = list(layer.all_params) -# self.all_drop = dict(layer.all_drop) -# self.all_layers.extend( 
[self.outputs] ) -# self.all_params.extend( variables ) -# -# class BatchNormLayer5(Layer): # Akara Work well -# """ -# The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. -# -# Batch normalization on fully-connected or convolutional maps. -# -# Parameters -# ----------- -# layer : a :class:`Layer` instance -# The `Layer` class feeding into this layer. -# decay : float -# A decay factor for ExponentialMovingAverage. -# epsilon : float -# A small float number to avoid dividing by 0. -# act : activation function. -# is_train : boolean -# Whether train or inference. -# beta_init : beta initializer -# The initializer for initializing beta -# gamma_init : gamma initializer -# The initializer for initializing gamma -# name : a string or None -# An optional name to attach to this layer. -# -# References -# ---------- -# - `Source `_ -# - `stackoverflow `_ -# """ -# def __init__( -# self, -# layer = None, -# decay = 0.9, -# epsilon = 0.00001, -# act = tf.identity, -# is_train = False, -# beta_init = tf.zeros_initializer, -# # gamma_init = tf.ones_initializer, -# gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), -# name ='batchnorm_layer', -# ): -# Layer.__init__(self, name=name) -# self.inputs = layer.outputs -# print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % -# (self.name, decay, epsilon, act.__name__, is_train)) -# x_shape = self.inputs.get_shape() -# params_shape = x_shape[-1:] -# -# from tensorflow.python.training import moving_averages -# from tensorflow.python.ops import control_flow_ops -# -# with tf.variable_scope(name) as vs: -# axis = list(range(len(x_shape) - 1)) -# -# ## 1. beta, gamma -# beta = tf.get_variable('beta', shape=params_shape, -# initializer=beta_init, -# trainable=is_train)#, restore=restore) -# -# gamma = tf.get_variable('gamma', shape=params_shape, -# initializer=gamma_init, trainable=is_train, -# )#restore=restore) -# -# ## 2. 
moving variables during training (not update by gradient!) -# moving_mean = tf.get_variable('moving_mean', -# params_shape, -# initializer=tf.zeros_initializer, -# trainable=False,)# restore=restore) -# moving_variance = tf.get_variable('moving_variance', -# params_shape, -# initializer=tf.constant_initializer(1.), -# trainable=False,)# restore=restore) -# -# batch_mean, batch_var = tf.nn.moments(self.inputs, axis) -# ## 3. -# # These ops will only be preformed when training. -# def mean_var_with_update(): -# try: # TF12 -# update_moving_mean = moving_averages.assign_moving_average( -# moving_mean, batch_mean, decay, zero_debias=False) # if zero_debias=True, has bias -# update_moving_variance = moving_averages.assign_moving_average( -# moving_variance, batch_var, decay, zero_debias=False) # if zero_debias=True, has bias -# # print("TF12 moving") -# except Exception as e: # TF11 -# update_moving_mean = moving_averages.assign_moving_average( -# moving_mean, batch_mean, decay) -# update_moving_variance = moving_averages.assign_moving_average( -# moving_variance, batch_var, decay) -# # print("TF11 moving") -# -# # def mean_var_with_update(): -# with tf.control_dependencies([update_moving_mean, update_moving_variance]): -# # return tf.identity(update_moving_mean), tf.identity(update_moving_variance) -# return tf.identity(batch_mean), tf.identity(batch_var) -# -# # if not is_train: -# if is_train: -# mean, var = mean_var_with_update() -# else: -# mean, var = (moving_mean, moving_variance) -# -# normed = tf.nn.batch_normalization( -# x=self.inputs, -# mean=mean, -# variance=var, -# offset=beta, -# scale=gamma, -# variance_epsilon=epsilon, -# name="tf_bn" -# ) -# self.outputs = act( normed ) -# -# variables = [beta, gamma, moving_mean, moving_variance] -# # print(len(variables)) -# # for idx, v in enumerate(variables): -# # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v)) -# # exit() -# -# self.all_layers = list(layer.all_layers) -# self.all_params = 
list(layer.all_params) -# self.all_drop = dict(layer.all_drop) -# self.all_layers.extend( [self.outputs] ) -# self.all_params.extend( variables ) -# # self.all_params.extend( [beta, gamma] ) -# -# class BatchNormLayer4(Layer): # work TFlearn https://github.com/tflearn/tflearn/blob/master/tflearn/layers/normalization.py -# """ -# The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. -# -# Batch normalization on fully-connected or convolutional maps. -# -# Parameters -# ----------- -# layer : a :class:`Layer` instance -# The `Layer` class feeding into this layer. -# decay : float -# A decay factor for ExponentialMovingAverage. -# epsilon : float -# A small float number to avoid dividing by 0. -# act : activation function. -# is_train : boolean -# Whether train or inference. -# beta_init : beta initializer -# The initializer for initializing beta -# gamma_init : gamma initializer -# The initializer for initializing gamma -# name : a string or None -# An optional name to attach to this layer. 
-# -# References -# ---------- -# - `Source `_ -# - `stackoverflow `_ -# """ -# def __init__( -# self, -# layer = None, -# decay = 0.999, -# epsilon = 0.00001, -# act = tf.identity, -# is_train = None, -# beta_init = tf.zeros_initializer, -# # gamma_init = tf.ones_initializer, -# gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), -# name ='batchnorm_layer', -# ): -# Layer.__init__(self, name=name) -# self.inputs = layer.outputs -# print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % -# (self.name, decay, epsilon, act.__name__, is_train)) -# input_shape = self.inputs.get_shape() -# # params_shape = input_shape[-1:] -# input_ndim = len(input_shape) -# from tensorflow.python.training import moving_averages -# from tensorflow.python.ops import control_flow_ops -# -# # gamma_init = tf.random_normal_initializer(mean=gamma, stddev=stddev) -# -# # Variable Scope fix for older TF -# scope = name -# try: -# vscope = tf.variable_scope(scope, default_name=name, values=[self.inputs],) -# # reuse=reuse) -# except Exception: -# vscope = tf.variable_op_scope([self.inputs], scope, name)#, reuse=reuse) -# -# with vscope as scope: -# name = scope.name -# # with tf.variable_scope(name) as vs: -# beta = tf.get_variable('beta', shape=[input_shape[-1]], -# initializer=beta_init,) -# # initializer=tf.constant_initializer(beta),) -# # trainable=trainable, )#restore=restore) -# gamma = tf.get_variable('gamma', shape=[input_shape[-1]], -# initializer=gamma_init, )#trainable=trainable,) -# # restore=restore) -# -# axis = list(range(input_ndim - 1)) -# moving_mean = tf.get_variable('moving_mean', -# input_shape[-1:], -# initializer=tf.zeros_initializer, -# trainable=False,) -# # restore=restore) -# moving_variance = tf.get_variable('moving_variance', -# input_shape[-1:], -# initializer=tf.constant_initializer(1.), -# trainable=False,) -# # restore=restore) -# -# # Define a function to update mean and variance -# def update_mean_var(): -# mean, 
variance = tf.nn.moments(self.inputs, axis) -# -# # Fix TF 0.12 -# try: -# update_moving_mean = moving_averages.assign_moving_average( -# moving_mean, mean, decay, zero_debias=False) # if zero_debias=True, accuracy is high .. -# update_moving_variance = moving_averages.assign_moving_average( -# moving_variance, variance, decay, zero_debias=False) -# except Exception as e: # TF 11 -# update_moving_mean = moving_averages.assign_moving_average( -# moving_mean, mean, decay) -# update_moving_variance = moving_averages.assign_moving_average( -# moving_variance, variance, decay) -# -# with tf.control_dependencies( -# [update_moving_mean, update_moving_variance]): -# return tf.identity(mean), tf.identity(variance) -# -# # Retrieve variable managing training mode -# # is_training = tflearn.get_training_mode() -# if not is_train: # test : mean=0, std=1 -# # if is_train: # train : mean=0, std=1 -# is_training = tf.cast(tf.ones([]), tf.bool) -# else: -# is_training = tf.cast(tf.zeros([]), tf.bool) -# mean, var = tf.cond( -# is_training, update_mean_var, lambda: (moving_mean, moving_variance)) -# # ones zeros -# try: -# inference = tf.nn.batch_normalization( -# self.inputs, mean, var, beta, gamma, epsilon) -# inference.set_shape(input_shape) -# # Fix for old Tensorflow -# except Exception as e: -# inference = tf.nn.batch_norm_with_global_normalization( -# self.inputs, mean, var, beta, gamma, epsilon, -# scale_after_normalization=True, -# ) -# inference.set_shape(input_shape) -# -# variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name) # 2 params beta, gamma -# # variables = [beta, gamma, moving_mean, moving_variance] -# -# # print(len(variables)) -# # for idx, v in enumerate(variables): -# # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) -# # exit() -# -# # Add attributes for easy access -# # inference.scope = scope -# inference.scope = name -# inference.beta = beta -# inference.gamma = gamma -# -# self.outputs = act( inference ) 
-# -# self.all_layers = list(layer.all_layers) -# self.all_params = list(layer.all_params) -# self.all_drop = dict(layer.all_drop) -# self.all_layers.extend( [self.outputs] ) -# self.all_params.extend( variables ) - -# class BatchNormLayer2(Layer): # don't work http://r2rt.com/implementing-batch-normalization-in-tensorflow.html -# """ -# The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. -# -# Batch normalization on fully-connected or convolutional maps. -# -# Parameters -# ----------- -# layer : a :class:`Layer` instance -# The `Layer` class feeding into this layer. -# decay : float -# A decay factor for ExponentialMovingAverage. -# epsilon : float -# A small float number to avoid dividing by 0. -# act : activation function. -# is_train : boolean -# Whether train or inference. -# beta_init : beta initializer -# The initializer for initializing beta -# gamma_init : gamma initializer -# The initializer for initializing gamma -# name : a string or None -# An optional name to attach to this layer. 
-# -# References -# ---------- -# - `Source `_ -# - `stackoverflow `_ -# """ -# def __init__( -# self, -# layer = None, -# decay = 0.999, -# epsilon = 0.00001, -# act = tf.identity, -# is_train = None, -# beta_init = tf.zeros_initializer, -# # gamma_init = tf.ones_initializer, -# gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), -# name ='batchnorm_layer', -# ): -# Layer.__init__(self, name=name) -# self.inputs = layer.outputs -# print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % -# (self.name, decay, epsilon, act.__name__, is_train)) -# x_shape = self.inputs.get_shape() -# params_shape = x_shape[-1:] -# -# with tf.variable_scope(name) as vs: -# gamma = tf.get_variable("gamma", shape=params_shape, -# initializer=gamma_init) -# beta = tf.get_variable("beta", shape=params_shape, -# initializer=beta_init) -# pop_mean = tf.get_variable("pop_mean", shape=params_shape, -# initializer=tf.zeros_initializer, trainable=False) -# pop_var = tf.get_variable("pop_var", shape=params_shape, -# initializer=tf.constant_initializer(1.), trainable=False) -# -# if is_train: -# batch_mean, batch_var = tf.nn.moments(self.inputs, list(range(len(x_shape) - 1))) -# train_mean = tf.assign(pop_mean, -# pop_mean * decay + batch_mean * (1 - decay)) -# train_var = tf.assign(pop_var, -# pop_var * decay + batch_var * (1 - decay)) -# with tf.control_dependencies([train_mean, train_var]): -# self.outputs = act(tf.nn.batch_normalization(self.inputs, -# batch_mean, batch_var, beta, gamma, epsilon)) -# else: -# self.outputs = act(tf.nn.batch_normalization(self.inputs, -# pop_mean, pop_var, beta, gamma, epsilon)) -# # self.outputs = act( tf.nn.batch_normalization(self.inputs, mean, variance, beta, gamma, epsilon) ) -# # variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) # 8 params in TF12 if zero_debias=True -# variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=vs.name) # 2 params beta, gamma -# # variables = [beta, 
gamma, moving_mean, moving_variance] -# -# # print(len(variables)) -# # for idx, v in enumerate(variables): -# # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) -# # exit() -# -# self.all_layers = list(layer.all_layers) -# self.all_params = list(layer.all_params) -# self.all_drop = dict(layer.all_drop) -# self.all_layers.extend( [self.outputs] ) -# self.all_params.extend( variables ) - -# class BatchNormLayer3(Layer): # don't work http://r2rt.com/implementing-batch-normalization-in-tensorflow.html -# """ -# The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization`` and ``tf.nn.moments``. -# -# Batch normalization on fully-connected or convolutional maps. -# -# Parameters -# ----------- -# layer : a :class:`Layer` instance -# The `Layer` class feeding into this layer. -# decay : float -# A decay factor for ExponentialMovingAverage. -# epsilon : float -# A small float number to avoid dividing by 0. -# act : activation function. -# is_train : boolean -# Whether train or inference. -# beta_init : beta initializer -# The initializer for initializing beta -# gamma_init : gamma initializer -# The initializer for initializing gamma -# name : a string or None -# An optional name to attach to this layer. -# -# References -# ---------- -# - `Source `_ -# - `stackoverflow `_ -# """ -# def __init__( -# self, -# layer = None, -# decay = 0.999, -# epsilon = 0.00001, -# act = tf.identity, -# is_train = None, -# beta_init = tf.zeros_initializer, -# # gamma_init = tf.ones_initializer, -# gamma_init = tf.random_normal_initializer(mean=1.0, stddev=0.002), -# name ='batchnorm_layer', -# ): -# """ -# Batch normalization on convolutional maps. 
-# Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow -# Args: -# x: Tensor, 4D BHWD input maps -# n_out: integer, depth of input maps -# phase_train: boolean tf.Varialbe, true indicates training phase -# scope: string, variable scope -# Return: -# normed: batch-normalized maps -# """ -# Layer.__init__(self, name=name) -# self.inputs = layer.outputs -# print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, act: %s, is_train: %s" % -# (self.name, decay, epsilon, act.__name__, is_train)) -# x_shape = self.inputs.get_shape() -# params_shape = x_shape[-1:] -# -# if is_train: -# phase_train = tf.cast(tf.ones([]), tf.bool) -# else: -# phase_train = tf.cast(tf.zeros([]), tf.bool) -# -# with tf.variable_scope(name) as vs: -# gamma = tf.get_variable("gamma", shape=params_shape, -# initializer=gamma_init) -# beta = tf.get_variable("beta", shape=params_shape, -# initializer=beta_init) -# batch_mean, batch_var = tf.nn.moments(self.inputs, list(range(len(x_shape) - 1)),#[0,1,2], -# name='moments') -# ema = tf.train.ExponentialMovingAverage(decay=decay) -# -# def mean_var_with_update(): -# ema_apply_op = ema.apply([batch_mean, batch_var]) -# with tf.control_dependencies([ema_apply_op]): -# return tf.identity(batch_mean), tf.identity(batch_var) -# -# mean, var = tf.cond(phase_train, -# mean_var_with_update, -# lambda: (ema.average(batch_mean), ema.average(batch_var))) -# normed = tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon) -# self.outputs = act( normed ) -# variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=vs.name) # 2 params beta, gamma -# # variables = [beta, gamma, moving_mean, moving_variance] -# -# # print(len(variables)) -# # for idx, v in enumerate(variables): -# # print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name)) -# # exit() -# -# self.all_layers = list(layer.all_layers) -# self.all_params = list(layer.all_params) -# self.all_drop = dict(layer.all_drop) -# 
self.all_layers.extend( [self.outputs] ) -# self.all_params.extend( variables ) - -# class BatchNormLayer_old(Layer): # don't work -# """ -# The :class:`BatchNormLayer` class is a normalization layer, see ``tf.nn.batch_normalization``. -# -# Batch normalization on fully-connected or convolutional maps. -# -# Parameters -# ----------- -# layer : a :class:`Layer` instance -# The `Layer` class feeding into this layer. -# decay : float -# A decay factor for ExponentialMovingAverage. -# epsilon : float -# A small float number to avoid dividing by 0. -# is_train : boolean -# Whether train or inference. -# name : a string or None -# An optional name to attach to this layer. -# -# References -# ---------- -# - `tf.nn.batch_normalization `_ -# - `stackoverflow `_ -# - `tensorflow.contrib `_ -# """ -# def __init__( -# self, -# layer = None, -# act = tf.identity, -# decay = 0.999, -# epsilon = 0.001, -# is_train = None, -# name ='batchnorm_layer', -# ): -# Layer.__init__(self, name=name) -# self.inputs = layer.outputs -# print(" [TL] BatchNormLayer %s: decay: %f, epsilon: %f, is_train: %s" % -# (self.name, decay, epsilon, is_train)) -# if is_train == None: -# raise Exception("is_train must be True or False") -# -# # (name, input_var, decay, epsilon, is_train) -# inputs_shape = self.inputs.get_shape() -# axis = list(range(len(inputs_shape) - 1)) -# params_shape = inputs_shape[-1:] -# -# with tf.variable_scope(name) as vs: -# beta = tf.get_variable(name='beta', shape=params_shape, -# initializer=tf.constant_initializer(0.0)) -# gamma = tf.get_variable(name='gamma', shape=params_shape, -# initializer=tf.constant_initializer(1.0)) -# batch_mean, batch_var = tf.nn.moments(self.inputs, -# axis, -# name='moments') -# ema = tf.train.ExponentialMovingAverage(decay=decay) -# -# def mean_var_with_update(): -# ema_apply_op = ema.apply([batch_mean, batch_var]) -# with tf.control_dependencies([ema_apply_op]): -# return tf.identity(batch_mean), tf.identity(batch_var) -# -# if is_train: -# 
is_train = tf.cast(tf.ones(1), tf.bool) -# else: -# is_train = tf.cast(tf.zeros(1), tf.bool) -# -# is_train = tf.reshape(is_train, []) -# -# # print(is_train) -# # exit() -# -# mean, var = tf.cond( -# is_train, -# mean_var_with_update, -# lambda: (ema.average(batch_mean), ema.average(batch_var)) -# ) -# normed = tf.nn.batch_normalization( -# x=self.inputs, -# mean=mean, -# variance=var, -# offset=beta, -# scale=gamma, -# variance_epsilon=epsilon, -# name='tf_bn' -# ) -# self.outputs = act( normed ) -# -# self.all_layers = list(layer.all_layers) -# self.all_params = list(layer.all_params) -# self.all_drop = dict(layer.all_drop) -# self.all_layers.extend( [self.outputs] ) -# self.all_params.extend( [beta, gamma] ) - -## Pooling layer -class PoolLayer(Layer): - """ - The :class:`PoolLayer` class is a Pooling layer, you can choose - ``tf.nn.max_pool`` and ``tf.nn.avg_pool`` for 2D or - ``tf.nn.max_pool3d`` and ``tf.nn.avg_pool3d`` for 3D. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - ksize : a list of ints that has length >= 4. - The size of the window for each dimension of the input tensor. - strides : a list of ints that has length >= 4. - The stride of the sliding window for each dimension of the input tensor. - padding : a string from: "SAME", "VALID". - The type of padding algorithm to use. - pool : a pooling function - - see `TensorFlow pooling APIs `_ - - class ``tf.nn.max_pool`` - - class ``tf.nn.avg_pool`` - - class ``tf.nn.max_pool3d`` - - class ``tf.nn.avg_pool3d`` - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - - see :class:`Conv2dLayer`. 
- """ - def __init__( - self, - layer = None, - ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], - padding='SAME', - pool = tf.nn.max_pool, - name ='pool_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % - (self.name, str(ksize), str(strides), padding, pool.__name__)) - - self.outputs = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -## Padding layer -class PadLayer(Layer): - """ - The :class:`PadLayer` class is a Padding layer for any modes and dimensions. - Please see `tf.pad `_ for usage. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - padding : a Tensor of type int32. - mode : one of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive) - name : a string or None - An optional name to attach to this layer. - """ - def __init__( - self, - layer = None, - paddings = None, - mode = 'CONSTANT', - name = 'pad_layer', - ): - Layer.__init__(self, name=name) - assert paddings is not None, "paddings should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad" - self.inputs = layer.outputs - print(" [TL] PadLayer %s: paddings:%s mode:%s" % - (self.name, list(paddings.get_shape()), mode)) - - self.outputs = tf.pad(self.inputs, paddings=paddings, mode=mode, name=name) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -## TimeDistributedLayer -class TimeDistributedLayer(Layer): - """ - The :class:`TimeDistributedLayer` class that applies a function to every timestep of the input tensor. 
- For example, if using :class:`DenseLayer` as the ``layer_class``, inputs [batch_size , length, dim] - outputs [batch_size , length, new_dim]. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer, [batch_size , length, dim] - layer_class : a :class:`Layer` class - args : dictionary - The arguments for the ``layer_class``. - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - >>> batch_size = 32 - >>> timestep = 20 - >>> input_dim = 100 - >>> x = tf.placeholder(dtype=tf.float32, shape=[batch_size, timestep, input_dim], name="encode_seqs") - >>> net = InputLayer(x, name='input') - >>> net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units':50, 'name':'dense'}, name='time_dense') - ... [TL] InputLayer input: (32, 20, 100) - ... [TL] TimeDistributedLayer time_dense: layer_class:DenseLayer - >>> print(net.outputs._shape) - ... (32, 20, 50) - >>> net.print_params(False) - ... param 0: (100, 50) time_dense/dense/W:0 - ... param 1: (50,) time_dense/dense/b:0 - ... num of params: 5050 - """ - def __init__( - self, - layer = None, - layer_class = None, - args = {}, - name ='time_distributed', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] TimeDistributedLayer %s: layer_class:%s args:%s" % - (self.name, layer_class.__name__, args)) - - if not args: args = dict() - assert isinstance(args, dict), "'args' must be a dict." 
- - if not isinstance(self.inputs, tf.Tensor): - self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2]) - - input_shape = self.inputs.get_shape() - - timestep = input_shape[1] - x = tf.unstack(self.inputs, axis=1) - - with ops.suppress_stdout(): - for i in range(0, timestep): - with tf.variable_scope(name, reuse=(False if i==0 else True)) as vs: - set_name_reuse((False if i==0 else True)) - net = layer_class(InputLayer(x[i], name=args['name']+str(i)), **args) - # net = layer_class(InputLayer(x[i], name="input_"+args['name']), **args) - x[i] = net.outputs - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - self.outputs = tf.stack(x, axis=1, name=name) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( variables ) - - - -## Recurrent layer -class RNNLayer(Layer): - """ - The :class:`RNNLayer` class is a RNN layer, you can implement vanilla RNN, - LSTM and GRU with it. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - - see `RNN Cells in TensorFlow `_ - cell_init_args : a dictionary - The arguments for the cell initializer. - n_hidden : a int - The number of hidden units in the layer. - initializer : initializer - The initializer for initializing the parameters. - n_steps : a int - The sequence length. - initial_state : None or RNN State - If None, initial_state is zero_state. - return_last : boolean - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to apply one or more RNN(s) on this layer, set to False. 
- return_seq_2d : boolean - - When return_last = False - - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. - - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. - name : a string or None - An optional name to attach to this layer. - - Variables - -------------- - outputs : a tensor - The output of this RNN. - return_last = False, outputs = all cell_output, which is the hidden state. - cell_output.get_shape() = (?, n_hidden) - - final_state : a tensor or StateTuple - When state_is_tuple = False, - it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n - When state_is_tuple = True, it stores two elements: (c, h), in that order. - You can get the final state after each iteration during training, then - feed it to the initial state of next iteration. - - initial_state : a tensor or StateTuple - It is the initial state of this RNN layer, you can use it to initialize - your state at the begining of each epoch or iteration according to your - training procedure. - - batch_size : int or tensor - Is int, if able to compute the batch_size, otherwise, tensor for ``?``. - - Examples - -------- - - For words - >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) - >>> network = tl.layers.EmbeddingInputlayer( - ... inputs = input_data, - ... vocabulary_size = vocab_size, - ... embedding_size = hidden_size, - ... E_init = tf.random_uniform_initializer(-init_scale, init_scale), - ... name ='embedding_layer') - >>> if is_training: - >>> network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop1') - >>> network = tl.layers.RNNLayer(network, - ... cell_fn=tf.nn.rnn_cell.BasicLSTMCell, - ... cell_init_args={'forget_bias': 0.0},# 'state_is_tuple': True}, - ... n_hidden=hidden_size, - ... initializer=tf.random_uniform_initializer(-init_scale, init_scale), - ... n_steps=num_steps, - ... return_last=False, - ... 
name='basic_lstm_layer1') - >>> lstm1 = network - >>> if is_training: - >>> network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop2') - >>> network = tl.layers.RNNLayer(network, - ... cell_fn=tf.nn.rnn_cell.BasicLSTMCell, - ... cell_init_args={'forget_bias': 0.0}, # 'state_is_tuple': True}, - ... n_hidden=hidden_size, - ... initializer=tf.random_uniform_initializer(-init_scale, init_scale), - ... n_steps=num_steps, - ... return_last=False, - ... return_seq_2d=True, - ... name='basic_lstm_layer2') - >>> lstm2 = network - >>> if is_training: - >>> network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop3') - >>> network = tl.layers.DenseLayer(network, - ... n_units=vocab_size, - ... W_init=tf.random_uniform_initializer(-init_scale, init_scale), - ... b_init=tf.random_uniform_initializer(-init_scale, init_scale), - ... act = tl.activation.identity, name='output_layer') - - - For CNN+LSTM - >>> x = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 1]) - >>> network = tl.layers.InputLayer(x, name='input_layer') - >>> network = tl.layers.Conv2dLayer(network, - ... act = tf.nn.relu, - ... shape = [5, 5, 1, 32], # 32 features for each 5x5 patch - ... strides=[1, 2, 2, 1], - ... padding='SAME', - ... name ='cnn_layer1') - >>> network = tl.layers.PoolLayer(network, - ... ksize=[1, 2, 2, 1], - ... strides=[1, 2, 2, 1], - ... padding='SAME', - ... pool = tf.nn.max_pool, - ... name ='pool_layer1') - >>> network = tl.layers.Conv2dLayer(network, - ... act = tf.nn.relu, - ... shape = [5, 5, 32, 10], # 10 features for each 5x5 patch - ... strides=[1, 2, 2, 1], - ... padding='SAME', - ... name ='cnn_layer2') - >>> network = tl.layers.PoolLayer(network, - ... ksize=[1, 2, 2, 1], - ... strides=[1, 2, 2, 1], - ... padding='SAME', - ... pool = tf.nn.max_pool, - ... 
name ='pool_layer2') - >>> network = tl.layers.FlattenLayer(network, name='flatten_layer') - >>> network = tl.layers.ReshapeLayer(network, shape=[-1, num_steps, int(network.outputs._shape[-1])]) - >>> rnn1 = tl.layers.RNNLayer(network, - ... cell_fn=tf.nn.rnn_cell.LSTMCell, - ... cell_init_args={}, - ... n_hidden=200, - ... initializer=tf.random_uniform_initializer(-0.1, 0.1), - ... n_steps=num_steps, - ... return_last=False, - ... return_seq_2d=True, - ... name='rnn_layer') - >>> network = tl.layers.DenseLayer(rnn1, n_units=3, - ... act = tl.activation.identity, name='output_layer') - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see :class:`ReshapeLayer`. - - References - ---------- - - `Neural Network RNN Cells in TensorFlow `_ - - `tensorflow/python/ops/rnn.py `_ - - `tensorflow/python/ops/rnn_cell.py `_ - - see TensorFlow tutorial ``ptb_word_lm.py``, TensorLayer tutorials ``tutorial_ptb_lstm*.py`` and ``tutorial_generate_text.py`` - """ - def __init__( - self, - layer = None, - cell_fn = None,#tf.nn.rnn_cell.BasicRNNCell, - cell_init_args = {}, - n_hidden = 100, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - n_steps = 5, - initial_state = None, - return_last = False, - # is_reshape = True, - return_seq_2d = False, - name = 'rnn_layer', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - if 'GRU' in cell_fn.__name__: - try: - cell_init_args.pop('state_is_tuple') - except: - pass - - self.inputs = layer.outputs - - print(" [TL] RNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s " % (self.name, n_hidden, - n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) - # You can get the dimension by .get_shape() or ._shape, and check the - # dimension by .with_rank() as follow. 
- # self.inputs.get_shape().with_rank(2) - # self.inputs.get_shape().with_rank(3) - - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - try: - self.inputs.get_shape().with_rank(3) - except: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") - - - # is_reshape : boolean (deprecate) - # Reshape the inputs to 3 dimension tensor.\n - # If input is[batch_size, n_steps, n_features], we do not need to reshape it.\n - # If input is [batch_size * n_steps, n_features], we need to reshape it. - # if is_reshape: - # self.inputs = tf.reshape(self.inputs, shape=[-1, n_steps, int(self.inputs._shape[-1])]) - - fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] - - if fixed_batch_size.value: - batch_size = fixed_batch_size.value - print(" RNN batch_size (concurrent processes): %d" % batch_size) - else: - from tensorflow.python.ops import array_ops - batch_size = array_ops.shape(self.inputs)[0] - print(" non specified batch_size, uses a tensor instead.") - self.batch_size = batch_size - - # Simplified version of tensorflow.models.rnn.rnn.py's rnn(). - # This builds an unrolled LSTM for tutorial purposes only. - # In general, use the rnn() or state_saving_rnn() from rnn.py. 
- # - # The alternative version of the code below is: - # - # from tensorflow.models.rnn import rnn - # inputs = [tf.squeeze(input_, [1]) - # for input_ in tf.split(1, num_steps, inputs)] - # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state) - outputs = [] - if 'reuse' in inspect.getargspec(cell_fn.__init__).args: - self.cell = cell = cell_fn(num_units=n_hidden, reuse=tf.get_variable_scope().reuse, **cell_init_args) - else: - self.cell = cell = cell_fn(num_units=n_hidden, **cell_init_args) - if initial_state is None: - self.initial_state = cell.zero_state(batch_size, dtype=tf.float32) # 1.2.3 - state = self.initial_state - # with tf.variable_scope("model", reuse=None, initializer=initializer): - with tf.variable_scope(name, initializer=initializer) as vs: - for time_step in range(n_steps): - if time_step > 0: tf.get_variable_scope().reuse_variables() - (cell_output, state) = cell(self.inputs[:, time_step, :], state) - outputs.append(cell_output) - - # Retrieve just the RNN variables. 
- # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] - rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - print(" n_params : %d" % (len(rnn_variables))) - - if return_last: - # 2D Tensor [batch_size, n_hidden] - self.outputs = outputs[-1] - else: - if return_seq_2d: - # PTB tutorial: stack dense layer after that, or compute the cost from the output - # 2D Tensor [n_example, n_hidden] - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden]) - - - else: - # : stack more RNN layer after that - # 3D Tensor [n_example/n_steps, n_steps, n_hidden] - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_steps, n_hidden]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_steps, n_hidden]) - - self.final_state = state - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - # print(type(self.outputs)) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( rnn_variables ) - -class BiRNNLayer(Layer): - """ - The :class:`BiRNNLayer` class is a Bidirectional RNN layer. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - - see `RNN Cells in TensorFlow `_ - cell_init_args : a dictionary - The arguments for the cell initializer. - n_hidden : a int - The number of hidden units in the layer. - initializer : initializer - The initializer for initializing the parameters. - n_steps : a int - The sequence length. - fw_initial_state : None or forward RNN State - If None, initial_state is zero_state. - bw_initial_state : None or backward RNN State - If None, initial_state is zero_state. 
- dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). - The input and output keep probability. - n_layer : a int, default is 1. - The number of RNN layers. - return_last : boolean - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to apply one or more RNN(s) on this layer, set to False. - return_seq_2d : boolean - - When return_last = False - - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. - - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. - name : a string or None - An optional name to attach to this layer. - - Variables - -------------- - outputs : a tensor - The output of this RNN. - return_last = False, outputs = all cell_output, which is the hidden state. - cell_output.get_shape() = (?, n_hidden) - - fw(bw)_final_state : a tensor or StateTuple - When state_is_tuple = False, - it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n - When state_is_tuple = True, it stores two elements: (c, h), in that order. - You can get the final state after each iteration during training, then - feed it to the initial state of next iteration. - - fw(bw)_initial_state : a tensor or StateTuple - It is the initial state of this RNN layer, you can use it to initialize - your state at the begining of each epoch or iteration according to your - training procedure. - - batch_size : int or tensor - Is int, if able to compute the batch_size, otherwise, tensor for ``?``. - - Notes - ----- - - Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please see :class:`ReshapeLayer`. - - For predicting, the sequence length has to be the same with the sequence length of training, while, for normal - RNN, we can use sequence length of 1 for predicting. 
- - References - ---------- - - `Source `_ - """ - def __init__( - self, - layer = None, - cell_fn = None, #tf.nn.rnn_cell.LSTMCell, - cell_init_args = {'use_peepholes':True, 'state_is_tuple':True}, - n_hidden = 100, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - n_steps = 5, - fw_initial_state = None, - bw_initial_state = None, - dropout = None, - n_layer = 1, - return_last = False, - return_seq_2d = False, - name = 'birnn_layer', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - if 'GRU' in cell_fn.__name__: - try: - cell_init_args.pop('state_is_tuple') - except: - pass - - self.inputs = layer.outputs - - print(" [TL] BiRNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d " % (self.name, n_hidden, - n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) - - fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] - - if fixed_batch_size.value: - self.batch_size = fixed_batch_size.value - print(" RNN batch_size (concurrent processes): %d" % self.batch_size) - else: - from tensorflow.python.ops import array_ops - self.batch_size = array_ops.shape(self.inputs)[0] - print(" non specified batch_size, uses a tensor instead.") - - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - try: - self.inputs.get_shape().with_rank(3) - except: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") - - with tf.variable_scope(name, initializer=initializer) as vs: - rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) - # Apply dropout - if dropout: - if type(dropout) in [tuple, list]: - in_keep_prob = dropout[0] - out_keep_prob = dropout[1] - elif isinstance(dropout, float): - in_keep_prob, out_keep_prob = dropout, dropout - else: - raise Exception("Invalid dropout type (must be a 2-D tuple of " - "float)") - try: # TF 1.0 - 
DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper - except: - DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper - cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), - input_keep_prob=in_keep_prob, - output_keep_prob=1.0) # out_keep_prob) - else: - cell_creator = rnn_creator - self.fw_cell = cell_creator() - self.bw_cell = cell_creator() - - # Apply multiple layers - if n_layer > 1: - try: # TF1.0 - MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell - except: - MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell - - try: - self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) - self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) - except: - self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) - self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) - - # Initial state of RNN - if fw_initial_state is None: - self.fw_initial_state = self.fw_cell.zero_state(self.batch_size, dtype=tf.float32) - else: - self.fw_initial_state = fw_initial_state - if bw_initial_state is None: - self.bw_initial_state = self.bw_cell.zero_state(self.batch_size, dtype=tf.float32) - else: - self.bw_initial_state = bw_initial_state - # exit() - # Feedforward to MultiRNNCell - try: ## TF1.0 - list_rnn_inputs = tf.unstack(self.inputs, axis=1) - except: ## TF0.12 - list_rnn_inputs = tf.unpack(self.inputs, axis=1) - - try: # TF1.0 - bidirectional_rnn_fn = tf.contrib.rnn.static_bidirectional_rnn - except: - bidirectional_rnn_fn = tf.nn.bidirectional_rnn - outputs, fw_state, bw_state = bidirectional_rnn_fn( # outputs, fw_state, bw_state = tf.contrib.rnn.static_bidirectional_rnn( - cell_fw=self.fw_cell, - cell_bw=self.bw_cell, - inputs=list_rnn_inputs, - initial_state_fw=self.fw_initial_state, - initial_state_bw=self.bw_initial_state - ) - - if return_last: - self.outputs = outputs[-1] - else: - self.outputs = outputs - if return_seq_2d: - # 2D Tensor [n_example, n_hidden] - try: # TF1.0 - 
self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden*2]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden*2]) - else: - # : stack more RNN layer after that - # 3D Tensor [n_example/n_steps, n_steps, n_hidden] - - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs,1), [-1, n_steps, n_hidden*2]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_steps, n_hidden*2]) - self.fw_final_state = fw_state - self.bw_final_state = bw_state - - # Retrieve just the RNN variables. - rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - print(" n_params : %d" % (len(rnn_variables))) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( rnn_variables ) - -# Advanced Ops for Dynamic RNN -def advanced_indexing_op(input, index): - """Advanced Indexing for Sequences, returns the outputs by given sequence lengths. - When return the last output :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths. - - Parameters - ----------- - input : tensor for data - [batch_size, n_step(max), n_features] - index : tensor for indexing, i.e. sequence_length in Dynamic RNN. - [batch_size] - - Examples - --------- - >>> batch_size, max_length, n_features = 3, 5, 2 - >>> z = np.random.uniform(low=-1, high=1, size=[batch_size, max_length, n_features]).astype(np.float32) - >>> b_z = tf.constant(z) - >>> sl = tf.placeholder(dtype=tf.int32, shape=[batch_size]) - >>> o = advanced_indexing_op(b_z, sl) - >>> - >>> sess = tf.InteractiveSession() - >>> tl.layers.initialize_global_variables(sess) - >>> - >>> order = np.asarray([1,1,2]) - >>> print("real",z[0][order[0]-1], z[1][order[1]-1], z[2][order[2]-1]) - >>> y = sess.run([o], feed_dict={sl:order}) - >>> print("given",order) - >>> print("out", y) - ... 
real [-0.93021595 0.53820813] [-0.92548317 -0.77135968] [ 0.89952248 0.19149846] - ... given [1 1 2] - ... out [array([[-0.93021595, 0.53820813], - ... [-0.92548317, -0.77135968], - ... [ 0.89952248, 0.19149846]], dtype=float32)] - - References - ----------- - - Modified from TFlearn (the original code is used for fixed length rnn), `references `_. - """ - batch_size = tf.shape(input)[0] - # max_length = int(input.get_shape()[1]) # for fixed length rnn, length is given - max_length = tf.shape(input)[1] # for dynamic_rnn, length is unknown - dim_size = int(input.get_shape()[2]) - index = tf.range(0, batch_size) * max_length + (index - 1) - flat = tf.reshape(input, [-1, dim_size]) - relevant = tf.gather(flat, index) - return relevant - -def retrieve_seq_length_op(data): - """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], - it can be used when the features of padding (on right hand side) are all zeros. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max), n_features] with zero padding on right hand side. - - Examples - --------- - >>> data = [[[1],[2],[0],[0],[0]], - ... [[1],[2],[3],[0],[0]], - ... [[1],[2],[6],[1],[0]]] - >>> data = np.asarray(data) - >>> print(data.shape) - ... (3, 5, 1) - >>> data = tf.constant(data) - >>> sl = retrieve_seq_length_op(data) - >>> sess = tf.InteractiveSession() - >>> tl.layers.initialize_global_variables(sess) - >>> y = sl.eval() - ... [2 3 4] - - - Multiple features - >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], - ... [[2,3],[2,4],[3,2],[0,0],[0,0]], - ... [[3,3],[2,2],[5,3],[1,2],[0,0]]] - >>> sl - ... [4 3 4] - - References - ------------ - - Borrow from `TFlearn `_. 
- """ - with tf.name_scope('GetLength'): - ## TF 1.0 change reduction_indices to axis - used = tf.sign(tf.reduce_max(tf.abs(data), 2)) - length = tf.reduce_sum(used, 1) - ## TF < 1.0 - # used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2)) - # length = tf.reduce_sum(used, reduction_indices=1) - length = tf.cast(length, tf.int32) - return length - -def retrieve_seq_length_op2(data): - """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], - it can be used when the features of padding (on right hand side) are all zeros. - - Parameters - ----------- - data : tensor - [batch_size, n_step(max)] with zero padding on right hand side. - - Examples - -------- - >>> data = [[1,2,0,0,0], - ... [1,2,3,0,0], - ... [1,2,6,1,0]] - >>> o = retrieve_seq_length_op2(data) - >>> sess = tf.InteractiveSession() - >>> tl.layers.initialize_global_variables(sess) - >>> print(o.eval()) - ... [2 3 4] - """ - return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1) - - -def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return tensor for sequence length, if input is tf.string - data_shape_size = data.get_shape().ndims - if data_shape_size == 3: - return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1) - elif data_shape_size == 2: - return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1) - elif data_shape_size == 1: - raise ValueError("retrieve_seq_length_op3: data has wrong shape!") - else: - raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" 
% (data_shape_size)) - - -def target_mask_op(data, pad_val=0): # HangSheng: return tensor for mask,if input is tf.string - data_shape_size = data.get_shape().ndims - if data_shape_size == 3: - return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32) - elif data_shape_size == 2: - return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32) - elif data_shape_size == 1: - raise ValueError("target_mask_op: data has wrong shape!") - else: - raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size)) - - -# Dynamic RNN -class DynamicRNNLayer(Layer): - """ - The :class:`DynamicRNNLayer` class is a Dynamic RNN layer, see ``tf.nn.dynamic_rnn``. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - - see `RNN Cells in TensorFlow `_ - cell_init_args : a dictionary - The arguments for the cell initializer. - n_hidden : a int - The number of hidden units in the layer. - initializer : initializer - The initializer for initializing the parameters. - sequence_length : a tensor, array or None. The sequence length of each row of input data, see ``Advanced Ops for Dynamic RNN``. - - If None, it uses ``retrieve_seq_length_op`` to compute the sequence_length, i.e. when the features of padding (on right hand side) are all zeros. - - If using word embedding, you may need to compute the sequence_length from the ID array (the integer features before word embedding) by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``. - - You can also input an numpy array. - - More details about TensorFlow dynamic_rnn in `Wild-ML Blog `_. - initial_state : None or RNN State - If None, initial_state is zero_state. - dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). - The input and output keep probability. - n_layer : a int, default is 1. 
- The number of RNN layers. - return_last : boolean - - If True, return the last output, "Sequence input and single output" - - If False, return all outputs, "Synced sequence input and output" - - In other word, if you want to apply one or more RNN(s) on this layer, set to False. - return_seq_2d : boolean - - When return_last = False - - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer or computing cost after it. - - If False, return 3D Tensor [n_example/n_steps(max), n_steps(max), n_hidden], for stacking multiple RNN after it. - name : a string or None - An optional name to attach to this layer. - - Variables - ------------ - outputs : a tensor - The output of this RNN. - return_last = False, outputs = all cell_output, which is the hidden state. - cell_output.get_shape() = (?, n_hidden) - - final_state : a tensor or StateTuple - When state_is_tuple = False, - it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n - When state_is_tuple = True, it stores two elements: (c, h), in that order. - You can get the final state after each iteration during training, then - feed it to the initial state of next iteration. - - initial_state : a tensor or StateTuple - It is the initial state of this RNN layer, you can use it to initialize - your state at the begining of each epoch or iteration according to your - training procedure. - - sequence_length : a tensor or array, shape = [batch_size] - The sequence lengths computed by Advanced Opt or the given sequence lengths. - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps(max), n_features], if no, please see :class:`ReshapeLayer`. - - Examples - -------- - >>> input_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="input_seqs") - >>> network = tl.layers.EmbeddingInputlayer( - ... inputs = input_seqs, - ... vocabulary_size = vocab_size, - ... embedding_size = embedding_size, - ... 
name = 'seq_embedding') - >>> network = tl.layers.DynamicRNNLayer(network, - ... cell_fn = tf.contrib.rnn.BasicLSTMCell, # for TF0.2 tf.nn.rnn_cell.BasicLSTMCell, - ... n_hidden = embedding_size, - ... dropout = 0.7, - ... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs), - ... return_seq_2d = True, # stack denselayer or compute cost after it - ... name = 'dynamic_rnn') - ... network = tl.layers.DenseLayer(network, n_units=vocab_size, - ... act=tf.identity, name="output") - - References - ---------- - - `Wild-ML Blog `_ - - `dynamic_rnn.ipynb `_ - - `tf.nn.dynamic_rnn `_ - - `tflearn rnn `_ - - ``tutorial_dynamic_rnn.py`` - """ - def __init__( - self, - layer = None, - cell_fn = None,#tf.nn.rnn_cell.LSTMCell, - cell_init_args = {'state_is_tuple' : True}, - n_hidden = 256, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - sequence_length = None, - initial_state = None, - dropout = None, - n_layer = 1, - return_last = False, - return_seq_2d = False, - dynamic_rnn_init_args={}, - name = 'dyrnn_layer', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - if 'GRU' in cell_fn.__name__: - try: - cell_init_args.pop('state_is_tuple') - except: - pass - self.inputs = layer.outputs - - print(" [TL] DynamicRNNLayer %s: n_hidden:%d, in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, - self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) - - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - try: - self.inputs.get_shape().with_rank(3) - except: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]") - - # Get the batch_size - fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] - if fixed_batch_size.value: - batch_size = fixed_batch_size.value - print(" batch_size (concurrent processes): %d" % batch_size) - else: - from tensorflow.python.ops import 
array_ops - batch_size = array_ops.shape(self.inputs)[0] - print(" non specified batch_size, uses a tensor instead.") - self.batch_size = batch_size - - # Creats the cell function - # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **cell_init_args) # HanSheng - rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) - - # Apply dropout - if dropout: - if type(dropout) in [tuple, list]: - in_keep_prob = dropout[0] - out_keep_prob = dropout[1] - elif isinstance(dropout, float): - in_keep_prob, out_keep_prob = dropout, dropout - else: - raise Exception("Invalid dropout type (must be a 2-D tuple of " - "float)") - try: # TF1.0 - DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper - except: - DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper - - # cell_instance_fn1=cell_instance_fn # HanSheng - # cell_instance_fn=DropoutWrapper_fn( - # cell_instance_fn1(), - # input_keep_prob=in_keep_prob, - # output_keep_prob=out_keep_prob) - cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), - input_keep_prob=in_keep_prob, output_keep_prob=1.0)#out_keep_prob) - else: - cell_creator = rnn_creator - self.cell = cell_creator() - # Apply multiple layers - if n_layer > 1: - try: - MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell - except: - MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell - - # cell_instance_fn2=cell_instance_fn # HanSheng - try: - # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)], state_is_tuple=True) # HanSheng - self.cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) - except: # when GRU - # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)]) # HanSheng - self.cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) - - if dropout: - self.cell = DropoutWrapper_fn(self.cell, - input_keep_prob=1.0, output_keep_prob=out_keep_prob) - - # self.cell=cell_instance_fn() # HanSheng - - # Initialize initial_state - if initial_state is None: - 
self.initial_state = self.cell.zero_state(batch_size, dtype=tf.float32) - else: - self.initial_state = initial_state - - # Computes sequence_length - if sequence_length is None: - try: ## TF1.0 - sequence_length = retrieve_seq_length_op( - self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs)) - except: ## TF0.12 - sequence_length = retrieve_seq_length_op( - self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs)) - - # Main - Computes outputs and last_states - with tf.variable_scope(name, initializer=initializer) as vs: - outputs, last_states = tf.nn.dynamic_rnn( - cell=self.cell, - # inputs=X - inputs = self.inputs, - # dtype=tf.float64, - sequence_length=sequence_length, - initial_state = self.initial_state, - **dynamic_rnn_init_args - ) - rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - # print(" n_params : %d" % (len(rnn_variables))) - # Manage the outputs - if return_last: - # [batch_size, n_hidden] - # outputs = tf.transpose(tf.pack(outputs), [1, 0, 2]) # TF1.0 tf.pack --> tf.stack - self.outputs = advanced_indexing_op(outputs, sequence_length) - else: - # [batch_size, n_step(max), n_hidden] - # self.outputs = result[0]["outputs"] - # self.outputs = outputs # it is 3d, but it is a list - if return_seq_2d: - # PTB tutorial: - # 2D Tensor [n_example, n_hidden] - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden]) - else: - # : - # 3D Tensor [batch_size, n_steps(max), n_hidden] - max_length = tf.shape(outputs)[1] - batch_size = tf.shape(outputs)[0] - - - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, n_hidden]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, n_hidden]) - # self.outputs = tf.reshape(tf.concat(1, outputs), [-1, max_length, n_hidden]) - - # Final state - self.final_state = 
last_states - - self.sequence_length = sequence_length - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( rnn_variables ) - -# Bidirectional Dynamic RNN -class BiDynamicRNNLayer(Layer): - """ - The :class:`BiDynamicRNNLayer` class is a RNN layer, you can implement vanilla RNN, - LSTM and GRU with it. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - - see `RNN Cells in TensorFlow `_ - cell_init_args : a dictionary - The arguments for the cell initializer. - n_hidden : a int - The number of hidden units in the layer. - initializer : initializer - The initializer for initializing the parameters. - sequence_length : a tensor, array or None - The sequence length of each row of input data, see ``Advanced Ops for Dynamic RNN``. - - If None, it uses ``retrieve_seq_length_op`` to compute the sequence_length, i.e. when the features of padding (on right hand side) are all zeros. - - If using word embedding, you may need to compute the sequence_length from the ID array (the integer features before word embedding) by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``. - - You can also input an numpy array. - - More details about TensorFlow dynamic_rnn in `Wild-ML Blog `_. - fw_initial_state : None or forward RNN State - If None, initial_state is zero_state. - bw_initial_state : None or backward RNN State - If None, initial_state is zero_state. - dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). - The input and output keep probability. - n_layer : a int, default is 1. - The number of RNN layers. 
- return_last : boolean - If True, return the last output, "Sequence input and single output"\n - If False, return all outputs, "Synced sequence input and output"\n - In other word, if you want to apply one or more RNN(s) on this layer, set to False. - return_seq_2d : boolean - - When return_last = False - - If True, return 2D Tensor [n_example, 2 * n_hidden], for stacking DenseLayer or computing cost after it. - - If False, return 3D Tensor [n_example/n_steps(max), n_steps(max), 2 * n_hidden], for stacking multiple RNN after it. - name : a string or None - An optional name to attach to this layer. - - Variables - ----------------------- - outputs : a tensor - The output of this RNN. - return_last = False, outputs = all cell_output, which is the hidden state. - cell_output.get_shape() = (?, 2 * n_hidden) - - fw(bw)_final_state : a tensor or StateTuple - When state_is_tuple = False, - it is the final hidden and cell states, states.get_shape() = [?, 2 * n_hidden].\n - When state_is_tuple = True, it stores two elements: (c, h), in that order. - You can get the final state after each iteration during training, then - feed it to the initial state of next iteration. - - fw(bw)_initial_state : a tensor or StateTuple - It is the initial state of this RNN layer, you can use it to initialize - your state at the begining of each epoch or iteration according to your - training procedure. - - sequence_length : a tensor or array, shape = [batch_size] - The sequence lengths computed by Advanced Opt or the given sequence lengths. - - Notes - ----- - Input dimension should be rank 3 : [batch_size, n_steps(max), n_features], if no, please see :class:`ReshapeLayer`. 
- - - References - ---------- - - `Wild-ML Blog `_ - - `bidirectional_rnn.ipynb `_ - """ - def __init__( - self, - layer = None, - cell_fn = None,#tf.nn.rnn_cell.LSTMCell, - cell_init_args = {'state_is_tuple':True}, - n_hidden = 256, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - sequence_length = None, - fw_initial_state = None, - bw_initial_state = None, - dropout = None, - n_layer = 1, - return_last = False, - return_seq_2d = False, - dynamic_rnn_init_args={}, - name = 'bi_dyrnn_layer', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - if 'GRU' in cell_fn.__name__: - try: - cell_init_args.pop('state_is_tuple') - except: - pass - self.inputs = layer.outputs - - print(" [TL] BiDynamicRNNLayer %s: n_hidden:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % - (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) - - # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] - try: - self.inputs.get_shape().with_rank(3) - except: - raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]") - - # Get the batch_size - fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] - if fixed_batch_size.value: - batch_size = fixed_batch_size.value - print(" batch_size (concurrent processes): %d" % batch_size) - else: - from tensorflow.python.ops import array_ops - batch_size = array_ops.shape(self.inputs)[0] - print(" non specified batch_size, uses a tensor instead.") - self.batch_size = batch_size - - with tf.variable_scope(name, initializer=initializer) as vs: - # Creats the cell function - # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **cell_init_args) # HanSheng - rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) - - # Apply dropout - if dropout: - if type(dropout) in [tuple, list]: - in_keep_prob = dropout[0] - out_keep_prob = dropout[1] - 
elif isinstance(dropout, float): - in_keep_prob, out_keep_prob = dropout, dropout - else: - raise Exception("Invalid dropout type (must be a 2-D tuple of " - "float)") - try: - DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper - except: - DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper - - # cell_instance_fn1=cell_instance_fn # HanSheng - # cell_instance_fn=lambda: DropoutWrapper_fn( - # cell_instance_fn1(), - # input_keep_prob=in_keep_prob, - # output_keep_prob=out_keep_prob) - cell_creator = lambda: DropoutWrapper_fn(rnn_creator(), - input_keep_prob=in_keep_prob, - output_keep_prob=1.0) # out_keep_prob) - else: - cell_creator = rnn_creator - self.fw_cell = cell_creator() - self.bw_cell = cell_creator() - # Apply multiple layers - if n_layer > 1: - try: - MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell - except: - MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell - - # cell_instance_fn2=cell_instance_fn # HanSheng - # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)]) - self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) - self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) - # self.fw_cell=cell_instance_fn() - # self.bw_cell=cell_instance_fn() - # Initial state of RNN - if fw_initial_state is None: - self.fw_initial_state = self.fw_cell.zero_state(self.batch_size, dtype=tf.float32) - else: - self.fw_initial_state = fw_initial_state - if bw_initial_state is None: - self.bw_initial_state = self.bw_cell.zero_state(self.batch_size, dtype=tf.float32) - else: - self.bw_initial_state = bw_initial_state - # Computes sequence_length - if sequence_length is None: - try: ## TF1.0 - sequence_length = retrieve_seq_length_op( - self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs)) - except: ## TF0.12 - sequence_length = retrieve_seq_length_op( - self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs)) - - outputs, (states_fw, states_bw) = 
tf.nn.bidirectional_dynamic_rnn( - cell_fw=self.fw_cell, - cell_bw=self.bw_cell, - inputs=self.inputs, - sequence_length=sequence_length, - initial_state_fw=self.fw_initial_state, - initial_state_bw=self.bw_initial_state, - **dynamic_rnn_init_args - ) - rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - print(" n_params : %d" % (len(rnn_variables))) - # Manage the outputs - try: # TF1.0 - outputs = tf.concat(outputs, 2) - except: # TF0.12 - outputs = tf.concat(2, outputs) - if return_last: - # [batch_size, 2 * n_hidden] - self.outputs = advanced_indexing_op(outputs, sequence_length) - else: - # [batch_size, n_step(max), 2 * n_hidden] - if return_seq_2d: - # PTB tutorial: - # 2D Tensor [n_example, 2 * n_hidden] - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, 2 * n_hidden]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [-1, 2 * n_hidden]) - else: - # : - # 3D Tensor [batch_size, n_steps(max), 2 * n_hidden] - max_length = tf.shape(outputs)[1] - batch_size = tf.shape(outputs)[0] - try: # TF1.0 - self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, 2 * n_hidden]) - except: # TF0.12 - self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, 2 * n_hidden]) - # self.outputs = tf.reshape(tf.concat(1, outputs), [-1, max_length, 2 * n_hidden]) - - # Final state - self.fw_final_states = states_fw - self.bw_final_states = states_bw - - self.sequence_length = sequence_length - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( rnn_variables ) - -# Seq2seq -class Seq2Seq(Layer): - """ - The :class:`Seq2Seq` class is a simple :class:`DynamicRNNLayer` based Seq2seq layer, - both encoder and decoder are :class:`DynamicRNNLayer`, network details - see `Model `_ - and `Sequence to Sequence Learning with Neural Networks `_ . 
- - Parameters - ---------- - net_encode_in : a :class:`Layer` instance - Encode sequences, [batch_size, None, n_features]. - net_decode_in : a :class:`Layer` instance - Decode sequences, [batch_size, None, n_features]. - cell_fn : a TensorFlow's core RNN cell as follow (Note TF1.0+ and TF1.0- are different). - - see `RNN Cells in TensorFlow `_ - cell_init_args : a dictionary - The arguments for the cell initializer. - n_hidden : a int - The number of hidden units in the layer. - initializer : initializer - The initializer for initializing the parameters. - encode_sequence_length : tensor for encoder sequence length, see :class:`DynamicRNNLayer` . - decode_sequence_length : tensor for decoder sequence length, see :class:`DynamicRNNLayer` . - initial_state : None or forward RNN State - If None, initial_state is of encoder zero_state. - dropout : `tuple` of `float`: (input_keep_prob, output_keep_prob). - The input and output keep probability. - n_layer : a int, default is 1. - The number of RNN layers. - return_seq_2d : boolean - - When return_last = False - - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer or computing cost after it. - - If False, return 3D Tensor [n_example/n_steps(max), n_steps(max), n_hidden], for stacking multiple RNN after it. - name : a string or None - An optional name to attach to this layer. - - Variables - ------------ - outputs : a tensor - The output of RNN decoder. - - final_state : a tensor or StateTuple - Final state of decoder, see :class:`DynamicRNNLayer` . 
- - Examples - ---------- - >>> from tensorlayer.layers import * - >>> batch_size = 32 - >>> encode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="encode_seqs") - >>> decode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="decode_seqs") - >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_seqs") - >>> target_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_mask") # tl.prepro.sequences_get_mask() - >>> with tf.variable_scope("model"): - ... # for chatbot, you can use the same embedding layer, - ... # for translation, you may want to use 2 seperated embedding layers - >>> with tf.variable_scope("embedding") as vs: - >>> net_encode = EmbeddingInputlayer( - ... inputs = encode_seqs, - ... vocabulary_size = 10000, - ... embedding_size = 200, - ... name = 'seq_embedding') - >>> vs.reuse_variables() - >>> tl.layers.set_name_reuse(True) - >>> net_decode = EmbeddingInputlayer( - ... inputs = decode_seqs, - ... vocabulary_size = 10000, - ... embedding_size = 200, - ... name = 'seq_embedding') - >>> net = Seq2Seq(net_encode, net_decode, - ... cell_fn = tf.contrib.rnn.BasicLSTMCell, - ... n_hidden = 200, - ... initializer = tf.random_uniform_initializer(-0.1, 0.1), - ... encode_sequence_length = retrieve_seq_length_op2(encode_seqs), - ... decode_sequence_length = retrieve_seq_length_op2(decode_seqs), - ... initial_state = None, - ... dropout = None, - ... n_layer = 1, - ... return_seq_2d = True, - ... 
name = 'seq2seq') - >>> net_out = DenseLayer(net, n_units=10000, act=tf.identity, name='output') - >>> e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost') - >>> y = tf.nn.softmax(net_out.outputs) - >>> net_out.print_params(False) - - Notes - -------- - - How to feed data: `Sequence to Sequence Learning with Neural Networks `_ - - input_seqs : ``['how', 'are', 'you', ']`` - - decode_seqs : ``['', 'I', 'am', 'fine', ']`` - - target_seqs : ``['I', 'am', 'fine', ' - """ - def __init__( - self, - net_encode_in = None, - net_decode_in = None, - cell_fn = None,#tf.nn.rnn_cell.LSTMCell, - cell_init_args = {'state_is_tuple':True}, - n_hidden = 256, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - encode_sequence_length = None, - decode_sequence_length = None, - initial_state = None, - dropout = None, - n_layer = 1, - # return_last = False, - return_seq_2d = False, - name = 'seq2seq', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - if 'GRU' in cell_fn.__name__: - try: - cell_init_args.pop('state_is_tuple') - except: - pass - # self.inputs = layer.outputs - print(" [**] Seq2Seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % - (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) - - with tf.variable_scope(name) as vs:#, reuse=reuse): - # tl.layers.set_name_reuse(reuse) - # network = InputLayer(self.inputs, name=name+'/input') - network_encode = DynamicRNNLayer(net_encode_in, - cell_fn = cell_fn, - cell_init_args = cell_init_args, - n_hidden = n_hidden, - initial_state = initial_state, - dropout = dropout, - n_layer = n_layer, - sequence_length = encode_sequence_length, - return_last = False, - return_seq_2d = True, - name = name+'_encode') - # vs.reuse_variables() - # tl.layers.set_name_reuse(True) - network_decode = DynamicRNNLayer(net_decode_in, - cell_fn = cell_fn, - cell_init_args = 
cell_init_args, - n_hidden = n_hidden, - initial_state = network_encode.final_state, - dropout = dropout, - n_layer = n_layer, - sequence_length = decode_sequence_length, - return_last = False, - return_seq_2d = return_seq_2d, - name = name+'_decode') - self.outputs = network_decode.outputs - - rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - # Final state - self.final_state = network_decode.final_state - - # self.sequence_length = sequence_length - self.all_layers = list(network_decode.all_layers) - self.all_params = list(network_decode.all_params) - self.all_drop = dict(network_decode.all_drop) - - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( rnn_variables ) - - self.all_layers = list_remove_repeat(self.all_layers) - self.all_params = list_remove_repeat(self.all_params) - -class PeekySeq2Seq(Layer): - """ - Waiting for contribution. - The :class:`PeekySeq2Seq` class, see `Model `_ - and `Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation `_ . - """ - def __init__( - self, - net_encode_in = None, - net_decode_in = None, - cell_fn = None,#tf.nn.rnn_cell.LSTMCell, - cell_init_args = {'state_is_tuple':True}, - n_hidden = 256, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - in_sequence_length = None, - out_sequence_length = None, - initial_state = None, - dropout = None, - n_layer = 1, - # return_last = False, - return_seq_2d = False, - name = 'peeky_seq2seq', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - # self.inputs = layer.outputs - print(" [TL] PeekySeq2seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % - (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) - -class AttentionSeq2Seq(Layer): - """ - Waiting for contribution. - The :class:`AttentionSeq2Seq` class, see `Model `_ - and `Neural Machine Translation by Jointly Learning to Align and Translate `_ . 
- """ - def __init__( - self, - net_encode_in = None, - net_decode_in = None, - cell_fn = None,#tf.nn.rnn_cell.LSTMCell, - cell_init_args = {'state_is_tuple':True}, - n_hidden = 256, - initializer = tf.random_uniform_initializer(-0.1, 0.1), - in_sequence_length = None, - out_sequence_length = None, - initial_state = None, - dropout = None, - n_layer = 1, - # return_last = False, - return_seq_2d = False, - name = 'attention_seq2seq', - ): - Layer.__init__(self, name=name) - if cell_fn is None: - raise Exception("Please put in cell_fn") - # self.inputs = layer.outputs - print(" [TL] PeekySeq2seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % - (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) - -## Shape layer -class FlattenLayer(Layer): - """ - The :class:`FlattenLayer` class is layer which reshape high-dimension - input to a vector. Then we can apply DenseLayer, RNNLayer, ConcatLayer and - etc on the top of it. - - [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row * mask_col * n_mask] - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) - >>> network = tl.layers.InputLayer(x, name='input_layer') - >>> network = tl.layers.Conv2dLayer(network, - ... act = tf.nn.relu, - ... shape = [5, 5, 32, 64], - ... strides=[1, 1, 1, 1], - ... padding='SAME', - ... name ='cnn_layer') - >>> network = tl.layers.Pool2dLayer(network, - ... ksize=[1, 2, 2, 1], - ... strides=[1, 2, 2, 1], - ... padding='SAME', - ... pool = tf.nn.max_pool, - ... 
name ='pool_layer',) - >>> network = tl.layers.FlattenLayer(network, name='flatten_layer') - """ - def __init__( - self, - layer = None, - name ='flatten_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - self.outputs = flatten_reshape(self.inputs, name=name) - self.n_units = int(self.outputs.get_shape()[-1]) - print(" [TL] FlattenLayer %s: %d" % (self.name, self.n_units)) - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -class ReshapeLayer(Layer): - """ - The :class:`ReshapeLayer` class is layer which reshape the tensor. - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - shape : a list - The output shape. - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - - The core of this layer is ``tf.reshape``. - - Use TensorFlow only : - >>> x = tf.placeholder(tf.float32, shape=[None, 3]) - >>> y = tf.reshape(x, shape=[-1, 3, 3]) - >>> sess = tf.InteractiveSession() - >>> print(sess.run(y, feed_dict={x:[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6]]})) - ... [[[ 1. 1. 1.] - ... [ 2. 2. 2.] - ... [ 3. 3. 3.]] - ... [[ 4. 4. 4.] - ... [ 5. 5. 5.] - ... [ 6. 6. 6.]]] - """ - def __init__( - self, - layer = None, - shape = [], - name ='reshape_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - self.outputs = tf.reshape(self.inputs, shape=shape, name=name) - print(" [TL] ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - -class LambdaLayer(Layer): - """ - The :class:`LambdaLayer` class is a layer which is able to use the provided function. 
- - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - fn : a function - The function that applies to the outputs of previous layer. - fn_args : a dictionary - The arguments for the function (option). - name : a string or None - An optional name to attach to this layer. - - Examples - --------- - >>> x = tf.placeholder(tf.float32, shape=[None, 1], name='x') - >>> network = tl.layers.InputLayer(x, name='input_layer') - >>> network = LambdaLayer(network, lambda x: 2*x, name='lambda_layer') - >>> y = network.outputs - >>> sess = tf.InteractiveSession() - >>> out = sess.run(y, feed_dict={x : [[1],[2]]}) - ... [[2],[4]] - """ - def __init__( - self, - layer = None, - fn = None, - fn_args = {}, - name = 'lambda_layer', - ): - Layer.__init__(self, name=name) - assert layer is not None - assert fn is not None - self.inputs = layer.outputs - print(" [TL] LambdaLayer %s" % self.name) - with tf.variable_scope(name) as vs: - self.outputs = fn(self.inputs, **fn_args) - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( variables ) - -## Merge layer -class ConcatLayer(Layer): - """ - The :class:`ConcatLayer` class is layer which concat (merge) two or more - :class:`DenseLayer` to a single class:`DenseLayer`. - - Parameters - ---------- - layer : a list of :class:`Layer` instances - The `Layer` class feeding into this layer. - concat_dim : int - Dimension along which to concatenate. - name : a string or None - An optional name to attach to this layer. 
- - Examples - -------- - >>> sess = tf.InteractiveSession() - >>> x = tf.placeholder(tf.float32, shape=[None, 784]) - >>> inputs = tl.layers.InputLayer(x, name='input_layer') - >>> net1 = tl.layers.DenseLayer(inputs, n_units=800, act = tf.nn.relu, name='relu1_1') - >>> net2 = tl.layers.DenseLayer(inputs, n_units=300, act = tf.nn.relu, name='relu2_1') - >>> network = tl.layers.ConcatLayer(layer = [net1, net2], name ='concat_layer') - ... [TL] InputLayer input_layer (?, 784) - ... [TL] DenseLayer relu1_1: 800, - ... [TL] DenseLayer relu2_1: 300, - ... [TL] ConcatLayer concat_layer, 1100 - ... - >>> tl.layers.initialize_global_variables(sess) - >>> network.print_params() - ... param 0: (784, 800) (mean: 0.000021, median: -0.000020 std: 0.035525) - ... param 1: (800,) (mean: 0.000000, median: 0.000000 std: 0.000000) - ... param 2: (784, 300) (mean: 0.000000, median: -0.000048 std: 0.042947) - ... param 3: (300,) (mean: 0.000000, median: 0.000000 std: 0.000000) - ... num of params: 863500 - >>> network.print_layers() - ... layer 0: Tensor("Relu:0", shape=(?, 800), dtype=float32) - ... layer 1: Tensor("Relu_1:0", shape=(?, 300), dtype=float32) - ... 
- """ - def __init__( - self, - layer = [], - concat_dim = 1, - name ='concat_layer', - ): - Layer.__init__(self, name=name) - self.inputs = [] - for l in layer: - self.inputs.append(l.outputs) - try: # TF1.0 - self.outputs = tf.concat(self.inputs, concat_dim, name=name) - except: # TF0.12 - self.outputs = tf.concat(concat_dim, self.inputs, name=name) - self.n_units = int(self.outputs.get_shape()[-1]) - print(" [TL] ConcatLayer %s: %d" % (self.name, self.n_units)) - - self.all_layers = list(layer[0].all_layers) - self.all_params = list(layer[0].all_params) - self.all_drop = dict(layer[0].all_drop) - - for i in range(1, len(layer)): - self.all_layers.extend(list(layer[i].all_layers)) - self.all_params.extend(list(layer[i].all_params)) - self.all_drop.update(dict(layer[i].all_drop)) - - self.all_layers = list_remove_repeat(self.all_layers) - self.all_params = list_remove_repeat(self.all_params) - #self.all_drop = list_remove_repeat(self.all_drop) # it is a dict - -class ElementwiseLayer(Layer): - """ - The :class:`ElementwiseLayer` class combines multiple :class:`Layer` which have the same output shapes by a given elemwise-wise operation. - - Parameters - ---------- - layer : a list of :class:`Layer` instances - The `Layer` class feeding into this layer. - combine_fn : a TensorFlow elemwise-merge function - e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.multiply`` and so on. - See `TensorFlow Math API `_ . - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - - AND Logic - >>> net_0 = tl.layers.DenseLayer(net_0, n_units=500, - ... act = tf.nn.relu, name='net_0') - >>> net_1 = tl.layers.DenseLayer(net_1, n_units=500, - ... act = tf.nn.relu, name='net_1') - >>> net_com = tl.layers.ElementwiseLayer(layer = [net_0, net_1], - ... combine_fn = tf.minimum, - ... 
name = 'combine_layer') - """ - def __init__( - self, - layer = [], - combine_fn = tf.minimum, - name ='elementwise_layer', - ): - Layer.__init__(self, name=name) - - print(" [TL] ElementwiseLayer %s: size:%s fn:%s" % (self.name, layer[0].outputs.get_shape(), combine_fn.__name__)) - - self.outputs = layer[0].outputs - # print(self.outputs._shape, type(self.outputs._shape)) - for l in layer[1:]: - assert str(self.outputs.get_shape()) == str(l.outputs.get_shape()), "Hint: the input shapes should be the same. %s != %s" % (self.outputs.get_shape() , str(l.outputs.get_shape())) - self.outputs = combine_fn(self.outputs, l.outputs, name=name) - - self.all_layers = list(layer[0].all_layers) - self.all_params = list(layer[0].all_params) - self.all_drop = dict(layer[0].all_drop) - - for i in range(1, len(layer)): - self.all_layers.extend(list(layer[i].all_layers)) - self.all_params.extend(list(layer[i].all_params)) - self.all_drop.update(dict(layer[i].all_drop)) - - self.all_layers = list_remove_repeat(self.all_layers) - self.all_params = list_remove_repeat(self.all_params) - # self.all_drop = list_remove_repeat(self.all_drop) - -# Extend -class ExpandDimsLayer(Layer): - """ - The :class:`ExpandDimsLayer` class inserts a dimension of 1 into a tensor's shape, - see `tf.expand_dims() `_ . - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - axis : int, 0-D (scalar). - Specifies the dimension index at which to expand the shape of input. - name : a string or None - An optional name to attach to this layer. 
- """ - def __init__( - self, - layer = None, - axis = None, - name = 'expand_dims', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - - print(" [TL] ExpandDimsLayer %s: axis:%d" % (self.name, axis)) - with tf.variable_scope(name) as vs: - try: # TF12 TF1.0 - self.outputs = tf.expand_dims(self.inputs, axis=axis) - except: # TF11 - self.outputs = tf.expand_dims(self.inputs, dim=axis) - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - # self.all_params.extend( variables ) - -class TileLayer(Layer): - """ - The :class:`TileLayer` class constructs a tensor by tiling a given tensor, - see `tf.tile() `_ . - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - multiples: a list of int - Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input - name : a string or None - An optional name to attach to this layer. - """ - def __init__( - self, - layer = None, - multiples = None, - name = 'tile', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - - print(" [TL] TileLayer %s: multiples:%s" % (self.name, multiples)) - with tf.variable_scope(name) as vs: - self.outputs = tf.tile(self.inputs, multiples=multiples) - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - # self.all_params.extend( variables ) - -## TF-Slim layer -class SlimNetsLayer(Layer): - """ - The :class:`SlimNetsLayer` class can be used to merge all TF-Slim nets into - TensorLayer. Model can be found in `slim-model `_ , more about slim - see `slim-git `_ . - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. 
- slim_layer : a slim network function - The network you want to stack onto, end with ``return net, end_points``. - slim_args : dictionary - The arguments for the slim model. - name : a string or None - An optional name to attach to this layer. - - Examples - -------- - - see Inception V3 example on `Github `_ - - Notes - ----- - The due to TF-Slim stores the layers as dictionary, the ``all_layers`` in this - network is not in order ! Fortunately, the ``all_params`` are in order. - """ - def __init__( - self, - layer = None, - slim_layer = None, - slim_args = {}, - name ='tfslim_layer', - ): - Layer.__init__(self, name=name) - assert slim_layer is not None - assert slim_args is not None - self.inputs = layer.outputs - print(" [TL] SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__)) - - # with tf.variable_scope(name) as vs: - # net, end_points = slim_layer(self.inputs, **slim_args) - # slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - net, end_points = slim_layer(self.inputs, **slim_args) - - slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=name) - if slim_variables == []: - print("No variables found under %s : the name of SlimNetsLayer should be matched with the begining of the ckpt file, see tutorial_inceptionV3_tfslim.py for more details" % name) - - - self.outputs = net - - slim_layers = [] - for v in end_points.values(): - # tf.contrib.layers.summaries.summarize_activation(v) - slim_layers.append(v) - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - - self.all_layers.extend( slim_layers ) - self.all_params.extend( slim_variables ) - -## Keras layer -class KerasLayer(Layer): - """ - The :class:`KerasLayer` class can be used to merge all Keras layers into - TensorLayer. Example can be found here `tutorial_keras.py `_ - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. 
- keras_layer : a keras network function - keras_args : dictionary - The arguments for the keras model. - name : a string or None - An optional name to attach to this layer. - """ - def __init__( - self, - layer = None, - keras_layer = None, - keras_args = {}, - name ='keras_layer', - ): - Layer.__init__(self, name=name) - assert layer is not None - assert keras_layer is not None - self.inputs = layer.outputs - print(" [TL] KerasLayer %s: %s" % (self.name, keras_layer)) - print(" This API will be removed, please use LambdaLayer instead.") - with tf.variable_scope(name) as vs: - self.outputs = keras_layer(self.inputs, **keras_args) - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( variables ) - -## Estimator layer -class EstimatorLayer(Layer): - """ - The :class:`EstimatorLayer` class accepts ``model_fn`` that described the model. - It is similar with :class:`KerasLayer`, see `tutorial_keras.py `_ - - Parameters - ---------- - layer : a :class:`Layer` instance - The `Layer` class feeding into this layer. - model_fn : a function that described the model. - args : dictionary - The arguments for the model_fn. - name : a string or None - An optional name to attach to this layer. 
- """ - def __init__( - self, - layer = None, - model_fn = None, - args = {}, - name ='estimator_layer', - ): - Layer.__init__(self, name=name) - assert layer is not None - assert model_fn is not None - self.inputs = layer.outputs - print(" [TL] EstimatorLayer %s: %s" % (self.name, model_fn)) - print(" This API will be removed, please use LambdaLayer instead.") - with tf.variable_scope(name) as vs: - self.outputs = model_fn(self.inputs, **args) - variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( variables ) - -## Special activation -class PReluLayer(Layer): - """ - The :class:`PReluLayer` class is Parametric Rectified Linear layer. - - Parameters - ---------- - x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, - `int16`, or `int8`. - channel_shared : `bool`. Single weight is shared by all channels - a_init : alpha initializer, default zero constant. - The initializer for initializing the alphas. - a_init_args : dictionary - The arguments for the weights initializer. - name : A name for this activation op (optional). 
- - References - ----------- - - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `_ - """ - def __init__( - self, - layer = None, - channel_shared = False, - a_init = tf.constant_initializer(value=0.0), - a_init_args = {}, - # restore = True, - name="prelu_layer" - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - print(" [TL] PReluLayer %s: channel_shared:%s" % (self.name, channel_shared)) - if channel_shared: - w_shape = (1,) - else: - w_shape = int(self.inputs.get_shape()[-1]) - - # with tf.name_scope(name) as scope: - with tf.variable_scope(name) as vs: - alphas = tf.get_variable(name='alphas', shape=w_shape, initializer=a_init, **a_init_args ) - try: ## TF 1.0 - self.outputs = tf.nn.relu(self.inputs) + tf.multiply(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5 - except: ## TF 0.12 - self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5 - - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( [alphas] ) - -## Flow control layer -class MultiplexerLayer(Layer): - """ - The :class:`MultiplexerLayer` selects one of several input and forwards the selected input into the output, - see `tutorial_mnist_multiplexer.py`. - - Parameters - ---------- - layer : a list of :class:`Layer` instances - The `Layer` class feeding into this layer. - name : a string or None - An optional name to attach to this layer. 
- - - Variables - ----------------------- - sel : a placeholder - Input an int [0, inf], which input is the output - - Examples - -------- - >>> x = tf.placeholder(tf.float32, shape=[None, 784], name='x') - >>> y_ = tf.placeholder(tf.int64, shape=[None, ], name='y_') - >>> # define the network - >>> net_in = tl.layers.InputLayer(x, name='input_layer') - >>> net_in = tl.layers.DropoutLayer(net_in, keep=0.8, name='drop1') - >>> # net 0 - >>> net_0 = tl.layers.DenseLayer(net_in, n_units=800, - ... act = tf.nn.relu, name='net0/relu1') - >>> net_0 = tl.layers.DropoutLayer(net_0, keep=0.5, name='net0/drop2') - >>> net_0 = tl.layers.DenseLayer(net_0, n_units=800, - ... act = tf.nn.relu, name='net0/relu2') - >>> # net 1 - >>> net_1 = tl.layers.DenseLayer(net_in, n_units=800, - ... act = tf.nn.relu, name='net1/relu1') - >>> net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop2') - >>> net_1 = tl.layers.DenseLayer(net_1, n_units=800, - ... act = tf.nn.relu, name='net1/relu2') - >>> net_1 = tl.layers.DropoutLayer(net_1, keep=0.8, name='net1/drop3') - >>> net_1 = tl.layers.DenseLayer(net_1, n_units=800, - ... act = tf.nn.relu, name='net1/relu3') - >>> # multiplexer - >>> net_mux = tl.layers.MultiplexerLayer(layer = [net_0, net_1], name='mux_layer') - >>> network = tl.layers.ReshapeLayer(net_mux, shape=[-1, 800], name='reshape_layer') # - >>> network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3') - >>> # output layer - >>> network = tl.layers.DenseLayer(network, n_units=10, - ... 
act = tf.identity, name='output_layer') - - References - ------------ - - See ``tf.pack() for TF0.12 or tf.stack() for TF1.0`` and ``tf.gather()`` at `TensorFlow - Slicing and Joining `_ - """ - def __init__(self, - layer = [], - name='mux_layer'): - Layer.__init__(self, name=name) - self.n_inputs = len(layer) - - self.inputs = [] - for l in layer: - self.inputs.append(l.outputs) - try: ## TF1.0 - all_inputs = tf.stack(self.inputs, name=name) # pack means concat a list of tensor in a new dim # 1.2 - except: - all_inputs = tf.pack(self.inputs, name=name) # pack means concat a list of tensor in a new dim # 1.2 - - print(" [TL] MultiplexerLayer %s: n_inputs:%d" % (self.name, self.n_inputs)) - - self.sel = tf.placeholder(tf.int32) - self.outputs = tf.gather(all_inputs, self.sel, name=name) # [sel, :, : ...] # 1.2 - - # print(self.outputs, vars(self.outputs)) - # # tf.reshape(self.outputs, shape=) - # exit() - # the same with ConcatLayer - self.all_layers = list(layer[0].all_layers) - self.all_params = list(layer[0].all_params) - self.all_drop = dict(layer[0].all_drop) - - for i in range(1, len(layer)): - self.all_layers.extend(list(layer[i].all_layers)) - self.all_params.extend(list(layer[i].all_params)) - self.all_drop.update(dict(layer[i].all_drop)) - - self.all_layers = list_remove_repeat(self.all_layers) - self.all_params = list_remove_repeat(self.all_params) - # self.all_drop = list_remove_repeat(self.all_drop) -## We can Duplicate the network instead of DemultiplexerLayer -# class DemultiplexerLayer(Layer): -# """ -# The :class:`DemultiplexerLayer` takes a single input and select one of many output lines, which is connected to the input. -# -# Parameters -# ---------- -# layer : a list of :class:`Layer` instances -# The `Layer` class feeding into this layer. -# n_outputs : a int -# The number of output -# name : a string or None -# An optional name to attach to this layer. 
-# -# Field (Class Variables) -# ----------------------- -# sel : a placeholder -# Input int [0, inf], the -# outputs : a list of Tensor -# A list of outputs -# -# Examples -# -------- -# >>> -# """ -# def __init__(self, -# layer = None, -# name='demux_layer'): -# Layer.__init__(self, name=name) -# self.outputs = [] - -## Wrapper -class EmbeddingAttentionSeq2seqWrapper(Layer): - """Sequence-to-sequence model with attention and for multiple buckets (Deprecated after TF0.12). - - This example implements a multi-layer recurrent neural network as encoder, - and an attention-based decoder. This is the same as the model described in - this paper: - - `Grammar as a Foreign Language `_ - please look there for details, - or into the seq2seq library for complete model implementation. - This example also allows to use GRU cells in addition to LSTM cells, and - sampled softmax to handle large output vocabulary size. A single-layer - version of this model, but with bi-directional encoder, was presented in - - `Neural Machine Translation by Jointly Learning to Align and Translate `_ - The sampled softmax is described in Section 3 of the following paper. - - `On Using Very Large Target Vocabulary for Neural Machine Translation `_ - - Parameters - ---------- - source_vocab_size : size of the source vocabulary. - target_vocab_size : size of the target vocabulary. - buckets : a list of pairs (I, O), where I specifies maximum input length - that will be processed in that bucket, and O specifies maximum output - length. Training instances that have inputs longer than I or outputs - longer than O will be pushed to the next bucket and padded accordingly. - We assume that the list is sorted, e.g., [(2, 4), (8, 16)]. - size : number of units in each layer of the model. - num_layers : number of layers in the model. - max_gradient_norm : gradients will be clipped to maximally this norm. 
- batch_size : the size of the batches used during training; - the model construction is independent of batch_size, so it can be - changed after initialization if this is convenient, e.g., for decoding. - learning_rate : learning rate to start with. - learning_rate_decay_factor : decay learning rate by this much when needed. - use_lstm : if true, we use LSTM cells instead of GRU cells. - num_samples : number of samples for sampled softmax. - forward_only : if set, we do not construct the backward pass in the model. - name : a string or None - An optional name to attach to this layer. - """ - def __init__(self, - source_vocab_size, - target_vocab_size, - buckets, - size, - num_layers, - max_gradient_norm, - batch_size, - learning_rate, - learning_rate_decay_factor, - use_lstm=False, - num_samples=512, - forward_only=False, - name='wrapper'): - Layer.__init__(self)#, name=name) - - self.source_vocab_size = source_vocab_size - self.target_vocab_size = target_vocab_size - self.buckets = buckets - self.batch_size = batch_size - self.learning_rate = tf.Variable(float(learning_rate), trainable=False, name='learning_rate') - self.learning_rate_decay_op = self.learning_rate.assign( - self.learning_rate * learning_rate_decay_factor) - self.global_step = tf.Variable(0, trainable=False, name='global_step') - - if tf.__version__ >= "0.12": - raise Exception("Deprecated after TF0.12 : use other seq2seq layers instead.") - - # =========== Fake output Layer for compute cost ====== - # If we use sampled softmax, we need an output projection. - with tf.variable_scope(name) as vs: - output_projection = None - softmax_loss_function = None - # Sampled softmax only makes sense if we sample less than vocabulary size. 
- if num_samples > 0 and num_samples < self.target_vocab_size: - w = tf.get_variable("proj_w", [size, self.target_vocab_size]) - w_t = tf.transpose(w) - b = tf.get_variable("proj_b", [self.target_vocab_size]) - output_projection = (w, b) - - def sampled_loss(inputs, labels): - labels = tf.reshape(labels, [-1, 1]) - return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples, - self.target_vocab_size) - softmax_loss_function = sampled_loss - - # ============ Seq Encode Layer ============= - # Create the internal multi-layer cell for our RNN. - try: # TF1.0 - cell_creator = lambda: tf.contrib.rnn.GRUCell(size) - except: - cell_creator = lambda: tf.nn.rnn_cell.GRUCell(size) - - if use_lstm: - try: # TF1.0 - cell_creator = lambda: tf.contrib.rnn.BasicLSTMCell(size) - except: - cell_creator = lambda: tf.nn.rnn_cell.BasicLSTMCell(size) - - cell = cell_creator() - if num_layers > 1: - try: # TF1.0 - cell = tf.contrib.rnn.MultiRNNCell([single_cell] * num_layers) - except: - cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers) - - # ============== Seq Decode Layer ============ - # The seq2seq function: we use embedding for the input and attention. - def seq2seq_f(encoder_inputs, decoder_inputs, do_decode): - return tf.nn.seq2seq.embedding_attention_seq2seq( - encoder_inputs, decoder_inputs, cell, - num_encoder_symbols=source_vocab_size, - num_decoder_symbols=target_vocab_size, - embedding_size=size, - output_projection=output_projection, - feed_previous=do_decode) - - #============================================================= - # Feeds for inputs. - self.encoder_inputs = [] - self.decoder_inputs = [] - self.target_weights = [] - for i in xrange(buckets[-1][0]): # Last bucket is the biggest one. 
- self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], - name="encoder{0}".format(i))) - for i in xrange(buckets[-1][1] + 1): - self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], - name="decoder{0}".format(i))) - self.target_weights.append(tf.placeholder(tf.float32, shape=[None], - name="weight{0}".format(i))) - - # Our targets are decoder inputs shifted by one. - targets = [self.decoder_inputs[i + 1] - for i in xrange(len(self.decoder_inputs) - 1)] - self.targets = targets # DH add for debug - - - # Training outputs and losses. - if forward_only: - self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets( - self.encoder_inputs, self.decoder_inputs, targets, - self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True), - softmax_loss_function=softmax_loss_function) - # If we use output projection, we need to project outputs for decoding. - if output_projection is not None: - for b in xrange(len(buckets)): - self.outputs[b] = [ - tf.matmul(output, output_projection[0]) + output_projection[1] - for output in self.outputs[b] - ] - else: - self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets( - self.encoder_inputs, self.decoder_inputs, targets, - self.target_weights, buckets, - lambda x, y: seq2seq_f(x, y, False), - softmax_loss_function=softmax_loss_function) - - # Gradients and SGD update operation for training the model. 
- params = tf.trainable_variables() - if not forward_only: - self.gradient_norms = [] - self.updates = [] - opt = tf.train.GradientDescentOptimizer(self.learning_rate) - for b in xrange(len(buckets)): - gradients = tf.gradients(self.losses[b], params) - clipped_gradients, norm = tf.clip_by_global_norm(gradients, - max_gradient_norm) - self.gradient_norms.append(norm) - self.updates.append(opt.apply_gradients( - zip(clipped_gradients, params), global_step=self.global_step)) - - # if save into npz - self.all_params = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) - - # if save into ckpt - self.saver = tf.train.Saver(tf.all_variables()) - - def step(self, session, encoder_inputs, decoder_inputs, target_weights, - bucket_id, forward_only): - """Run a step of the model feeding the given inputs. - - Parameters - ---------- - session : tensorflow session to use. - encoder_inputs : list of numpy int vectors to feed as encoder inputs. - decoder_inputs : list of numpy int vectors to feed as decoder inputs. - target_weights : list of numpy float vectors to feed as target weights. - bucket_id : which bucket of the model to use. - forward_only : whether to do the backward step or only forward. - - Returns - -------- - A triple consisting of gradient norm (or None if we did not do backward), - average perplexity, and the outputs. - - Raises - -------- - ValueError : if length of encoder_inputs, decoder_inputs, or - target_weights disagrees with bucket size for the specified bucket_id. - """ - # Check if the sizes match. - encoder_size, decoder_size = self.buckets[bucket_id] - if len(encoder_inputs) != encoder_size: - raise ValueError("Encoder length must be equal to the one in bucket," - " %d != %d." % (len(encoder_inputs), encoder_size)) - if len(decoder_inputs) != decoder_size: - raise ValueError("Decoder length must be equal to the one in bucket," - " %d != %d." 
% (len(decoder_inputs), decoder_size)) - if len(target_weights) != decoder_size: - raise ValueError("Weights length must be equal to the one in bucket," - " %d != %d." % (len(target_weights), decoder_size)) - # print('in model.step()') - # print('a',bucket_id, encoder_size, decoder_size) - - # Input feed: encoder inputs, decoder inputs, target_weights, as provided. - input_feed = {} - for l in xrange(encoder_size): - input_feed[self.encoder_inputs[l].name] = encoder_inputs[l] - for l in xrange(decoder_size): - input_feed[self.decoder_inputs[l].name] = decoder_inputs[l] - input_feed[self.target_weights[l].name] = target_weights[l] - # print(self.encoder_inputs[l].name) - # print(self.decoder_inputs[l].name) - # print(self.target_weights[l].name) - - # Since our targets are decoder inputs shifted by one, we need one more. - last_target = self.decoder_inputs[decoder_size].name - input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32) - # print('last_target', last_target) - - # Output feed: depends on whether we do a backward step or not. - if not forward_only: - output_feed = [self.updates[bucket_id], # Update Op that does SGD. - self.gradient_norms[bucket_id], # Gradient norm. - self.losses[bucket_id]] # Loss for this batch. - else: - output_feed = [self.losses[bucket_id]] # Loss for this batch. - for l in xrange(decoder_size): # Output logits. - output_feed.append(self.outputs[bucket_id][l]) - - outputs = session.run(output_feed, input_feed) - if not forward_only: - return outputs[1], outputs[2], None # Gradient norm, loss, no outputs. - else: - return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs. - - def get_batch(self, data, bucket_id, PAD_ID=0, GO_ID=1, EOS_ID=2, UNK_ID=3): - """ Get a random batch of data from the specified bucket, prepare for step. - - To feed data in step(..) it must be a list of batch-major vectors, while - data here contains single length-major cases. 
So the main logic of this - function is to re-index data cases to be in the proper format for feeding. - - Parameters - ---------- - data : a tuple of size len(self.buckets) in which each element contains - lists of pairs of input and output data that we use to create a batch. - bucket_id : integer, which bucket to get the batch for. - PAD_ID : int - Index of Padding in vocabulary - GO_ID : int - Index of GO in vocabulary - EOS_ID : int - Index of End of sentence in vocabulary - UNK_ID : int - Index of Unknown word in vocabulary - - Returns - ------- - The triple (encoder_inputs, decoder_inputs, target_weights) for - the constructed batch that has the proper format to call step(...) later. - """ - encoder_size, decoder_size = self.buckets[bucket_id] - encoder_inputs, decoder_inputs = [], [] - - # Get a random batch of encoder and decoder inputs from data, - # pad them if needed, reverse encoder inputs and add GO to decoder. - for _ in xrange(self.batch_size): - encoder_input, decoder_input = random.choice(data[bucket_id]) - - # Encoder inputs are padded and then reversed. - encoder_pad = [PAD_ID] * (encoder_size - len(encoder_input)) - encoder_inputs.append(list(reversed(encoder_input + encoder_pad))) - - # Decoder inputs get an extra "GO" symbol, and are padded then. - decoder_pad_size = decoder_size - len(decoder_input) - 1 - decoder_inputs.append([GO_ID] + decoder_input + - [PAD_ID] * decoder_pad_size) - - # Now we create batch-major vectors from the data selected above. - batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], [] - - # Batch encoder inputs are just re-indexed encoder_inputs. - for length_idx in xrange(encoder_size): - batch_encoder_inputs.append( - np.array([encoder_inputs[batch_idx][length_idx] - for batch_idx in xrange(self.batch_size)], dtype=np.int32)) - - # Batch decoder inputs are re-indexed decoder_inputs, we create weights. 
- for length_idx in xrange(decoder_size): - batch_decoder_inputs.append( - np.array([decoder_inputs[batch_idx][length_idx] - for batch_idx in xrange(self.batch_size)], dtype=np.int32)) - - # Create target_weights to be 0 for targets that are padding. - batch_weight = np.ones(self.batch_size, dtype=np.float32) - for batch_idx in xrange(self.batch_size): - # We set weight to 0 if the corresponding target is a PAD symbol. - # The corresponding target is decoder_input shifted by 1 forward. - if length_idx < decoder_size - 1: - target = decoder_inputs[batch_idx][length_idx + 1] - if length_idx == decoder_size - 1 or target == PAD_ID: - batch_weight[batch_idx] = 0.0 - batch_weights.append(batch_weight) - return batch_encoder_inputs, batch_decoder_inputs, batch_weights - -## Developing or Untested -class MaxoutLayer(Layer): - """ - Waiting for contribution - - Single DenseLayer with Max-out behaviour, work well with Dropout. - - References - ----------- - `Goodfellow (2013) Maxout Networks `_ - """ - def __init__( - self, - layer = None, - n_units = 100, - name ='maxout_layer', - ): - Layer.__init__(self, name=name) - self.inputs = layer.outputs - - print(" [TL] MaxoutLayer %s: %d" % (self.name, self.n_units)) - print(" Waiting for contribution") - with tf.variable_scope(name) as vs: - pass - # W = tf.Variable(init.xavier_init(n_inputs=n_in, n_outputs=n_units, uniform=True), name='W') - # b = tf.Variable(tf.zeros([n_units]), name='b') - - # self.outputs = act(tf.matmul(self.inputs, W) + b) - # https://www.tensorflow.org/versions/r0.9/api_docs/python/array_ops.html#pack - # http://stackoverflow.com/questions/34362193/how-to-explicitly-broadcast-a-tensor-to-match-anothers-shape-in-tensorflow - # tf.concat tf.pack tf.tile - - self.all_layers = list(layer.all_layers) - self.all_params = list(layer.all_params) - self.all_drop = dict(layer.all_drop) - self.all_layers.extend( [self.outputs] ) - self.all_params.extend( [W, b] ) - - - - - - - - - - - - - - - - - - - - - - - - -# 
diff --git a/tensorlayer/layers/__init__.py b/tensorlayer/layers/__init__.py new file mode 100644 index 0000000..cad53aa --- /dev/null +++ b/tensorlayer/layers/__init__.py @@ -0,0 +1,26 @@ +""" +TensorLayer provides rich layer implementations trailed for +various benchmarks and domain-specific problems. In addition, we also +support transparent access to native TensorFlow parameters. +For example, we provide not only layers for local response normalization, but also +layers that allow user to apply ``tf.nn.lrn`` on ``network.outputs``. +More functions can be found in `TensorFlow API `__. +""" + +from .core import * +from .convolution import * +from .super_resolution import * +from .normalization import * +from .spatial_transformer import * +from .object_detection import * +from .time_distribution import * +from .pooling import * +from .padding import * +from .recurrent import * +from .shape import * +from .importer import * +from .merge import * +from .extend import * +from .stack import * +from .special_activation import * +from .flow_control import * diff --git a/tensorlayer/layers/__pycache__/__init__.cpython-34.pyc b/tensorlayer/layers/__pycache__/__init__.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1365b66b27d09710b63cf16798d489713615e4d0 GIT binary patch literal 1091 zcmaKr&2AGh6on`0PtxfRZP~H1*cG)idsI~*p-PniRibQWGjTjO8B>q1WqXo__kcB9 z_B;kpFk4n^cmP&hPb#Y{Afs5HJkGuMoa6l3+fP3|`BHq|NRl7PgU^uo1%2H&TB;-| zXdNWgFj0f#=AMR;HX?0C+KRLtX%uN3X(v(|X*bedr2R+-ksd`ljC4e_k-R(Ey83~jpLR$}Ye6tt)tZf_mq zx?&YJT8RJdexgg4;9x-7j!%ixZO8T*3w3YbREP^;BigbUL$g@P+)!5Pv4bi_7X$&bCR zQ#Ng>YZ#}OO|5GlHbTSJkvB97H-;)sCVrM_ohhp)6KX;m8vGLNZHBEkt#^}2mcBs? 
zY}RVAc1>_iF3}h*`6KB!5A?jezGCOK_r^WX^W}1x`96{vwa(Da^93ml*5?g(q&nwD zmK6$@OI^WTR{KU>Ov*A#e^Q>w$)FoNdwX)+jmmJUEW1rXvvj)x^#WBJ<|z)zbFd|O zalg~;|7Az=TU2)r-8S5j=!)KZE^dqC{_0S7PHh%_K*e70qa+dD zqWYTH1Jc?rzh(ZO0&;oHu|hr1U!iEjg?0J0#W1kT&C1uP&q7EfOhCT2xR3guyuJu| eSxPNE`D<3)_`JcYRd6v5fBmPlY!B1H|DQh%7+jeE literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/__init__.cpython-35.pyc b/tensorlayer/layers/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f16e5605bfefce48cabdc4cf231be17e344a833c GIT binary patch literal 1038 zcmaKr&2AGh6on^A+oaQm{vffo*cFu-39&*|1)EBh03lHcX*M3mzDZ0ywq<*gh6mvt zV9jIj1hZwuE3o2vQdwogL}Pt&oO|y%$9XcH=A`;weH~@luk67yM1GC0|AC9hvI^Hh zCWe_BWHi1-06*Qm&3({y$f@4yFAampL6?4+Gp3Fe=Wy9R9Crk?> zOv9vp?k#XRmpqo8VGUo9YfYFCGFCb#YamQ{ll#_M<3g;gy=4x25ym-qY-WhYzAGTx zF|U+a6FkBi2;h8?Ul~W>j>Q@Kg7*rDHh~zeHd_%sPD_;GOc4j(q7J|#u3<`k>2;lx zS&LocIK6Cay<~ACHf%k5gQGBOv0`0^d7*WooUUtZf*Tsb+PK@ow86H)*L9JwTy<<_pMeJ|(EMIkbrNI)R7fA3L<6yOH@3zosTT+_YJ`0U zymc|wZr3lFg>HhE3KIv{?PA>pbbD}zq6K~*7{9Gf`>TW98MayV0ab_54~im!GwNIB zj!2m90pJutuw4_vO8PF=b=_VJA5v_vDN&FW-#bs}p I5B~r83(qJl#v zobUVn-m8n%O;SVcdS#L9$Ld%0&U(l9JKvuS4)y;=?pyP(p3XS`&58UR#Pvmd{N4`7 zDd8{UR2^qNd(0`FkZZK^q*Hp_dDqGC7FvADDLuij=tTaV zc1oj8Y0P`_1mduZ%gFee~#BLk09rOTVpW)DA>|q=GAYz~8Xk+XV8~YGq&v5`U_Na}0 z*eM@!<{xp&N1gdco$@hf-gU~yoq4J2+}P(n&wfP+VY1PjmVs7 z4qU0%Zcmk7Dcmi4`eoXD8M<2LJ^NK2oQdDFUk|*oHdk0(t0HUr_@S%yjao8t|JCwR z-CHdzR!!E;1D6_Jecg{%A`)dY8!eTKQkDE-1%;FgUVeSeHznpPwMwH>s8%-7f^N-8 z|4)~iy|xkdYNpg2zFJ;utZY<#TZ!Sh@>;=*)N0@*RBBCIUt|~BpCMda#K->{g0e0! z2r))7&U}Yc?sRT;$pX^BRi`9t4I{DC$ZEqmhOc(n;T*%t6Ip!T8{k5Mn7OfGuN#FF zUhj0AeiuL2SIX|WVzuD==N2OUwctveTk+k3J9X8)GCS*5`8LO0zg%_~8cVraEywPw zSJxYrdd)q*(rB#t=O!mNHa2pNa?P)MOV#>DuI?>Q7S<~HQoZO;uH9{{)N7NCrO8@t zGLrhag?v8OFBN(XeYRR|ls#X*P6R*lRqEiL3#u$5Y(Z|ZHLrQ)+ZDWdt!qx=3&jQ! 
zAgtUjNKKd4Yxq*H1*zBvDfiv_lG|884@jY!nZHt4Lx;JI^|fl54^(Q6SU&oIq%6Bj zl`0!@&K=dOe6?I#Zmf(WRlc}VsMX3$l4#*Q+eAG~5FIT)$CX)67eis&qu&D>Ra2yeKtx*ViWO zM3}79*47(t2@@}fbD2^zTPu`Gn4qYt-|%8RVeb^G%XP2PSXp%&ch}12+!J$`zWBlk zx9+(ozI^GGYgbR0S``|Fyv$?1u4rbM-i7sQgRMF}d*jM!x8S=K`D%xz=5RyitaMs1 zbz?P(OlcP-Nsn!mrHfVz=*>Ht&tghGtnO^RR^B$h#R_V7yIgG4y_|b(NfKes+^Vd( zyjAw2b&S=edool4dTl1`8HYQ%kC3n46 zd3(LA6T{^bI(I+p!rJ0tX>ppJe9O%aysu`r-zb&Jx0io#>nSSXJb-oMne(YT5wYK#C!WLN?fDW}E$MSd8F5mPJ`C&z<+TO0!2(`^_G+0ypI{Ai=U2SruP%qVM%ad2@ zMO~&QU-RlWvGmz}eqy8U-SXE8#qy*A0vQY~ujQ^Eg$22_yWSZT`5ZnzFP!1Q41fFh z$sR{Io!y@~gYRcE-I-kGQ1)2YAzU5LZjM9-Th6}7u@0;tW{Hxku|DrCBAWgA{A#_l zj+d2h4(9W3uNSK3HTL8QUGjPFG!u^U;fER?AOyC+QocsuS&f0k5xhOWQK&VdvC<6b zzDV!_b0i$&JM&qw!AHBhx-%WY?{H>Vu8X)A{OlMmf1Ke<`}VDYvfM}W?i#%lE>XUKHvFA8j25s)%&@MM0Y zQrbC)B)({kR;v|3ZyS{w0G?TfeZtFP-Sf)+TD>Os4F;?LjmYa!xIx5yt>Lb&`we%o z4Co+;3cwM_*u$Dbphi@>upo=~!U9%z1MW2g3lZaZk8s@H*&%rRNYIZ}+iUp8JR@-G zlK}zO{c^szUaHmd^;&*;Z5<!3+wu zpL^~(_YT?xLVT@?9d@N&#f%%pS5V!B#>_N`4$`U@MoG4eyVK9;XKHLra%K8jxxpN& zIo)}4*GTC}8KZZ`1=e3UEi+&7$LZE7x!0PlU=+EIOA)zTjw?js2gpRLjiuZxSJ&G; zD=om&qfd>y_?sGcXQs!-UH2sZyG!LlV;yUh5J%+#DB!2=JcWg$QCw+Lyupgh6xo@- zv38X(OVNeXq-ajJds0`S3wQ_Z0K%w}Ll>SZPoObvs%q#^dB;gL&!w`Q{Z7XPA z$&R1j`3qS?g@eJiTDX&EM7u6GeZ|LYXN|MB;(aysB$|W|`${^CQxu&Uou%XM-TJy) zEY#cz=QYWA7r=3_;X;g`zarZ{MzT_aYulCJwuD!Kg zn)vG-b3fxQx~88)@ctpFOvgLU;6VfdZ^3(r-|&&2+4;U;*vGgFhj3fC3;%}P1<>cp zFgPp>gI?hxfR5J4GZNV+Tm;a-LN2Fq5rm-5gLx1zE0&#xux^!Eu|0PIbbOq?UKzh`hVld5MhQX5zomz z>XD50o@a28!6gQ-Fd$1nYjBxYoEb?nl}-3IK0Ya6lHnlFpSWJ&30-GR2z+%B=)4fh-qdSqyy4Lrk+iCzQsB1cuTWOj`V5Hi;1U zy?&9#41!H|$m>N3g@20zF98HkM7Ia*7J|#n?l2b@yG)nz4X;oGh31#{kfIJV{bbPA zb0V+VB95^HwV|YS%L{}sG5L&<$Z$6!VXA_h(>{pWh!6*)Z2;(%9)>_cWHOMwpmafM zO4FFK6xy~R&0iH@W@r~vc@LCs9o)O3w}sCX3Rv3{+nh!0*f@dymMu0mt3d59a{A>-hK##~ z+FcVcgI0ukqBcR!?b5JP_eb1cB1=q`2$r*4u9YEL#pDDp?dp}6F3q}Svv7v+F-M8+sM4l!Yf5X9K~O7+zg{<|fwK^C_suO&+c z0Y}2he{8QTPp+Jugl0&jZEGGR@?>%3SI*{i(D}K%Eapl#lj$+~G>AjCDn?2pMXXO= 
zFH5qpS60pjJevv7OUMI%Pfd~IVfz{N0mp-TTBJ@|n^@kLIs9iKn~3L*mXbSRIWLf? zbHA_DF4&7$NjlOf;fNHr^9~5>AB<6{=~S$ycA<$?R1&1KkAk-*y>P*BhNho-+H}=U zl13Mc3!~wL!b_7s4)iY#G)Tsj&!V0_>;vY*~`h!Gt2IUw!H=Y4pe?B z59XqdfDb!B*nIeP1`c>EO50`ebq7qFA@o$O1`4N2jk6-DG5+ks=Kww=PyK&Q#!bMZ zQO1q%XhNM3W!#8Ph$@Xhg&}0zfQMn!Kkl1xLq(CUxPC~t(GO7$4$E>r_=8p)oUPm# za9W`3^7%9l&XB5kr0JBTa9}`oCOA0V;N5h9vlHRn%%t&dTBO_o*9JBJwFuV+Z5c{c z7oh?Uh-0dY?3;~nZ7#<4ggD;@16p@ma&FAT-^#@4zmeC;wv~<3zZl-)aBL1n=Savb z`SAaVV#uEJZ}xMJM~U}TPR_nmPR^H5->`>ObuUFLI~B}q;QStMsLag>R(FUiE2 z$m|ns3bJv|WuLW7oFlRK5oP1dvJ44c<<)BlEE|W{65;Wdjgv?-MfCgM>l41t z0LJqHuMgQ5y1QDaO&AsDnA*flJGF_Ky;GZ*`GlxV%%!t-jHYdg+Sq}{gyr{`n;*LZ z#ZLJv1cYDQPJKdPPsm?VQ1yMmnGtBgHeLsD8!$7#HL%*ZTlg7@%yz{jA>kZjw?s?Q z&QfHz>s%wX+nVRDI4`NveHHKTK#*@l_#oH`DQz$lmE^~0s%xoo{Fw-rjT81)uqBxl zX%v*Xf!V;X`Zr_5?h%}A?hxXdT98_MQ&kO~LJjQ)vPKyMo-F#@Dn4`(C>-2gd^Z3b zIAd`jmu~)0|1H3Qw}=DpM}XJAcQ_CrFjauZBAgF?vK9Q>D!>~MNg5CsM2UguZcu?= zDp=eqdc(dH1%4uvB=8%I0Ke9Pw?Nld#E(YTSCHCJ6sd`P&5WElPHPFxj7INFEBP93 z5E?{a&c`W8LuqM(w4qe^b%OOe$skRTwmtINB=X{8&QKD0L44M62B_;o&!LQJu9{vA z(N>pEA(=fx3R7Wbrs&1&?iq3`ru|~Xp)TdBIGr7B18z#l7Tk)0B14evOsJcK~LS16HiwoAU z-0ZzzEt=^kL$Jm^S72fo^9rqK>tvvp#a#CxV~a@GLj1D_@-R$%x@~6%NQvSYMQU=t=i;)OLevT~@F4V&zMry%@ zqS4PqeEb}Otz0PLerxEhCf@IL($55lfjKbENC*|aod7;tjOAm@rHDZ<)ld`Ar?OgD zE<+oe^76cV@$XBgNV1@$va$(lX_!9bbZ|4yEF+R)*mXa{-XjN1?G$LW5xB zqh&xktBKU^qFks2nZv^Px(7>KfG#nThRlcjkOgMAK#S=@th~>;b=>yor6Bnrlc{cm zXhN)2?!Y*`b)8_Mzfv!u zS=qD3lpU9^%V7MXl#;uA?Uie@FI;+EyMqiR{!F;nB;i>mO?`gcy=*)?u$pCWt*(~u z>G@1;$%kdWFn2va`@*%CUcUV5>*MY$yhM~?zEPt^)}knCY@6@&mV7T?yYdD;tX^?_ zu~OVg?PlUlM8z(RyO+FjAuTcdRxHECK;45r2@Sw#u9GrsAZg@-GGy$-2F{CyhYUmo z%E9YmV}CiJ|ePsv9@?|>784G4&d zlFkm?%4eM$wa+`9DEd~v^$zp)iMf}#K#>a}{q_1D+d>d>pw<^Pnwd3DS7DEunBvWhejOD6y8 z%60BkNhgP$MYshEYdq8J)+d@nv02va$3mXh1-#kEYgxg^_NR#un;q~3((I!}xKy#( zf%U1`_rjeb#1^oB^+LsPQFWd|1Djp4tu}{rg*E$hh~EGVLiO*jGC{9xo;S}9qB66Z zb9b{H`4t$4^Z!Pgn9#x8Q!!y5&8#1UQT1c^eI&es`L&zb57p^hW+-!|Yx99ET^um4 
zW`WGkvh;)+cT1IRBAow0JVjtNc%-{qO`?nV7W~{Fh9ccn_^kwnO7SifK~ZrRD1RYP z6f^*d3j#*ZE`ijR-7es@y|1slCFrLw^nEh5aClOm4IZQd?*D?-D0D9 z*FY+G?Qox@j=dnT?*^zk$n{CQHhl`Kw!?Lw!YZqI)dp61RFP>9t6oakF}DX+hU|zS zE01_3f>M5fU*AHI4xhZQ@qV7c0)qmB6v*T)@&-{u8<^w)UPj9nK^}NAj<>|%Y7o4@ zD?*m@yT(QjrS#KBM@PgikFySK&=C>TMSNQ5IT;QUTzrBR3)9YR{loyq<4-KwjGj{# zyr?0M!uZ0MLB3rFP6&l?5Pn1)GDbjD0qiH?Uckh}KIBpXAr#F)i&z{7g)FFocc*e& zJqa%QlT)8PJCi&6^z_u!$HPlf?m&HM$p<_aP8mRnZQ3hTNevwiUa!+4h5p+FC}|>c zO{nH)4ZY$03056+YZEda*j+IZ6v%2tSH$GBK%>_^i(vtdyi7ggj!vIJ zVpVOf-1QSj3!G4SM2=~IXqiP;WBLc5_ zvFfDhFU9nZv{I^T6wl{V?|~8DkgYwqq-sgYJGeh@B|0nPiuYV{;K=}{70_XV*~cig zoWMO5uORr3QFQ^IEv5v<6vt#4RjEoiQwf*1Xi+Aeng`f2gR<1j2drGPdQzL76PL}` zl+!UquV~I6xe?QmLeL$t0@^hK%3ri|+O`qQUs~~cd8zD`YsG|<8{_6?LX-qe+r>|7 zuxz>;aFtOg71qc|Uh?YjB#PxB@T;L-U9kWzfVCYlhF*r{;`$<-)UHl0m8+FI)x!EJ zoX=_zVrtF(L+h6AJcz5170+XRUII^COeED6jLvthcX1u=vf$z)St;KoJE{(*&MH#Nd4L8uBaQN%FeOd0$;wik8ohAjYvVqUghztn)r1)hV`U{4g@( zypO*Q=Y2TFM24c4jre;|kOQ+{>PO0FM(NJ_o^Z)dmLvF_9Zmy$PWAZNMMmgSrVUg#XsJaQu%wjs>>F!`$)VRMp=dc---e)ra5yO8AqmCrC!ax194h)fk=$48efXG0BMfF2mXVo$pB-u z&0Fpr!+kso2m9IAYhM~pQ49q7jejmNOW-$Cc-J@h$7r@TN6hj*m@_YO-Fa6|z58xH;9y#-j$xEhamKP2YMd=AR5Dp%gKkW zfF@eqn-7jILWC}E^onEd#B-om&J;CfOayx>mgz*1s?zcVuHbQ%5{zbFpj6rHK@(}a zF_vv+?lgPus0buMXu~6Gqn=+Z=2!9?KTlehO>(l2WKL!_C&J!S1Qd|cx%1UJ7_|O# zxiEFCV`wfU9YY|=RMJ~UFqZWyyuQicY7p@4Hix9C?58|LzP}Xq)fD74 zn`d|0S2lwp#cKTFe?kW7u0yy8y9=9bhFvl@*0H(oMaa}KU}ez4X{fw@X+&1t^so^) zCL&WvR=gihkC!y@udvn}xK4IQZ#PU?WO@AI_#i)oHT8cHfvIlR`36QX-Y&LI$N6Er zuQnGM%0;=w?IH~zi>6ryLct$#L*`zmlrMvYNCLx0mq&obGTQe%kt3*Eh z&t}O>;b_N8;fjgTqhfksPym-lh(Rf=4nQ3QzdGYYrjI0QxXa~L;`x3g2fI9VY(Akr zGI{7k;G_hMnA#c?o7UhfQn2^b)yM$t4*7`9eV2SVwI<#1g6Clv%c3<3}Mmq9O zkS}`?9RMthF$O@Y*T6HlmQja6j6OTgfv7&|u(~0R!%)9tedG6T`7YyDz6(qS1|0!B z7`X?<2V{sm5X%0*Y<0Eki`p1I^hWS0bt;7 z3$qRM04j(AcbJ7KaEH~46-Z;A41oPNFkDflIPFKvsS|>cIV39wUJn#5p!^`J7U)d$ zB@qlz{yzHvv?@{`7`6`p>ZLxg-##GPt0_eYL8PyZFF-1G47i+_5%=~v$B4~??rd|R z-86C(C>D@h0om>o1GXZ)ggZ{GU5=6gBoAYa94P#4>_?Zysg}h&6sj%dR|{*pd{t`j 
z$nGmSPo@cfUaf)AX_P^HDn1&gu`W_29!;D(qv@e!7W@bg!$u23mYP)y3vj5SE6+m6 z7PgP6;2geYL!=bTM%UeskPTo3%<~Lm#g_JZ84hu4>I2!vnd-+%ZC~e0R)tbLKata- ztpE=uYzARXysN`KOu-*n7EoEK!4>hWSLc@=mB0or846cnC^4{N0y* z5Xw4lXWq^p%Tjd*zu6<%3mN=oH_z{6V{cVpk+6_I!vqa?rB*u;HyUx^~F^81^TW zOD4mvG6Sy*fHKSsyIeFgZhWA^b>oY+36Wdcg!U*@LZjIS{oJ>(OGf()TbChV5YIEh z`U}4qEVdYHhV@K6UATvnX%wIO+b*4k#cS83T`!)NbSf%4+~%Y62_|Rl7qoB@w)sq- z!Rlpm;s0fvkz%GNa%;DuzbaxuNdN~V;(`FnyHds9nVxxOJY)f zyuXBhvM%dN+@H1*D*<|!Pi``hRd}dat*nt~z+$yt_Wm+%j14F;(qrC8b0E^89zZG% zSka}6!aVPN=Jmr2egwgMk6EHUuIL_yE13r15IbI#0hrX$b?|U}9W*8WAFhFq<4LZ8 zhoG3)&)<&Wj>kF=W=?f z)y!6I$~&}g^rF1X`Ga(QW93Ih5ppFx86TtzoWvt1*vThZLYExM7VgnHJhB=g1Bt}F zNBgA2L;B=NLptU22B+cZoB|&nW58`H^n?uOI}s_n9Ig{l;}AuRNhT28(FI8 zZ01pb2zqxO%#39_G7m$$w;Q~G2jCiLFf-n9JaZi1J}3NvgU|;)h`X0ES8*KEVTpSr z)1N(zsn(qt!>1eZbSKme*fEK#XEW3L%>hlvlM^W5;(^E}dX!)53>Oh^Wbymc7lY+8#QZ;qJ9XYIynIFf8Z^Qp71f)U1)&O@{y^;lo(rKi6CDTLc zz}65<1Jpw70vrQ5oUm#^!^4~bD<@=L#7Mv<6o4r(0w@B}CX^>&0NME1xUu_#Vgz0R zS%?~dbcARP6d$~7IfHX!`wIuah?%2&va(bum!d2ZtJ|{W)YJtdg-mSl0ZF7DOtw*x zc`bINiV98bToN^j_yL+b(is5~3HCY)X(Os4T7a5uBm34zAF1y~*sEJ58`am>^!5O` zlnR=%f+x7TYa%S~cpl+i=%ZM<^L@KsZha09Cg7(BYkHzg5L4vAMjWUB>6`tYI}nPJ z_ED1FPThoB+ND5WZR?zN>x(?3Pr;%wsyUB!OZP%@YrP8$hz9i7z*D?R+umf~6JCF( zoMWfjn4jU}+^ENfnq9Iucw85~zsZ1EH+${m(d|=8YF`MqIh7x9FN5ig9Plh^5L2b~ z4lKPV5br!?cQo!i-I)^->a~X6&*G^fAGJSJro4zx3t#^f@W5+7P8Z*yq9oQ{{E1ov z7UB2QVTqo+Jd9!rKhPD|l?-2yIVT~aED45{HZ-PAI9kZv$`x|4IN{ac3@8W>Aug2e zEth18BnC8!h+`?HzwiYd>B~JjoiCQ-$D^dF2EPUO$*We4l0s84$lRUDicawR-H=*t zGm)LC#a5GY3NyDfvxC9EdIRpwTvhBhB!W<0q{pSHx_^a6!=bB`n7%0#m>^#Uw@MO8 zsWx+K9bT4OT4gy|x&pV|5Q@O5J;=5?2^fT$!H!D3rZucqU?eN2by3k26@k-poLq!R zT9f*9T;e!1n%?33RUB6a(>u0~8nC7t?4ZWoKhJyZr%I&X^PMtOm{UG9v3WZ{Kilk) z;`1rq1wkBY8^n|GUbuX)qT}8U_QK^Q&R!V1Ouht=X;5Cm1eD zs0&OwlEwv5HHlH?GQl9nK%kZIJ%rn_z&K*6G*(6o4(;Gj@gVk)L{{#%qmP20!Dj{ul$oq19HauQOnWE}6(dMb(UEWSHRIS1o!tp6_jAD$f~2=W ze}%OQkHWU6drvl&CN-PMWHu4Vcb(h4Jm#)oQ>QlcVj0>(qT~#nA)NADa_^QKd(Uau 
zF+$W+QNTtwrz{&Y8|xj$oCNdTK#1wQh@H5gQ)WhbdI~t?HSjb_*zXkJZl<;jA2i^i zl$VLYmgKO?4V+m{ezU(;E>@QA3Ko>ZoCR)*`V6gFaiDvn0uPG8fmacfIj2bp7;);3 z1#g=Eu?wzvSgF*Yk!x=e38c8HF2V2p9s#5KIfj` z*{yowu&}pJ2x-)sGxBOpa_xB2vDo$) zFm^WKY+9!USxG73v0T8}M%?C`exkT3TImb!>De1EUpZ|o$Do?2CxhFOY;zu?U!7_> zsHBYtM;n!=l`>Q^dDeG&k>)t1&|W`LkeMOz_a>f!NQ_2Fqfj@2{bUo!zDlFH1|lP? zSlFvLW&GnGyjRqQql(j_%oMQW*j6+-B1ye$JDd##6HWqcmUzj`udY`c6|iJ6UbebY z3R$7@FugWKyT=l~_HD`xm4lFDf~2q)+2@UV19Nw`Id4=j@v(FW0m)&B42(F$=P^-d zaCjm7tw)-prS2(5l;>=UOYW`yjBc#JQ55=+14uU;JPd#)XR+&CXZ_fNa^?%xFA5^1 z&jVIpFg@5$5f-5ID|1eN3#vt;?Om&5O})=jf;?jHI$8=l;XaxneRvxzz`B=qK7C|( zN=v-*@_H5L5o>=%z6JgDs`#wIu!`LLNSzi68$$@CyYxmNXqmL|m&UEsuI`nMxe4O}FFYLE~w zV_TR+Bo^!lLHlzU7tH~q9VZ$aWUi|~K-lz;^MU=zCBNApioTk?cC~IE4wlX6n$U|F24x-(smwJ|2o@TV3457LzM)Q=FM&;f|~o2 zR8VstHRFpo;vH)(CVKP8^%wImy?*Jnm%s4BSLgCyzV!OFOP61HVJ=nw;BZ9$AfVSH zUh#wX;Qa!tc%H$x82m#9zr^4lG5E&}ewo2PVeoARD_J4M6y#2e*jcCPn<)_*^tPJ{`J5Nh#!n?cr)+kjGnFW?-`J;t^y})XX zKcqbW_6wOC*l3{H&&3`0p_nNS4c~RLQ2NJ$PSxLWN;cpi4Upe5c4OIvRVplkmXJoG1v~RFTkM3u@WsH9v7a9F#%AF7lpH z&P?ncEzB&qjNR0;6AV$PD)_8qq|PyYNEi^Hq-bs$6Dd3NeQ^glVOX>})|fs!V4(#H zCH&9aZ9St0B_$kOTq9TJ75elUy@LusD0$(uP|xc4C*m5zju#n|sE<8h5G1J%qmH|N#xNMN8xVGdCN(soS^o{%x6s%_Su>E7_wcsB|7=O z+8>c?P^cV@n=Byt9VeBxYo-cSF9iL}MWJm11A8y1n~t`R0vqx~VuD9i)_hfW%mQX?q9){k!7*~s-t=(=cFaoQcBQ^9GIa9Qp&N=7 z4MHnW5XotLe#>-oy~j|V(H{QJ^RwPV_&U}vh_{)otu?y@VyT|^58+<3qp;|!tm^al z>di2Cl8FXY;6?qO`TfaZGC!QC%lvTAG9@wQ2cs(<6fCh#mOU|eZf5Q_d(4vF%v9zN z2T4t&%(NtU-H(d=AORn>jh#~?fUfvtRVDn9%sz;shVbnWeh+6p10$J3nI}3P%m^7> zq&_Eq`2V}F*9mcw;H+Kfb$$-;w7p*E6%44UlB{wcgGQ%Jj@?K{B!OKIv1q4oK9f6> z;h}D2k(wKjBCScbF)}xiaYQ&F+`Ni9gIDe|&;ck`nLCmP<`L(i(VW^1H#6Yt2=oMp znC{l7af>2YuJR{CQdxQ8R%r35x~DS3qFgrj+LEhB3;MSzYr@kl8=;f#4ZA+sQCAOG zfnqVn$pazE)EE7|Q({q2*$NzQtL+(`=|<1f;O$hLy7Ln#L+c$Q`-xzxyZKaj2Fy(V zUJWYVzIscE>DYg=s0``Ye` zO(xw~tY!20?ESyfTi7d@m2CyR%1fb|2G)BATXcThnvkVFO`s&BZ72;Nb~7x+UE2Lyru@Q@$~ 
zKq3eNAP#^a0O9}$0(cDlz}(oO!k55C+8#(*E$zKmSkb*_H%LlT#GzDF+%5y0*Hg$3|njGpjr09XjEpk-uaY@%FXHIX#4Jwsd{N$HpEjG+*%)O~r6WuBin z+(bE2alKTlj9hIYh=DRL}PR7)lfsZY$leB+9$0?l4|`NoNH(#_Tk zN^k0`*p&$Oz7U&TQg(pYq}ic2+;r>&mR1Z136qKFz&gB5`fc61rxr;v=40FON6sa- zJxeYIZP@CJVvC**K-?NpR}?*LDOgZd)bDJ1{inC7|Bkcy2n}Eo122TF^E}S`88yo# zj(`h%X`|)(V#ItyF9jvJJVc~aSrV7Ijoj{S#sywbZCCkLp#)c8%thXKp1~ysuP_ji zB4vYW$lWL2v6}r~rkTGw#aiIaz`QFd9Hp3SY+n-aNWmf2Ndr-d(tnzzBv20l%!6G( zJyeNyE?U%c5Nw^$M43{&?#u)DjmslHWTWPLz*(!+pyqEmJR^vuxv$_LdY~gLh~l8u zmb=t2&?he&Ig&R2EBYB z9TCs*!YWIkAZSx8nZvq(ilBsGJjKEh6}}G8r4-!N5-m~PKSMXKhzCj{uyH=P>D+@4 zQ^bg_K9uu`sG$4+j0NZ>3L3;qs1)!L#D&01XdIFQ6b5*SIrl+LA`hK^WCi!I@E|4r zIZ**hB@aL;A_G1BhHB`G`U9UIaZHRjz-0{ll|q{WnZWlc1dND*o_j0GNayzgZ60@Bd8`?9zP zG+KSgS$&vlB^kJ-GEPUS*`Y}xHpOdv)OZ7?S_parY9Roh$eI84;~$*MxY7t{u1NWC z_(~%Sf={##eqeALZyreyYrY^Mz8LU&R1hd0XRi2%oA@xjz6SIKUchkytPjY@gp*bP zR`Z$%=FkmuV-FYpD@cL%pfx5__!#(qX16qwp9IhCIv@%7zl#vO0fH$6HLRPxf~}Mx zQTEdcx4y1kRjK6+y&CI0>^wAVK$s_tzgbBYlW40fz-!z))+z5jpsh5FCI%iFdUS7P1h!T30n;zr@;I&x$SI=9Shc%BB_^Wxu2$DLP=D*_^hI#I)&rD#4jK&r3|*h+3RLqj3O+=714$ z5oqI#QNWeJVji|wf&Yj6QmuI?DyTAQ$IXKrBITG4ulFm=28+4kH8{-eD|lYvZoa(l)`X9D`7ijhL=^P#w8?6`CU9#9^Ml zAZ-@UJ)-N@1G)g~-JGrthp)(0AfY=UMsAV{w|juo=mhy!x6)RWh+* zYaXPk29)V*xAFxXv4aB;8X|sd9#$i6`f$yy-NgyRwH(|3ub5Jr-pi{61Y;u!pxNxT z8$m=%?#KAJbX;0bN#o>&_DgH?9O%s+EO2?8-|78xByJAzB+o!iZhpU<0b%^`iE5<-@piQHwal5k&iQDAgdU|0k-0I5=wN#UBghP8%_Y$ZahWLI{ZBXX=P`2F#8aF z^1uhrar5j%=7|{4_5M9dDY7X00|PSWB0l~b2+BaffH(*NVKUAB;LZZY1S z=+h?5)$9qH3PyGcSK8#XXQ<^b-2QIxFqRSw9xQADj%j`H(Qa+l4J|uUDsqnVXAmEr z^Ku0r|9>MOnFeA5Y;++xFxkQSZmH6bdpy@#lrwn|ke|Yvx8$d)UP?aB2PJB7GDZ%N zhN(H3@mNwo%VKSjJVC-DZ2ADeYo=|84YIVl(%ChjG8l;P)7CkH*F$`yq8plo!Gs z#7o;%PxA)%P4D*^$ewwFSHYgC`>Sl3&+rEKP1z@*QxhKM{8Nk-?>VyRpXArXrY2h% zH!SbpF^KO+q8j~Y+=^V>bo!~yJ(;rY3}!`V`+!}j12yegIAokyWC+EQs@Nu2n;eNuUdn$#Oa0b*&{ngxW7Tj8Ja+F zJKDz1+K{OJsj5*R;9))tE%l9bH$*#g3Pf?AI9l;zidAGqRG?h#!=)|`R1h0Z zn5jcg1MCQ-fZeHB&cd(Nk|oVl-<&5=>R^?r5V1YQyGynxD<{OQ%aYfZF%fUL~=F*fsK_s*`p#=;f1U4 
zRCZl;AZbT&6(LM90P8GvcF95VB3Pvx&&r< z(wazJ0d*@PWB`Jo~T7DpYqLy-?U|a=|qWCH5 zWhxLqX=L1SY!cj4*b}$crBor|WvWwz8*wjFIH1treq4y83YZ;ff$J^qr&14~xfrDx zpHc~>o*_}{`J;A9J){k~Rf1x)czVPk74SdAOj0nQ7;V^2Q!z-1h=^VfomX{8{e#DQ zEha1|v7fxQy)U4;=Ac1+fnb7huO)SdAdau;?Bt@LtvDc>gQ%ywW2O zgy9?sQtXo`j0&hbMXi#bN~}3T zHOX??mLbY1@X`F3%hB?i$6K&-pUlk<(T%wQxbSEtyg!#Sjz$YO+9P)N1aAmc^!Ior z4A2KssWhR;Xh0gF*MxKmS)mKOn`cm9K=ML;jSC?+&98ywgZC7_{s#tWG~E6O4JR#- ziVEc>wSAlke}Tbs44!9jk%7qMrCvYHua_BIVQ@7FUf>mJ(CghGf|%YJwo-A>cW~`J zn4N}Qt8w3*eE{^{!|;8-A8f>9V!hj!Ih8q=eb##7I+hp%0r3~}#vQfMjb8V)x7YQq zvAUmUAarxs4!M%Xkj8q9jUgXna7xW|tywPVKbq;rXuy4V@FG6`s|ZqPK+JVvSO%_J zu>fHXV|5|XXFYVi3mysBnL_%#HyAkP+UFfXu7#1lwr;p09=Le2eZ)mbuzf_~F6@31 z#G&qA-(5#s;Oo=PMh9w`Y+UpxIY7oQ5 zz?6S$E~_{*8Hj(SP{UdDR%xHkhyuQ9V%$OL&sHzJax|%e?zd1RAP=C1xGrvs3MJG= z4v4Dp_-r_MQCr)^a$|!Y$HLrjq??;s<&TdzZKKs0Xi1{V4Ro`*K~f1Too&I|c(jr$ z=R~p^(lJJsPO^%ETY?Mg=2vNi6nc1)5*n5eC;1YFF?8P`ct zsa<#vQ<|D5Y6;oB!j`YN;8+I-RlC6>dX9=(KLAg=J}if4VTTHe|Cv$^cZglumojr&RURwje) zyX&Pa*csHeo1-CcI}mVDt(T_Y;pmb}Y7_gLF1*3k(k{CfInuCh2M@Q*E=?sosFlqo zUFW2-jIGerZD=#Zz?C8>e7NT7iTDQ3X;*#8Xs_6jCwNkp$HWB}(W>THnY}$%8(l zIQ{+L?}~P$xR){#Dk0J+q!MzJbed>e0s*E9sYv*t3ZhL}0yPav>vt$ws39S!PSAKD zD?#G{XF}-^LNFZR4hj&N^Mt!$CEP$_8vz&83898bKQPqOyn%<*wJ|gukyS|0;YwXg zQ5I(WNF9z+Y+s$TzFn`7M^kcRf-d?fj%@!V;u2qF6h~O5OaZ`$C-rc}pTmn%VVKY? zC;9ahgU1;N&GL)9`WAxgFD9H0$zk6(juRt5MP`RmiLp>*CZtYCf`xK35t;on78Hui zwxhHX(z1};ir0UBUoewpp3d(JriL1o3ppk=Yb#g#Rgx@cQAOu5(p`O-aZp;M#Ns;0 z-JrT8rdO0Zvha;`Eb$I^M{#ZAA<6r7_F79~i?q#8u;x^N9(>IDM6%>oM+b?mzJVg{ z20sD0MZ!3p+`_C7{zsrv0*Wj2gDhe;BXtvUi&8gC6-%dKnvnnlF~+0C_j^ZCPT`v! 
zaRTfmvKDpFM~E+wUWVrwT}O=!4xS;)Aqx_G#^v1ILajp9pH7PjNjuS4hf750XiY|B zX>9~mA6-<{h8oxc@YBG2Qo5n75JxWhcp$?{J{b{RnPopOZEq_wktYo8hD8F;BK&i8 z#?kkK_w+1Y6DgYe92U8x7DYJXd5`RDAypNNmvEE>z>U4g@ zaefp~1k7eZ1;85bf){ku+N(klhWY^j9=HvB08X`Kk>diV(ULRUB?qeFNwCX%j04L7 zp1fdK(641O;GLTT7ev*7n4=GjaQuVcH=47;$r>^TgPVpJSp9ICxfp8zd=Pn%LEQx_ z4{)Hp7bCf0rIlJS6_(RPssTwgOpZAb49sr9T3^n517D@JWCr6v*!@oJF1h zG=t~WttGIQ(*fg-^aqdw?jCgBcbp~Y76ST=uumJv0e^7dnRTwBkLbY@1&GB65EHE_ z4k2Y0WwS|{EboF5kFTf!Ud|Du%&=ytD|%n-I#6k-3WbO%V7VKWAY}Biu)vi!p27mh z4U`;Vfgg(isv-uP{O~{YJoHFoGI|^6E$ao97Bl$E@|~A(JTd>l2_Qavd7TbA_uXuE zZmerobx=EzUd^vg;tD!5f^39o{6$7&z2CsUX66>I_=Kjq%BvT6qxXEZu)0_(JeTXi zgg}h5`NT`%{ucm--1%x9K85_}aG?p*UhJjF!&?9%wO14DeHd6W_pL`B*OfP2=l59z&4Z#It;c5MeKU;sSk){r9K+=gIJ* zyZPrXvj_Qd`<%_uNVlZ+p-t4}Io<1JAFhu|C6y~2ONFuv$XJC2I{>wD83H_!vWJ=- zo0T;=T(Sr)i9ck1X+U=*B2tJ0-T5B#_Ea@Yh7-jCX<~(DujP)+XK$37*~JQuSx48+ z>v8JRJ^uWVQC&A*`}u@kOBZ7}{}<*T4qW!RK+a?AY>t_jOaji9j%QD24nU!q==c$Q z`iL^|e+G#8aP~38oe-S-VD@mQIVN-@?PVoo5`3?sinbS8Xhfa=H-N?uhgxXS6GJ;P zV<(#Zp)#_}?55*!WQ%Nx9qK`uJfEljdtOHka&1+8Z!#b;?a>W{$3;Z+zC{fi=Z)ekk-Z>ik9gfXz<~O69w%Ekg~C~a zRc5tbTCbL$W99vCBN)Uh>K76~oujr2`#O)jdgSdRcaOYs{?A ueBsFFk9dPuTil)FFyKJ2C$j2u!>?&Phl$k!_5CEl-I3z`i)WR7F(uma116k8(bT@z|8{N>o z4T6Nl#AV5&u`7;Z$Ihgjr4moPFBvDcD^;m@iS1P0Vpr@ubj4M6ma6C_RjE`SVpqla zzW+bxZn)h5C}_qb4}dUBzePO0B14LGGir!?f0 z_Bf?sr?l58?Q=@|ozelPG~$#FI;BHS>9Di3->y$vW%U?4U}J|6`xtA?*g+e+ z2eFT{5sV$Ov1pAe9f7uYI^~RW3ynMGl#a_aT6w}LJ>k6PWOxfLKIxR6@{D`KU7|b-h0JxsNzSWa{ylrB@2~%AS6iHeZIWmU+*9l?P|y_w3hwZ?0Z1tgKd%wSD}+)%toZ8M*grd9m)T z6c(x`>*l`84X?iDM=KGDvYCyR%0;P4exZUw$^|dKw(6S_^OahqQ7Kd_8)!kd=A{3p zOU+)}2zxbCY7Sp5uQry~E55D7@b&U)!Hd*t;3ZUQRa;+V7uug8TwKD({|bV#E-(l& zMl#M^hg0r!Zgt55(!o`yBx?;LvDCBwc2DP z^~L#oKG!c5dL4bXQf`zzU%pNRKk`-T;GPSrEF)}QZm~75d*wS7ym_r_PT~v21`;5w z+$l&+7uRa|Qm+N6*as>1-TI>2SVj*>p_-Y$Tv$bixsA2eYMBpIYK>Sv`hcV?yNi`7 z8*JX7}F zg?sM$a;3P8s@eki)>i6w$|cvUEG;+O0{&dTQC`)|OO>j0MBXbjl4ZOoHFnolC+tL+ ztkhQ58g2;_FNkxQQZrjEluDSOsH)%aVm)E+6sk*guhLjvaU1tm%jewV*Dt^N!g06m 
zxyQeB`IT!|kDFQ*8il;fW4^9vW|-cEwQ7T{IyHOq$|<+tyA}CrhoXRto6NyPD5JNq?Vb0vHth&5a z_M>&p7rZ4O6)sg|Ko-1vvHWp&Z3XYyPp>ToN6^k_&3Bw@q|Tf3tYJ1LUdUy4tyXzw zt*jHn`llDbL;NbsBxn zyB&B)E;@PfZZ~UitKV7dat`5phIJUwteg*jQ@;b{DQ@UuaxBd6V#V!o8bi*lJ*?}G zIGy*0r1rx^N+=qO`nv#zz0SLx&LS#@aG6!h9nC4U~idi_OD_E&dY+??Bu}MI1|}b+>=-fWraL<(&rq2xd(1l19Ez=L&4E(5pv?6ym@H** ze)O0489Ug_HWr(`s6wq#Y20ge$zB3DBhx>hZ}wne#?t3EyJZPPQ3mKVJ1aG;ACF;p zHak!creRx=X3w31SK*$Y_ltE@b8xBL$lJK)pcFw|vRGMa_BIyt+L&yu)*Ogzht2*% zwW_^{(hIAr{{;$eDO{&I4RM( zLN>c~g=-GzcUdo*0~#q`v}u`}&CJ5jJKj~K{qxUG`hID8@@Cz;?XMP!<;hZUseqka zHhOvAlM?#TZ9TVo&pVCO7xD3V;S3LE`Z9x=Zho>y5l(0KW=zB!oBzq3}Tnm5vuC-lhYy;Dp$ z%7-6mcz^)d!bs-!HK#jo?iwjIDP#2RxWMuYr)1_UlsMHo zCHGph6^tU+aVa8~%W;KB`~YNVwXu|YXe@iZ`&2nIb#$H`cBaW+}RGiWJGIc2DXmbOG<69Y7dW za^}KQq`zY`)c0`=i1v_rHQ}ZG50g>f@}IY1n(b#V065r3?4=h z@CLj`_zfTVnVstkhJB1XZ~(W3JMeqt4uH^BvVXsj{UE484QtF364Ph61|VnC7y&{S z=RyAmY=*Bm4Pm;1&9F6h0CaeqJAkACci=yRJAm(-xdXzvHQa$-!yN#@?_G>MkA}sF zd~kRJBEQcP`KTfa2$%|J2AB%qB7jYdT7YppD6A3?@B_k506W0Y|5>N@EbG3b^g-I2 z=;9>rK=}h~#y5A&I}jpPD9??l$bOW@Gw@q74W>&%0edb6h<6NuCH--ulX1rxkR)#I zO~N7}lD#K!Z|tBq#^*?~dY@r1&LE&5y$OEJF(4Dbn`A&b)SF^3&0vPXSq4ubXm)eu zKqWrQm}eL~%itUW@Z>+oYf_Qkc?Kln!|pS|Z8Tw-vU!7B_%)oTr&=M`r}l00P- zzKM@d%9rGKNP3eLSMr;rcSq(z_EL=aHh>@`zB_gPh7|Z0kmXpXcC7Sn5E#1Ldl64~ zFEMzTLD+-W`1SJ)q?51mDh!0O?m-;Ht%kCGKBTNIRZTiOMp+Ny=}S!U7J_msK}sNl zg*X*62NMzVtj`IhC?Ywb6a}*uf0#}pynVl4q$q>nbRF<|Q9|KApuo!jzZ21I0lS3Y zF|#XNkBdvDOZkRZsDU!`%ezQPhnaplXzJ@CXW1l1v4pgtpmnPYgeNh%ijkyn7b8Kc z0+>@ih}nqn1te_%WIF0ghQ5I?$8xfvbYJEF6NzZ42a z+Y{NGMeNu(Vg9x)Ha4q3cuu>n7sJ~2Bm@E-2 zXQ^B(LnMmH2_D+jD=%H1b<4^R2q}ZGWkBXnpACpK#o~(ATX6zn-72$=kc`L}1?M6B0~h3i`^?m{(_0Y( zTz*}{-4j9>dta%(lEQblWHrd)_GGnW$spiFSP_oxm8Hq$Gn3Hhh_r3hgCw1-jr{VN zoDMoamzTv{>18rIMxO>z$X3NjXrzes$s1)!7WT^WnSfU_0XhkJ;P0s^ayo23qdwqt za8HZWNoy0!`*{xkS;!>fxud1zN?48yB;nleE42&uB36=)G)guig>AnB!ukheRBAdE ztEpXRVilDH>1?Cmtw}FjFdU)jr=B)lwVkBV#p1$fIHB;;D5ZH`5d z(7cG00XH=%8~t=C(&s7NmiB@%W_I#|rzW?0!Bbm|@SwAFb$WARw%iC8D!e~koylS( 
zu$_vJ{|Ie1%G=Lws($ianpZwEJ~KXbX8NoIm>ib506vraA}Y>1*tED92Hm)DiGe2MC*wzMp{u-ip$8S$y3Fvt|fA zRjYx*sZvv{NNS8fd+^zZ56M&icgU~_*fYwof#O4;%b^UL{xpUS*cV1~WBI(D6JvzK!_O0=hPaQ38faK4E8hBZuO;|Tlc zc|OP4k>r4oHF6pgMwvHzlFXZl%pTF6Ap7QA_MBzj9E!bdHtW10f$l2KqrVjZge}a#H2*KbNLZ(g!R3%cBB?j==B|iT>2wJjqRI?sR^UBW= zP1um1BbxZ+=RiL!qL2T6pYU@AFrN2$eaODh-PJ-(!l)$2)FfuwsY%T2o|?qWr$kNS zdOAzTXvUVPJsoIBSU!)r`H3q`Y?rS>Ao%K5>JkEXLcWrMsqYAmj6e#u@dk+6fQbQa zfz_7X#K%xXwj<^U3Fa7^C0depmLj?x=NhTq<~(=AaY>ctt2loff_o#f2cb?VX@i-l zhdDs#@?AYG^-@ zEy^J9WYOnV@sW!_Vc^!{x&h$78H@k8bn}P$ZUO!)68}Azz^;Fn@E^cms=$u^DGOkQ)Y~xkIAK}ZmRH=m^745g(B&W2K9*KyYC1cNle+17|_gNTcd zIYUXr1#wx&X`ro(JqI$Xp=x?HL|I)rb!7eolgPV)+{U^h)KqwXw~!SP%$Hd85nK!& z50OWSv>NgHQJ+E;&E&3PZH*M8 zZpKwg2N-clq3Qx<2{nmjE-pyJGPC=Fv}mTE4nZ3GTmgw;#4DttO_G6J7HfSBnOa1m z7UG^=kf*WoO76wm5E~>48O%cV0Lrr4#eUyKaNiM@NJi|gt8hT(%F6o@7d=^ux6RB7 zz)UJLt4c8Q9l^|MiE7etN(!pkeKyv1P)(E-C8#Ei@05ybcF1?y408g$6K=^~onYp? zfuZyXs0j(XDY^@#;+{{C-Sj&|5H1?$v}8BsvKPT>+AwUUtpQLl2`vEn5^~pRNLZu{ zr%x6e5rX^_TP9qmM?-AXg6l-1o=f=nIRu-zP6Yhc5L->K-|wU!2v7mn!6YN$Q}|{A z@N6<>k1>-X2Del*O+cQ?N@1xCZEMQK^YX?2Sh_=!1tpS|4JbaGh-LiAz#dgcI6HCG z=-67d0EH$h%NutSP{f)@kJ0*erd8Tji3~14mnA^Kq-fX&ypGXW24k*YEWosT5h6L1 zioaSJhBeFu07@T)WeUmG9z3GLZN);a-!3 zXP7kg`EmDo<9UJAEPJbUwM+jB+5$qb??YNMs(y zM`U>&*ccmNL5MYRs8|h=RYFb&KtW654w^i_=a9{U<@0vd@xDP@Y42PH{1~{kI0{|s z=yQz1vm^I=$gqJb^ri<8B@e)A8%bd<4XbUjyv7B%AyD70eVt62Xs)=0HJha$%d{=s zX5O;&o~=ti>NG?c5v>4C$bSG8?R9PeJBrme>VXzQWt+C&*2uhH)X3SJ&WGRcWbWmh z2BHR?TTtbe-X!Y?{Q}+(wjN+53N<@$E1z|4);{laqUhWGj`!a(_(>*39@^*7Z8e=6 z&q(afv(A@m8RvTe3?v0C!KE?VbKmULd&34~j)gT^z>ZhfLT6q{hpeyrhnUAvoo zyq3j#Y;T&hu-O489nC)4ZA%rK9ax&0eJ|WCLP7!4Rxi~37H#G!G_ctvJ8E-C7gw`I zhtLg}Ak^^wG86RL=6Q4MAnGft(RDZ5kza-hH~+7vNdz5CJCz9b(SrJ6sB|C0@5A90 zEUVqjUT8<>GDDfGT^kQ=>f(T3H49L7hNUMowOeXs6Wsg{LMdXZ!9(5MYVBOax8UdF z!6%Yhh2KfQrxeFO5danUPV(0RKtaimkRagg><~k3N*+T&B!0Z3h(C`%rhu3NI^^90&fs7 zv_VK7kY%)N5!-9FI4~k?d4hFl1B?isF5%Nco5}E% z;CK_lHdtHP%|9)Du5X0(~I;6M#|6uB3^1fuN*AVMf|gQz3oCNYAa3Sc@3&jJP{ 
z_92&Q2B9DhI>PI8{u45u3f|qy9rf?G;7?9{_RLJ~%+u3TQ=be6NqGYG#YG>;T=-+a zBerR;P~kLm19+oOdlPz96O5#Z$Sa{TpEcixdnVXy&{<8$ZeVxC1Wq8P6<86D)4q(p z@GMl)Z$@r5eR?!BQ2|#)EXEDWiWYmjjD4qSv@pmfDd(1QZUqJ@tE=3i+?mreu0E8s ztRcvWJD&jjtFKi{^xiV{j5|7g8i`eLxt3Q!UkV2(tfr9#YWt0@6&spBdI2|!-D9$q z#}ceF%?Dd8(j(jre~YSLB#j7s-NmYtrll0qF49V=Mo~PU@A~wMcx-I#mnBsfO5VZ8 z^FgAsGOkw7B?tZqU^oFyB^Y&#QkMxlQ}OkI{}@#l@Y!T2U<_?cmQls2go~7Ld5ach z(y8*lEi))fjd;MoHLE9e71NfDVE)pI zH_D4;uUso8T-O-qH4~x=Xxc8`Q-fvGU55jVLaDGy_VJ=uhks8j4}rG~^|guxa2^cp zh^g}u>=M@&;5v3?ak+^rVYR^Sp=ix5L+>Qw;bH)(4qkM= zYrTtWaDoNL9mz`hE;&(k@N-tM+M)Jf6t9$5>fSwAQ7#qAIrnw2WBsz&RKnG05oF`y zTD5x52$yG7dPViPx&7h55Fdki=&(PG8!cQ~6bS+NLo51+@ae$EuR-xxIjLZy-UgsK zPS=&#+cnZ~3`0@#S>x6sw3~QQVt@ohF7>|S{E)Z_2B(wPh!i#?ue+QN)WM{v@_ZFB zj*Ss*9>!#y4-lyyszt4bks0R${B<}Vz+ELW6rF3t--j9;IJ?4Rg>!Rw)&xHh%vX3h zhd)cp3Wm-d7?a@5s_h1Ns=znuqoY&YTJ}2k;fk}5ZZ_c@og7B+H9MRJ_?qg3vx|(+ z#ZI-4Uy)+F2X zkaLv(M(__Mb+7>ArV~ysmG}C?9bz5P!?Nz9P+@_x#3!NZcUatQAeRxk-9R=Y;4J%p zlxge}99FmwrZDY_ZygqPx;Vc1u>PMUZH{Cme`*;+t?zJr@12iX@!CvV!?U%^CFoU4+_q8@?0KDwO z_5B!LdB#Md>WI{;>tpB+#2l8!Xbc*U7-{yq(0~gXk70?w{m$Y0=yQxtd4U)X?@Ime zt8ZF3>qmdXwLbjlYZlsO`$qZyw7%faI7gQ-3m&KBqU1V_hn!mvQ=f#cVo@vD<@`n` zdquN6#w?k79DRc{&^Qg!sC(JN(ua8Am`F(Wh;!>v=Use-oBZRZGFl}z2?d{UN>4~_ zA(Da4>0!}1WlEGIZR5M)V3cvdq9>fAm>?JkoXlNL?e@ni5!tsbEf+5>B87O|x#db7 z#f~e(uA1v&osT)UjPeBvjd4D=jm`UZy*51L4`jInGpbGO-ZS4AEP>Ke{f8}`+)RR&&IK^XJ8>}`pWhll;?|Ms+1dWRU!hcNT+$rsHC@qU@YrZczuh()ga*8 zZ4OCO*-v>$cYiVLt0~B5HqLCfuWSZIiqZJP|HKK>T?cRxb{DqV47=p|SjWbm7a=~! 
zfR#ZD|DW>ur4d>6yu(J|#D+{ES@C|jB3{(Qzsg#3;5ylDz1=Wnk>&A+-5Cv%Sd4Xxdnmeig4jeXW?!Iz*6e})LK&oA5qhGZtc&bQEu=bX&< zXZGWF=uwG0fG3d4V+5X*P6v<7 z+;m%`JIo>INUD(W%}7W7Pvpy9LNNlju4U9=5TnnIav+MFpQ{_%vM)B__qsg$@&@If+aO>At#`HaU;!X z*XBZ}9qEcT4+Xb$!V&BP!N7$UW*evhEpVGzm;$$1y;y-X*vSCde+|PGWr`zxv>a%F zBXdYr4!j;{SU}rBbSu!A=u1Kv!2CV-0jN==J}_(_0M<)=V6S~Z6jf7-5OPT04PSs% z>he{o!MVDxG(0&a{CTYg?xs=x@Tucy{J*+Lm3TC91dOJKK3VW1 zj0+nrTv#em&GS?mU3un1Ua);k1?6xX8zQAxHoERUKsJCCu)H(e6kFOGW%#zOsXJpE zXR047wSAp0TWv{k=0x_2wgQZouo;9k@vaW{Fa>|)Q$R(f2Cu@iUY%chjFVy6-E@oV zQ^^e%TlGIU&j&k;aCY46!sB%hU&@uLx61e~`0-gPj~!CSh#sSGN&}eVMrmeHyHmf- z4K0XUC!^2F4UtbS+l%#PuQI&zwYgrZ#c>CO>reTguQq~9Zy~s>hL>K$^HG7h9&WFE zTrcpjiLn=6V-EV&%sj7rJ3ibksqFuS<^aayDozHFy*lfyBWDjQe00M;%3VR5lo;~A z&u!*3o_6*RVv9KdB^>_l$x=Dzoy8Q?@I!B>^;%V8_k`RtO0X>`A=B21O-2Tw&|NPV_ZJQ~hh}xbcAu*NrdQCPZ#(6WXIt35{kS z^mE_BMjP!jY+Z(cK|GJ^>M#6uu-Ia(8GbWWa^dk!rcr$AZ@F|DzOG%9cD#66(y6FC zaGQ_LCs>uWU(muu*yb~R8mpJhh5wgua*3Iq$gSOq{+h@FB>{Ykh<5=j?@ASadzNI| zD0EJ&O%aluFkEo&EQv|^@%{$>$-1m7aevxMtOV#iKDog_R^g#ywX#a00gKgI+54Ng zF*cyYNRN3V&4EaVdH|_7D@B(sO7Of7nAZ<7_$q?A9)^roI%rD#-&_Nqz>{194?q#IpT8Z$9mhKNXHIr>XJ5@+&-P&j9L_$L-FRlltDh+* z;(+GaWFDgIU?rgE+>@oiu>>292na6ZtRZ<58>in|FL*UrLilgvWUJdXP%Sbq0wp*a zpF)&ecc=7C@+CrrliKY_)y!6I$vd=f^pd>Hx&8ENV}(XVk!&TM6Yr;Yo5Ul>*~uqZ zLYJJ67VgnHJhB=g1Bt}FM^mK4L;BZ8H8h&<1@`UviBx$JkB7^%KJz} zdG_z(p(ZebN}-zr5uvrZPL`TpWLD2HxWwQxgTIZSIRLc)>n>IbY<6q2!#N;RVs6;T zmCZ^7l2cmsw~)!)pu!h@ao+RH{0f7s2*3*v#2|AZ?qsa*raPdH7 z6Fo|>8}5Iom@?0nkKkf3*WJ~Dc6Fdm)(|UVo=NFgJ5ieY)Q+4R$IOr7!?)poh=61) z*c#vttD`Y&st|E$j5-mor8R6@a*LL$H+ zkZVG70sxSMkKD2EgH{Am0YQkeK{TQ~(0Xv4Yl$OR1nKD|mwIx+cQHj^`2XhWd#WGkI6!PNWi>6V`8 z4#f0!umuMiKl*0B?OuVRM17Q)w^KKvg?2emPuo1F-TJ~U7~UyNH0Cqsv0~{iNItE1 zfdO%V9^-eCS7{rY?0CZE@07#nRQd8Ve4JbK*if@e764?2pj-QN9rdstD+_P7$ z)+EKHVs*JQ;}o6XA9q7)xy?j&rWRXG%2CMN(##G9XXy?2EOS+b-w+2vbCD94rt1C` zS_X%HP-5z(&{u+F8GI^9B&FKStqFKpZfTX}c;^aya6=pd5A`6}>LgtdY6jaX^_teO zT7f039JxikQd9&U%yIk>B55+}*Kvt6&1htY^H6c-7>w-LIx4)HZm@$IcmFEywcphl 
z{kHFvp?;ilpoz-cLHXHck1SWyrNC=2*Xe!uT*1dD6O6Drddw3*H`e&*F;Cm|8Vr=L z$%12^Ac>0~0!=kF&;!cOgo;_8vl#IaEOZ12f)D@9p$RcIrlb4^R$)Wcf*>j-2pb5x)FAs7i>lc+*kri~<(2CfhPp7tcnxu|e5u=~X5l{Oa90bjb)$W(o z`z29}8*y$yFod#_$3x==Qbdl29*md?Z@hR}z=JA{2ylusZB9ZfQ_iB7!yst<%f*tW zzrf`DY8W^e7|Ks;cI)#A7K@VR0*?-*@jX;AVwAZ|Fvu|wNF|&O;cv_{j#w&!DF z5C(1IfTsY3H9Og&2>5CV?#LN|$VR%u4|xqC6cdKRpAvwvA<17DU)&Lg1zZZN?#=uafc`$#y%zY3ic1MO{;Z%PdAErGNhlmk` zr>Mxc`I>R;haT(%j{CXb2|>_XpuWOth38oNldJdO*aHV|FWU3Ali5U|-VJW^QkuJr z&74Zl3uWjBi7qp=gm6T2$-P%@>^_HK+sIIlHUZ<@9Fc6SX{_rPbDYb!0vV?BAhzRI zPMH|#=_%5X*TADBVWv}nFPYjB9L#`wQeGnVT9lI}*Ky!DIn4fQxma1eCm2vpViuSw z3Ny4@#o6nP3fv+Fr&dMK<#kO;popV$ENIj8$1b?yKBZEF;yv@gfH&hbp2`x1;1oB? zdj6Hm#0vOY^Ftceb(eIhvc`J8*42e0aJ!NSl!E+kQFj>u~@$;l1@7<<8?%c$X# zh%cbv)?&r;F?!j_{2^qAco)4YXIk5{yx7@ zr)F=yeC3oe5Q8$N9_wvKvdwvnPIIc|pprHoJZw~+cF0h|pOKzp_?sRKSYGc-ibHDP)1l!*t0M?H)_;+P5h)R8BFDiH^cvWS=+c z4b0u0=Dc3P#K+Phgd>L~GB5%VpJzRt#tDRQo*rqAmb$B)P#%^k{zmoWM(jeF4Z+^VPEh&-{lR)d6~4k*M$62r@Q(ULrOl8hRQi2 znZnV;r0oE8;Wew1LQPPs0KcHqps7m|3%aL6^fUz>NFZX8X`qgUgoWQHP_d-wKp^5; zePoHKijlCl8gLeS!{cutxMJzfhhKMSF(dTqEs&Q&Qd0B<5)_0esNF#Hg?b@B)CjaM zfHiVJEkF>0UPnR@t{`gx)}gfsqz>v71g&U5(##7+WY9*0kr8OfeJFRY5}qs<21AG! 
z>m0O(nw}%h&5$77=iEXrAwi0(17ZvXW1NgRrsi;z;_NoUUdA>siAXHh5pwpYFe;h@MjK8vG{{m{F@P}VpWp*~ zlM8;cKNNU1d+l1?JQysR(G{zCFqo^c=)K9MEt)jS7IGr6UEv9+%(2W=KFTHX3^_Rc!!k4e- zzjXPHYnPvY<%R31`UVFh`UU~D9&vOZya(@RS)KC?exAWEF!)6Vzr^5|8T<-^f6m~W z49Hn%X7BjTzB}fKcwfo*tGxRH1L|cw*NMg994?##q~V7%gV{a^@}Gg~dLMqH!j#R| zgPo7lKkH+uq-~VKMi{Lq@MF zA#RWrhkszjL0O3_CBBFmCs#^bQx9LS@CX(5HO+n148(Wb6yJeZ<)|yjylFy$NECV) z89zkrNKw5%gc#UNU>HzwAnP1~wqy-UA-?p=m%Bx-pm-SdyZZK<4sr@ zbUTg_h{^^U#cU$dFf_wz0oEKP!q(WaRI>2~X@K~aunWuXt5RSQtc0`?4b+*3*;*$A zjxSi}p;mN)TogdP$sz%b{qhpU8ET6TyvFe|j zoJ=@0dHT##>d?e!^R-vNPzA(xx(+GlAC0?jS#r@(kBURAjuMzg5#RcT)FY(}nk+%$ zYWXdC+c0=q39#p>X9?O5=1nyo9~+?Zh#viAB4T+U2G2$2XxHx_0jFe3`4xqs4JT7k@zTi#=w<26@TqLN%*j zpNQuRJ6>c=qCWPNK#-z3kn-U`ijLb+nGF|gKZuEI*rSQ+#0Hr79+5{|oMXE==S|0y za)R0)GoLk8*k`BXW5{;lrs(ARYJa}1!Jl%jZL)ymcN{a?u9+%Cy%6*>7lpP74B)+> zUO3u53RKABjJG&NS85lUvTVGz-IwpiQN4c;N2G&AiwPc8QS()?F$;jPftrx>1#ibe zd(+1i*f7h5JC*vH$jr%4hZZPSGzhCeJ0z#^`AyT!^^T!DqbK~Q&&_&|;OkhwVBKc6 zw%Y6xaHSgIKY)A9j>3YkGOEwxt2e{oEE5f?po{uEb9SLfsiU@JaZRsMb~vj-xm zA$&W4--DUYz#Qg4=E;tSGeSNWskzA?{{L2La6*722x|u#oSy?EZLh(31w(17BQ;91%wd|E{9G-Id=ATmULn zW{u>58N?ZAbfk8|zYI7z0y@DHrXMxx)}jcOs~pLYJXS8a6;pgyu~XS!QNEgsLksj| zK>v1SRk*liBWBWFVaEqE>be0-Pb|YYNFcM9B+TfyQj^*W<7-Dq?gq@9XT zw|{_SXrW`IJrOi@FP{p`fR^d_%K(i?29cyZMlv0IMKBj1dPCiUC`N-V)#wD!o}o~9 zKDlBx&rc-N{@|IEtO|U$r5tUqNsCb@YAo^9!rj+#2*MPczu9vv14JQg)l;_Am1NSe z)TwFsjB8tL%&pqaicKcnNwLYZJa^FAWK!efBX~91pNok%aTR))<=~m8cnB#F7xYZM za&^u2WGUH`@l zszdK*>d{Q~+hC9j=D2u215by{FR=81g1}S~-Lj+nM;;pZ_po-s9aIe*BjNXpk{fO! 
zf_IeD1%?n<{{hn<0st`mkqAtGu>QgH2kRe9e;yt`aDD7R;fvrKZH=F-8uspMrs$Tl z69A{;g47rs6v80UB~jf7Fjp8eKQDX5{5+U0MiF=?xGO|dP$x3NG|?ikDo2{xt^utF z(&}Y9W8gz8^|8FcGS5#PT%yRRxK^sw^7UGNX>|?5lV=hILpx%n6ggZdstyxJ(#L0C zzIjD;erB({eDnA?31w^Nq&M|daQi}G7~?@%tzFT6ETCqWlx+YtsY>YGHXSm7hZVy= z!c`(DunugKepokerA1PT`PeS}z`4YBWy!^W3|pO1?99^vc3UObih_ns1q+yp`jJhq z|MV91-*z?+&;TZpcQq?&<{Qu9=$=utT;d?Nz&AEpt}jOHGxSoxk<0TyN|i-%f!oOK z+#Fm`1@&{4Q56bn1-)G2jprC#X7CCFkseYEs5aVt;`plB|0P=RtG}yx-VE%wl5$W= z#K!g{agG!mLYy@0lqmh{EF}SUh+7`+0@$I5)wyKB&VKN5LhEA+?7A}#;WsXN{D_U3 z>j5jRR)ZeDW$BC{mX^E%f4%{B8(~2d{s2Y*B1GYj>JeznvqnDxqBBAs5o{Subd&C~ z0!#$N1SkY(avK^Fe?J3>8Wn&cSA*6)(2dC8cwv)_g%id@*qKuwYI+&Rp>i zH}PS5eF^9YbpKI-s1M0lgv(WoKl73YuA>jGk3CxWFTnimVQNfl@Ch*b%>HPEI|=66 z4S*1^co!gB1L#uFX?Qfd1ym^}qIjnjQhh^Rq*8$yN;B3I*mD{(L&AsYZA67On0 zEMz`(maZDW0PyS|czMrhAHloVz51Hnzf{7oITr)$l?aGP>CA~;z~^XA3Q7-f7ROM^ zebohyqxc5`-@6UGKXst{TZ4V$>r@>58r*cHHEy!01v%L-Y1oP0lxH@k?IST&IiDJC zX3p~xQ#YdXCSYhB5TQ9>5w(221Gfw|KBmC zpx0hr%_A5aNkGeHuiXeDDsVr-$ED-adP*86FSK7;o994p_F#d_fT-QL z|G06Na|i;-&g@|J5j;HzhdPJMvlE#oW6;+7k0_%nN*j5D+Hg z%mJL>0?rDc*{a7kZFz_j@Gt<#p!0sOGY4!k1pKkr8$sTM^Bt`~2-j{ap3o_0&S9Sq z&CJZ0%2lap;ZwioPV{8?@F1u4_4Qn2fYiOE$rN)llbtm?OJ)}_J8Q2fCDp;!WUDWW zN$9Whlkim2lG7IB)`_xg!V=9Mf2m+(yKtpVPP>L${@U&D1`qFqU$j#*bVI$)l!~0= z{29cDC$(I`$NyglNbiBz5KU5&d7w#2;k3}!$319Lg1RBKBAj+oDj-UQ8*hnE)vlC$ zoDWM>(qxPr{|qy7vfr_!AQ9FU$%AyJ=$MjZ+vI!dJ*OAd!x_-b1Xcl9i$MAyHyl9% zdU7DDL?4)o8>?b{^Rf zsXwCZ4{jk|+O~R%H@IHCEFsBO9AAg3kp6)Y*xv)QD6?uYa@4$O_uVaqf)W6&*a<{5=84uc z|E_L&yS5d_w4fpwVH(1>xQjgdq$)vyf`@4^G||`7T?=i`(GMkj>VhJsLPcIg1q#$Y z9OUA71hLPA2|6?~z>h!**p!M@EPPchTG~u?%6XWi4px~85xdd^e}u0rWI|cRU-CWl zum)UdKHmxKgJ?}ZJqk{{qK>3GMQ^E27*%ZenR?r*k=)%RMoJf$ZF=lVL|8Th8I2V- z+0!0XwS}wjW=Yip2)BZbC0cH>Osp`}O-^dcz`~g(C!6aL%}WG+m|)b9vTaDuk8KKs zS1nIqRDH1t=+l6d_iCqncYX;T_voFT8Q`SBkcNvKt7smFix5dRxlpYyOwN?g6lbQN zeP(g-sWZ>aJbQL=ro1@y?9@WB^x4v9XQs;w&ph+=nT6?ckV-ZRf06}(Wx!2P_Lm8s zO@Jdvx^qoiVRknrS*L^{b@@>VF+2}s~=?J!DhAjL>p%MT<@)I<)H 
zhzkHxlsH8_N2$aq>N!f>z$S4$r95$5Pk}^FiF=O1F@py0<3bd6i1&erU|eN{ zO;q0jBp0(S<0mSizB451JAc$peTOtbw@OZozD|!gnF8*Ia7hXV0;74_!&CuM;vu5J zLx)r_dJCTImE(V&%`|}ML}tT7fKY8Q+XXT3;U75SjtYVvlq_J5Rj`Ub5L?mRQ{axO z&++~&^1KoukbU7K8>Rss1ZiSv4Z6I7%nN;q0@a}uC=s}@5v@IKy1k;{?r8%J9(anI)FhUkghpjx=75{{qCX+~=VoX!!udz?3f6#A#U z61L|MTw!oE2wvb73C|ndpnRC#8Me}I5Owg* z-Itw)sjBh7oqY&I-a~L-zZcBIBVv%-mpPd^mpx})YaL09fq>$RIn<7t-A1qb+MD2d z*I3=pGZ11qY=>M)V@OyXV`Ip@7@SheTWe`cqK}rhF~W~}ikI;5Uqg^W_+hdOn=aR=o-TfOwk(N+qo-9m#pjc6Cjjdi*Z3vZo&_`F(MqnI6M1S# zych|()s`fpu>N|rms5z~Ny=qdLY&S^*u8A_*kvna%FMY}TDS%SA!U3aNo{xF4onGY z9*QNb?`_spnA}1Sf<%lCzFn@O8|k8Nt*0{MTI{%36sUS4d-b?GT7=+mf}VD3KCGYb zlnvv*wT5H5molT}nC7J{^iD?IT{AF);Tkv+7J0r8$I{D%TCL0jt?2$NG^uXkC9DMl z%I0F6kcGK!)A^Xq*5;Eg9R)RmUh{_$dQIMD|onNc4?~LL9J{y z>6#{$C2WO8_uDAngylhp9(w^cQ?0d8?I@757G&RAvDy0q&{2f^{b&68(+Fng`T!b0 zaYJ>WNQA~861arr1hM{1CRHd!zq@hbm9!OTC{R@(ZA2(Vbx0u;L%m4QtWcbUUZnMQ z3`IPU%i_iN!+#WDS$fblzqi9NqQ4(sRVF?77Dw-nYhbo8$WeJ2dD6QY6IH87w zAUi?ifvyCR2b>8#Lx{d`3_D0bWX?nChEZ{fwNL>SYe9(+s+aWcLfy=pc*r;_hIS+J z3Mo2VskHYyV3cJ0@^$O2^$IyNB{wGCqSxWb{!c0{@l}R#gi*>AKzw*o4@mqe zyeSod3DI(bUr#c4f`Je%KgX+|M{wiCgd-t2up38eVhE_9>_93t77EIQ)(L5_P+BH} zvVX~fLP6P9q*g*k7Sdbs`p@kNrm{@bxjn($P;+si$E0R$rD?xLnnl!)kCE`|%Z!86 zq7UP^vT--aE{W+CosKO09UV!$!<~^_TX;S4{uO(zCACH3=0{m`YB~=dvwn^&z17h{ zYO8Och`Yg0KyQ&UPN%mpErbISXp(^B3OyT(aLvftgx;d84Rgg3YM5uFz(9@hc<{a6 zVU$z&Mn{|idx;E19rO|E3$&NvG)5OvBYA_b$8yMc1b1;Mcdt;Zkm0A(VnWhRG}hqc z5UN>|5eZrw@zjG0sM?SNn-G2)nomk;v=!o_MIR3&SIH+M0xPrb=cVm!1ts!?QP#yG zfoBo^l{&5HJ3(IaX*EgLaWuPQ)s;RORwP^lMmWX(2k^m^clP6BuW??saPS$kV40 zhyWR)7(A~YDuJ<_4j6KzKY$!?cfa$2<19j<2#{xl9oj$+_=7XftaBBph2A((fLMnB zDbaS~08(aAHrw-mq&?uk<11=_H**LnGrWs_7yAv=81N|qlLAh=(f(n#2;*Bh;wg-8 z+(5Yz#`lp3iYj7+$q)ZSr9;mmCVRJmzOv3tsWc^eQ+VAaoGZ+K@aBgP-&?1Hu6-|? 
zy*}18tNNy$NUz3KCvXMz7=bgwDt?s_S?{;-ubH`xD?XvAuJY;y-snAFEvzh*3Kw%d zm<5P&HlBPb-1GvJkUL+k!$pvP5f_?3t-)T3JiG}NQd2d-)Q9;bbKiVY#q+F^Fs^~v z>ADcXF$RB(;D&Of>-la{HW((MY$X_MuJG%XLHQNBoXkPbToS^vk%J4gv5{G z(?@8D|7QS`4`z=c?zn*CBiTcp=6uhQw3n5TMDV?e*4b_-oDpjN-vAFk8Y-MckNWJ$ zj2&cBlu%@qC`T?s*+G$kp-CH;%_8#)Ck?@%kAIFrdDsM-?_b z!S))jKF@$h&UwQOzQKSbwD*$?xQK|3x2Rlu2lqlP#P=;Vi)_ zvr;duRm&GydHsZ^p;r#Qc<4NS|I#^PpH=$7N^6EM t9QynrZ}4i1yVp4kI1ubns`}jU>l)8tVio>)HN1ebRu3&7np4W^{{slTH1hxe literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/core.cpython-34.pyc b/tensorlayer/layers/__pycache__/core.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c17dc61539e4c2121f3e4e599784161425de1c1 GIT binary patch literal 43132 zcmd^oYj7M_c3$_)VDKhLfCNdAl6pyT2}t0O;7b%F@F9ZKa+kt#Nl1`DGlRiQ0~m5J z19lHc01|Cy#a+ivyz<&fIlD>hs?<8JO>8HQW5n(48YdaapWq{v#OcB|B3l{&3bmsRSvN|SXO#|-eB|O?5NKPaPqH;E2gDw>N<(ZJe}Zt_ zMHXBfW?LVkFVU3e<;n|e`a@8UmW{9vP~*&g$#R7K!4lBU96N@$o&Vql;OZzll2>QZHBhpz~hYUnx||>u%655#zPu0&PF>EwcKyP&PkrXn+c)xW zUbuec!mC#=-3a>TD}{R9t>!(~Uo0%U!H#0ZEqM8gWX@L$OKz~uck6P7L@T}<>?~Ky z^|I8G_o~&rUnd0v%U-!!&lf6{{Jnx#F3gd>-3!PX{mkDl*B7cO@p*IRJ&Rkwb>=H2mweTmu^Dz&*nvc5KHbkKFBy1Y`?^X;Flx}U1m z6Ib?4*Sym4dv5X4(wti=p+n`SP!kNay7<(Ed+6o`x7EF_3AgIIrrzsru~yado(ZqE zTnnk_f32|M`{hD)vQ`dL?_vt1ue!y0Tu6~4ApF=kIghXZEDkPa1|}JfDa%b;w=>pE zn^ne7OlS_Cl-G$=lf&&8Df+zfL03M%R4c7i+;B4aPXPpSE@cAw9i*+h5Cu( zK}UT)U#hR+`FHSP-*MgLuUp<>6eov2agV;?V=x_EEYwQ1>cY{9T5)B`t=9dc*Sy+o z(C;5zUaK$Gsz>A@EfwZLluOnz*&ydXTo>Onv~(w22yXNB(+oW=6O;*nArt@g{cs)ONE zuX#?H11;C(;B#}XP%kd#eN4U)XQ|-d$$Pc?=B!w&j5yUiuikQw9655fb!ul-Ga@-o zP5Le={aV4pbHi-+Q>&+(Wj-URf~C$yrOuTLe#EGTZnB^#)pUNoINGY_7oF!Feoq`9agLul zjVw`~#i-=^e6Cu}d2VH8NSaU@TFfukYL(HEuu;R#3n3+OrJ9HhUMj3ghWzo>G<VpshyMH`DhZ5L_RXpFH&i~1P02QrHlCk{5GsjQj9oc zW%*^VHW$^d59tH4*2$nvrCx?(_=Eq04`gd+)R#fK=|}G%QU~ouNFq%~IJdkJ-bsTo z1a>LrnBEXBd)+u3!`JV?!E&*ou zxODCMrHdD)E=@RvYROqF_)fLvxbySiXCn?KV8M4m2qmynt~>KyZOK`yt$0qcR&sO0 
zsi55`Awm27f>#AaiAIaSke2s=9&%@2_;zaj#V2YfDOmB@+NmO+6F>a>XWv}!B&zjJ z;Q$3J>sG1OBpNJj?Zw@j_?osxqKaaQBS0R|U&hJWfO$r@MfgAxO;8&j{};O(gu z44qjAtaes@<801msW? zL1YUgDxNTh{pK`LO~}p)Nr1N|Y>e5js5Tqtdf6$@JCn64#JH#O%vpE6TG$VgaWg>| zkrx%e6?Kv*=!^)Q9PT!p23)@GmFI#q6zpJIV>bt#msX3ebYzgKR%be>gcs4zL01`* z20c|Rx`AB`GVZ-XC9oHVv*2@lO5TeYh29tsXL&e}L(o}u@2d{qJg~Rn_AI{sP8_Tq zX?wu#wRfia>|OZRmFl-IN3)Y^8I#Lc{|TI6#CZqIaDUIT9@v&@T!uYW&(eu+>uoX>u7^_rHm}^&c_6a}gBIqfy?V*wybxwLaqC6vJ5ro4m_+B1>2iD z+q5?-Vd^(Z~8&HTv`q6d(HEt581v8 zU!5ntS-Z#Ho=V%>F2D^8`s*B~vX_2<7qcV$drA;PE+RP$LX3<@lMYyT6D6P`udn{`nS^{vc zHtUO+r)g^fg+G9_fOPq$wfZTUSD2MH3@4peZPtTqNDd?u<`v`zPCBi6mz-cm$?OW} z)sKJ<6wRsCUif27t=HVTQ*fw}a!$p}I)3ub+!|6Wl<&D!F(@3rSX&OwimgRM3k!wy zVRj^nuP;JLC0E6C#w}<OoT0;#{P2k3E95T54w_sa zF#4pZ40yck^7Utw2cS4|iLgZ9At!YX`9&&v{7~7WSQ)_ACn~TYQsve@N(;;hOb6Ke zce0k3<@uLk*JrI8GB<~9oyDmjl{*%sQHQyrGnZQ1pLDlLG|me2kIYhZPq*7^&{ zX8LhEbFWltMexA0I0^HSTP@V;8@Lsi6}gV;juSjP$iqb*F7a@ghu3)c6c02ZyemAA z0S&k7g-_!==#0jb_Zj@2$(H27cvqSFbsi4m5VnpD^S0yg6@2~uI9P){cAvc?m9_gq zj+EMKXTg(>+kqAFC910J_>x5b2XUZe7Fz;6E6fL-4~7H&1WvJ96@4=W22r#gfEPfn zLTOFQ!#FH57VHEpiA)Pz<@>_ogi(PlXzDti8?v=GG7sDabJ~q+?tY(j_b~rYgGo_H z+jvH=HEu92J6tZ&R-EcE1vsPYy_bhO8 zFl|YVF_(vVg`Z+f@h6Ngm}H_l@@Jb>hpGrEolKxt5+xwPjp0n;GI;yz99d#yEk|hg zNXRdURvCy7HU|7YbL9%$P{7MDGz(44TE{RswUG?q(q_OLMolb?lVrZ^ft5!WsT+cC z$~2JCu(j%`Y`_d(sfFb4VS=R|A_cdD(Jbyp$IN-7+rJACg3W?Yb6SuTMA1|5_d9Ew4P10~21RPs23 zhqK-r_~T9UKuAzBdbe;E*p=ovPsKI`mhC$@v35~W>k>J^*}onC2({oJfR_GLBrho8 zsH=*+7{-a=1f7=OPdy99Bd%Z`D-iOY7-lwj1>oR92XY&{3vMbI>&+&3qk$IsbyliC zIEY>3yp`$@0QY7)J0F7{-q0`*4)ME8U3QM;MvuP$@I;+$FRWrob&z+9ZtpZ5JT+JYqeUJg?>fZd<7;(6E221_BGtu(0MQx)YD% z2%jkJ7OMwOjeo-rFP_KO|0)hiXD%MjjOf--ufe&Y{ys#kgjndjBGDwg(q>{x9;(k* zB1?FVSQCja;W>g%B+7*6Kzt?Egy%>s5hpxHJc`7f@EjQ>`h@3*P$43`$J+%03!m+< zEa^oujJbkeE;eyDNAzO~SNKKf48}AgXb;<0c@_8})$tV%c(36OIp>Os`FL*nD|1p1 zbc*|nu>Q+*15%G5O9D3O&@T-%Yu-o#e>Imx{t ze;5uym%e`JK@b>aYC+OELl~HW5fzPW6Uh}&by$3rf?(^ZuHkxs4)C#{{t-wGp+qv}hsSA8J*A4h$KBEKgImE~{8u zK@f}~GcHSIvzMhg(hu9(B`I%E|fMas>ex 
zi?vG0^@a|Z7!O!cH`S>DS9!v_U2PDpqIqAXbR1fRPz7Eyez;sctSw;;jmhJ%m7(g0 za|SMW%~BeD@+`-ZMVfuYp^=|3Y2_*_mnSt;Rch!+z8c%%Wq%BP5{BY9L#5-xVS~>c zi?#P^uEpbKQ8%Zq-omhvhl{_*;42zEdIjKCyLX4f!lc!D}i$+>C*J38~;c7BT zcf`3-DHkbHC;$wTh)bXnTfI3p;*6b0j6XAk6TuKoV^}6%!|-KxQ{fc}_uH&WM!Hlm zah&Q{LG!MM*_me&C`vCU6eaHs2`bI2%3xl#|$(K zelOzd--5~nqp}1vH*$oCV=w6lc@Ii0eBy9vr|Aa45z5MLWZMlBm)-_oB$n8qFj=Af z!ej;X_=94h!UhG(D2j2uJ-pHpUg;1gA=1D}sA;;a>aSaH%d;E_yY&X_dbd@FvZU%6 zs)Kt3sbO2Anz(391Vs%xTfa8ef__{$3o= zJap=m7RD-k;{v@kk7Y5)jt9? zG`Su{%@4%XG}M$qUTJr;s{u9@n~wi@&|*zB^g-JDbI7SSUYj6okD}nqF$L@Au4%*# zRoPhf3fhKvH>h03emQzA7U{e@byX$R`mQ&scdE7fRU^2lE5nI0yglf8vrt)a#jH__ zra$S~$cuYT5zTSZPnBO=!cI`*H6J!ij9a)!SC)dTPJcC(Gpr6~6pW$De9%R`P~lWX zQ@f-R2}Nxh7mQG)^_S0oCp+UgRAd40>Vv_Xh0nAz-Is>ZiztIW`w;#O;k*y`*S9qe zpQ(tSw4(rGt{3t3pF^3eS;o?+C5*W+aqEvIL;xH_rnUEPtu>ce6a`znYG?R5pUf^tOUS0F$tR~13`UeV3J%R z?mYqFY-F1@`9Lrj=yoO)e-6rrb9ViOYmu5vg)0(>5@||NLeXZl<)GbY+Ut`LDn#fa z&)4x|4AwA78 zXhBc1l|RfmHHZvWYCq;v8URv{{Tk-bfPEl!#$NB&S(MBYvBETRaWct7dJ8A3pChW2 zm`LYkB1zPB6w*VOSY{Chry|6WTMttUGlu~R@t72tA-#ThNsmljC~okX-rxf z#DG0HVrM$4#_|nP`Lef)OMzVt>2<8cIqCSSb#h+N>c;2M$B}J1VEUq8Kudo z5p^^&$Lb?NQ1Wo_Hvv9*BFZ52MGY_G=tI~`+8UIYSa=Ydl%inOe;DO^3ludE?aKQtl3GYF6i3FpV!ifkL z+ay;-7>S?=!x@7DIz@8f3PRXVw1XW3vcKQlUETRS^b+b#`TRH7Wei`iz^fQTwI3XddcgXQumhvC zk>qD5@++OV8z;$5G-@z9NFIFOAsBji3Sor^XC9cu+Mp0|bW5r)v7sT4;B!H683=4a zdQ{TCjGlHXCN{boJ@HV%ega|v?`ZEpt^6L)BF_frGWJMZ3$mg-|0W_U(TCC9XgZNB zevA0}PvfvUXr|M|t!|_HziHp?LbxP*(#8TcVYML{loQQlP{V_@n$Q0hY2Yxo;L=EF zaB?#u0f}O_26-qRBL}CbM^a@fNLSpd*1PG{LyCO<-zQofO+C3YyiKR{Cr|;xQ}JXm zT}!nhrh1K+-r)h&S{`|n_Z|;l4p6xy9p6;Fujv01x^kKi8$Jb|W7d%u-y9Fd0u3prVYai+F0#+aZZJ&fvsT7QOB^xu!tx*= zIetVjG5kLj#Zlr;K5ML7Fv0~HYxxeYrsFL*PB_lf$_Ur{<8E~zBE~2t3`>+za}so@ z!a>HN65{hbi%luI07H)QkhdgdJc4BKu(03b;OW7M1>Wai2^{~e55yEiw)1dX(^&St z#DayQxZrqba>xWMo&DF-oo#eZb>iZAe0_$Xv>u2Ul*#|mxNH|+m5xg?;KEO&6P}}R zXf`jyeBuELe9Cnl#97bh0QBbJ65t?09N|6?@I`OFFq)IOLN8Zg6?yPqqijkJ!d2tdhO@jo z3slgZFRWBxDyeoH=hC<(xX_S|_u#-D*7;VeB4Q9oK!SR8K|~GUY}F(H7Sx-EIQZGm 
zPR_zx7p{cBS|Fj`P!PJkG#?zaoc6W(`43yy<;#y<*)gq5`m6O5VS{*E6N(L%XuSd$ zm$^VvZZ-4+9Q*}wIF9JErsak}k$J3X6Odbrd?FsUg%@WNmkp}J0OP7cMP$WqlXTg@ zJ(7%vXr5Yh-N=X?KNC7+Fk>)bj8ld@P1@aR3woe*;B^_MkXP^a+txIR~zKanTScz&^}6;5o-!Idh8IlOD7pd*|Ymyv&xO*O7-^XE7$q`Bmh!Ad20iy-@H=+{| zuYeI{rOb6)gzzPp@l)kA?M1D1B`TFfx3qL$sz?llnis&2B}hIVYJYbI9UVXbo^uS6Jj1Gj8X^A z*|0Jg!UJZkyX+6~Ogds%-K)BcqEeW1D^1ciVzCh&nkv__a!Gv;(e`sI6)e<~74@JZ zDRbGgTxd`IGENC*7fop)OiI!|hwi&sL2;%iX)Jm*7C{;<>(fguC|QbXt-1?XIR@v7 zYgV&MAjRaxOHQTco1%&?_VMuje8F2_H?>%i$kySm?yw*wKq6ofIhOTFRuiOfNg#Bg zC)|%CUZc-4VyM)UQlj6X12Jwdp1D_EmHyIVpNPD;oTv&{Ng>J8ADK>+0)fUA$QZ`5 z{vWZ)5y5dCPegP?YS1A_)olvGK@ zt%U)m*5HLYSD~)do3>>z)*z`Z*5FSJcb$j|pS4aJj~`>6%5n~)zg5YR zxTQ@gAEo2OQ9UlIaxzM0xGW#5juOLRTPk9bf0|2=v^)Wt*ga}%h+Qj09W_(>Xnh46 ze_;C?;eFgjCNIHp*uP|UR(p+V-CVR82voH*QVTCDk2Ds^hFG0u#cwDsWjGGOY2dR@ zQU-R(NE8D=u0e~O-h5Y_AtiV`Nxd$Ar7Fo!k|vW(S^P+XKMMmMM>q#Mhw2j6_Km6%ae*MbX??)yGKSOLNDC9Fiiez>VEYFF+2ll7|g2|qEsKf z1nrJ+a|-#Dd(A79$_OW#aQ!0wd=?A1moTcmA#S+DjTG`+kUE^hE)Q~JsG&ckmmlLm z+R^9S!U?g-=22+-;x1}ErA(hi^C&UhbX;?qU>q* zsgO#{8*q2N|hRWhA2&T7s1h(nR+`f^QA8+lJW%?#g@{^v>*sljZL|V_P5m28_55&f@?t zdASN;38*YTe{>EP3ahvfWOcMH{QNUt`2ElQ{EwUq+5~?x3F(0h8BKAUX12I2gUF`< z#&iq-TrkMQE?+yIDCe%;z1I&wCuLt#rBqng#p1>s0bS$RKa7JMcyo0$PPh(8vf%% zD+Td8tartt2j#qu9WBwppL+}E2v=FqTmwvlicre}3PiGj~%*bSJn*&=qqo?0qCwh#zE?e|Qfu#JDPy3_o#KH-(YHkMI3A1xXx?0V~nLei1bIGAM)kxBt34=*tGGdlNn^f-o4R+Fx z@DTe%B4@Du@>?}KsH6l&O1t*No?Xc%Efg?` zJ1iK-vHEkHVMV2y84^*gkA@c$R<7zt&G4e}WHqrD80jAZ5>y(aK_cpDi;}>>%_^NJ zX#x|}LY^3Z#Ws%o+KSHK__zWNn@{?WOa&Zx7C?YO}YJ@-5Cy@;>> zpK+k#r7EgwxNRF$RE8aIp`t<&OG~s|LPeEG<@aA#l~q+sRZ%sus;J__Gb(DED2%e& z;+Qx&x%)bb<*Ee~xGw|YZ$54s+LGD3yXD0)ycspz#EMK5;8T%j$jLyy53 zp@Qd84G_)F`|CWrivxT`66h}ey3KSi?`ur{V?3~LV?E3JTm1QX9-5qZov~8n{cYa= zJ3RaZ4?oF+=ygKRPw{69RpdWV5aD^LjV=X35VrD99Gu2pE z-Z#Y^zVN?>-9U`o1=3CiEU8Qw6qN>9K#7&B+{k3&!sN}-Te5e_Wvncm!cG7;kKg*x z@^h2OD6o|K*vecfQF18*BHx&{<4JsUkHJqrf(%s>BgV6t$c&>^jH4sUVLTiL&tEQZ z4=asG*&x8N#TK^zLlRl4t;s}dg7^sgdsv+We&ID`Nf8p0N$oCIxfU20woX 
z``wDij;m<-Xdfx2p?$$>i$|$Z+oC#1oKNGzC!LAH%F?&L`zu&}bmhv3^C?7zV0+?I!#OOb#Zvk= zU=zaY29@(7c1o-H=uYg5dA*D$<2yIju$>oU% zZE~>HLakWVjWs#(Sl|kFCB=R04{#L#_zDo~a?uPEwB`6M=hN81qg)+vE`V0heb2=% zL(-1h1+04V@03TJYtoF@%Xey(+C6L)r7bv(7RI4wbVo6O|D|c4N%V5{evLX1-P2V zol_#>sj@t&ZlXnOp`mQfJ{cGEgc?fW-oj@3i7X9>G?U4h9fmi@Zi6VIi1ECUit?iX z;TixljVJ)d0o7!U2?IlbR6(g|R99qcWLzeqhIJB% zaR*>VBuoU5Neq#MjJSd&ld7C#t>v#0QUb09RRjtNGj8M3D%DJQM0%8iaQT$|02z@0 zh?HDs^b&3%ZUwghiRoRSWk}O+y$dTCSXw`|uBb+wzhp`Qx&=w8Q7*Rqpi8pt46^}n z)S#D;R@Q_SUv}mJR-2-NUDmt6-;jN~wVU^L2o*h{h6Owh4bp(_!BDH(-KcQw3DGpX z$AwDPfGSyD8V_#$C1t#hEFHyWI+#%sdyyF9&pYM^W)^YjypGcK!B>m;>(UFuJ8RUZ~q(*v7#8kW_vI!{L8p$bq};*jVAQdrx17631CbCvi=Ou z5xJb+hvTkPZ)#WCz-EBVI&q%1y97St21n}$HyxLL?3A+VS7px#u!8Da*OgDL>rO+@Kh3@7iKR;dYphM?OgI7PK zZaUG(xu()lp!&K^fOl99c3$FR8b3Uj$h@YR!?(yFFD#U$m5RZg@E#d?gG>k;)f%F; zq#Xc~IXsjph%^VNjJJZ|wM)1GU>XJ~BIb#HBhH^&e!^Bxz6yw&=_*_v8tIqy6+Y6# z;coUHKzFb_?mZl-h2B^B3t#zQIS)3}@!cZXEI(Z6!sz0Q`1)z^=GNX>@Y1ArwtWMj z1+52KAQ$MT=y}!UpI9PE?JThf6#SZc$_s!Vpa!~poIqZbQ4M#*mJ5L?oOzOqLn=)T~rjkf5^kj zJUqZ5Xg4!Bp)U&N=P<=V9=^syBdEx@22Nx}6Q`KqCJ!?>U=Q&M&!s1R1!qATOfbl* zyn)|Yw7j4@A%BAIW>094A)E5P!GeC7hZf4*7tq80G2FDejsFuHg!kBcpq}l8@^%o4 z-7)-5+d1q?Icy(FJ=Z>f-+NQ*195joYT8zw%?>EHvYVFo_ejf6^ROuvNj2#vN#ojv zo}p}88X6_FMS3WUxE6gPkL0E*8>YPil2qA3DDok2Nbr?#da9|DVnXAKA?(nn4{ zVc6VIMK@(EZ`9qXQC z=T@9YBDR^!o_Ao)Q<6zxHBiW zz?eP`#m)?m@zIw~Clx%?m!TpAmf^bzWk^RSA_t?l&0UY)xv2`H87$)fyTU5am@TT1 zA`?|`mm5{Nt^iTMUZT_}Z|L5!X6XfcGo~O`-#2;RkBkcDc`Gj-d}yQL$y>z2`CI!q$P;^O!zYT+^9Gd_C!3?wf14$9Aa?%i?F z-~;~)cjUyWmU8(1DCo$eccl+n?C&Ypn`w(llV*YN!A^%Ac^bHNLUCH(vJ;H?*+YLk z4H|*cNdhWU!-%D{yxl8y({36wNDt%ZX}pF2HV6V$kp&f?J{wyU$?k^_fNEo7!Nffj zPjc{z^St%0BJgODCP26-ijY`Q1dt*b3j+PPy@z)Y9T5NMx9WJex>Tl#_c1lCnw<}@ zC0ca}_R>zVmtZd<5Ta#WfYs6ZN7gmCWnF4!WrX7KI8mz}hEBP%yj&xDge|G> z8Iy?%#R|%NQl$&|WY9K;eWphje)C`P^WP9{&fmuuUihBD|M;)}*B_n>+OZ0@T*3RW zBTqv&NJFG;@eXvp%kq*~S%&k$pW@B3AN+5=UiMi5fYt6f$sV2HL2Sj(^Na>yVB%`Q zsS0pL9OU(1;PrkVZUs0H;7Y&?TV(}+_v5^KhKDzpWE(o!a~HUBmLa!dT4Ljz+Jg6s 
zyxoD_)}a-^l57*bPxo=e>uc|Ed&fBqC+_f#4hjbbVui8qpX#?knQi^fPu} z$Fugq&R%;U!)yI8F!Qi=XEXgc?zH>caqRAa=8=aE^&v-%Nr10`LMme-Fzadaczj2)h6iLM8YQ zHV^0qsnhnvtwq_XZkwzr!24yvVx>Zq1V%~%F-=ck`t=jiEt$>gVTk!xe7NE49){77D2VA!UX9L#V@3p7R z^FU9@oqh7`N_(X8U5!LSB@nlE;vp!JgRLI|i3G5=Wmh*53CTtx@h2uSmkNo5zgX@l zkq}Pabb9T(LbMW(AMcUqwD#T};P?SlqWdvm!05{+;C=?a2T25Z29abQ6b8)hk_e*r z>^8fuU?9rcEoC9puVsc^L?UhMqB@n%)FC^ui|VyB0nR;qYX!=LsCyeX-?}@UzWa`a z|AZrN4DX#>e@eswUq}jJCA(iyX&3&)tv&IHEe6P*()!bKQB9!8Vwd+xh{)P4xACg% zrW9a~i!1Z<6?aNsu1mkPh_LnDmuNYmXc`ST))B7`A?L5}Kv6=7YziQy_u&%Eoah2p zI#INiyx&7{VA`;!Qi~MZ!6CLoo8NH0w;2|_#bgp(`AN2;4H)i%8)UE|*2UW#aiyCwMV^rBduSZ4 zFx)C%ui<^TuJ;|xKgYxG@bFC@hyw;aC9iNlT7(157z3hT^V;~1@Vt&r_=u=mtmSC?~#X(m*hPeB2XbodOWOQ55$)lLTGhLk3&84-^3-WeJ6xo)_&IB4X-?bT<)!$g_!Hbzo$~q;kRDn?>+F+ zZ^JIk8?VrVpS3UBCy*-LC>Fyyw}660&G7yci4_1igJF}PJ!*g;um1o7)9*pro)MzJ zX@JswZxq><@Wb(O9$$YHhn7xzm|GBrQ4A_L^TRgth8>2XK8<_@8{;uD*CW6cFFt|3 zag|nigKJ)p5@W63Uo2yytA`M~$N-yh{MwSbm}9*~Tx#_CLDww8^d*zQ3@Zqa;g9&O zW_WG0Q1Y>(yYhORF@48^)IhFAc!D=J(7>DgFaf-!u$9S45@ga_Sk@&o>P$?w7K*BL z1=`3~E9y}8^YdlC@2~lF)|bnxP4&}*gXlvBBFd&`G&$rA7+MkugI{w>UZ_jDS~P)K zP#uvBygd%E1VVF&OekN(20Z0L>$%J5J7GWbS&=X@P&7p_hUkW6@zm|$DB9#wkE*W+ya5`Gn<{&Z7-r7 zE|x0bL^=aTvA@L@>w^WhGHZeLTviApU<(LpxFbNcE;!#*a2!@ikhFQp#in4h?lqr4 zZkuyXBxbLI!yF{z$8rEK)z8k>HN2~)DS}~w@UzT*%^xDlkBCZ07KtDNnQCm>K|#=D zc%kdD#zjYy>Ejr0i*xWjPa+Kf2EVg979zNL1oW=u#|RXQ$yIm|)e+^$>HF%2qHQi% zjiWDbIT%)*;A6U&oMaLK*B8ARdk&6~kdE=_EZ%*fMH2yEOAAY7Una~JO*G<8(e9Nx zUhI@h$odWBAB5teWwxX}A}{JNipi+EM^O+FUfIQf7aJD8n}*9jS+0rfo>;KsPVgQ(&CGT0ar zG0g}|Xfd1`OU6cX&9RlbQW zSb_>CZEi#5EU5Vt^-g%Q8B1Ztl?lB9){%R4u&HdnKb?w z4<{lW#fN^R=0_@zF?==@PsTyB08I#=Z=Jh5H96C3Cav<>Xu%oPEq*WJ>mP#Zrq(Tx z0U{cTa1(4mC=PTkgxuhzJ{LlWS|JI>=)lSa(wMQT-(n{76QSLTTq_bTKs7*#(5}ZR zB8q781K8qu*kiqVUKcqOA@7#JvhXs}mv(U*`Dq#6N zb_mjiKMe(#U4z4M)WRsZ0-GsPxs}MRG;T0Z?v6~Y|3hos@;p8+ecL<+vhdc+e41k~ z#FgFX%>OWguQ|_VTC7xuQPm>07HEaqcB3In{AxIkO#uv~9pW}_sIgJf zMA4ytbPf_{XP6J$k}wiMo_QpAi108ROa#xr!^uY!Jnuig;8A&@82bCKBDfPGMK;n= 
z5u|T|A`%Q)B!QtCcljQ6>CxCyh^cmU)@8zlIS>!J_NY-#bT!Bcc)UbTZ0zp!p^eu> zW^krbX6VesnT9ud=r9S+6^1?B``8lXYseV%ngD26r@qvrf@4K?~ z3HYZ>JV8w;cmU~1xQi5OZvFp@VLf47W7wN2RKRT^=weBZou@yBg)(M4D&YFjLXrmI zowyz?aM^0X3>T7McqFntvbeoq5HB%o8yV}46nm{ahD3qZy-Z?6z#!nzb?0OhFp^+x9`6Pa(6kh@ z=qO-psZd z5>l|`N}8D+2@;H+M6t!sHhiPy)?a}TrcXb)BaAGdfn(iVOze5sLZf0(*vGl52F`nl zX4rI94MH3?tg4a7wD*5*#bqg%hkP%R0EcPI4#`P<}3waENE^RQ#Uf8Qr_;?aT7oukvTCHg1cf zguD*k{}tZX033UJC+ish%Y|| z9^~nvT1&j^;!szAIbMH8?@wMHUeJjxHkKDZePoPbR_TCMI%qBIwPto$ua%y*(A@g~H)Fk*vS#|N zwVjsRX5HOs-Q8u)3|P0S&sc5lpmnd?^8SZq+qSv6n>XLC+SXg8XROk*1m}H>b&ur& zI>#zK$L30h2qRj554VQGTZiS=^H%8y!AC9L1%S3y`UFSAb^z>At2E4!@h1SsT~tA4 zmScS%bBUq6AUQ8`=qzc%$Q|QEGIw{FdkDGXoLlB%%v$y^a!+u&nTz3S?(@h!$tS?vy}FgJ_bPx1jX_h6Wd zd9tmW*@+Lha;LI(&|PwuJ-1k>yQL4Xa4lqY<}0;@g>rQv+ZMFXmn*Ixbd>9^hcZF? zO1)h1gRTcKlPc*Z{N(ndEv&@3$I?g zd^6~uuN3Nax0?4{f3dLa2D^$Cx8UU~QaN8OEV;oB->u6P3a$8Vu)AC>*UQpN-m6yg zew`2uE_>x_JzuC)@(&7LxiClg_Aa1m^fUjkTwl~Sck_nnB+ebEl>It-S*ks7@n_jB z)Po&MuD9Ukt8V>a&AaCZ2NJz6RBCgDWPj~4=%D**b$O+(*E`>+x}U1m6FCQN)x6U9 z1Gjj2Y0fQ`Frji&pa}+BWj=M`0j7DuZFR5vl3Vp%)9($pSgY!F?j)mjbJej!TlKRJsHP6j0 zuL&qe2zeWlUc1X?XR_3aT+aOPP6pqL`1)@m!6+W3tkssCEw-9;Ws15>y2<^nKVoAoXcSL$XqQwY+Qbwd}p z?-ZQH^1|Y=Qh5o`mupoAE1_QVoHEx;uG_)q_FSP}T+I8}M%lB7YM;@QU5;=wPhFf3pstq7Y zEd?rS;EFy}1FKfOyjoaV#`c#&4TC#-_N;R&k2U{b+?jM9x_A&Vei*)eYQ(|+vC+{H z=fwD(ECzSjIepTZ2Od}ff^z0DdbdzqbWW|Fa+djwqzkq>7qvQ9F8C478NSViqEff= z^Tn}NJ-_6<;P885e8d?)bsANoI*U=u_4!=2n)BSs%CHQfG`yH!uGK1IBjKR3&Wj-= zai>~{170euN`?IR>Nx63BOA))h#CbM4`**r+{s}yM2o{&G|;dxUm9`nALA2HbGaN> zgM1o_ZLxk6W8^r-^IR_Ire4YEcFvjg6Osor;Bf_d+b%Jo(mOZ73(+PZh?YddWSNd;okB_cqa|S5ZI*{`FX?0_Ii+v;_G)Jv7C&3*xq5UcaM*bojEpo`q+ul z^)cNU*GM@XERr%7EU-&r6Jtvj7v17LZZlAwT#-$MA7aIe`1-q$)PVqj>V@;Nf$@Rd zr`BJuFP41=m}9Y4a&UF&^7R{+FJ73we90+POU`1!cd9kVou3Cj8*#7!3%(0LNKBn_ z-I@1lOU_zt#dC_alAFt>f)2w^1RW0xUKJ1}1}(T>M&1i}h@E}G+o|=Jo@kteV8!QX zr;B_}{P6Ez{cydL=+--h1iVznt&&4Y3|Pk6hr74&HDirP70DD=fIOh@!NuC3c}91I z_<$1Ksy=76)!T#*KTSTI&#>LPn;~(+ePPAx9i&sBP~g42b)j>e)?G*mwoog}CUHZ( 
z?#4{eGUT^e^C{~lZzn5&;I&&fK_zcy(}j01wFYrJboT=ui_tL<yVCZc-DmGk_1k;!uRAqhUx{`n*)kTFwf>{Hz>4z@ndSbTWj(Sj z<+u!bs-C4t%GKUR77e#WB(1m0R=5#v1&FV5Q}==F8Z25!miFppi~B;5-K9G(S>KT2 ze!)&K-GJ5Y7JJ}2Wf3fIFbqott`e@{ zx>qR6uIJi@oDy+O{)jw1$|0aS&g^VV|KbglZx%L&eb@K1vyZptrSE*LT3N%&odd0P z3)N^{-C6`r2hk7-DgkClm`Fz|Yr1W@%=tOyT%C8Oy%nr%Qw>`u9${3qMXf7^iXT;L zmtPpfi`0jZ2gN*Q- zevmGgRs;J%^FHZEwQt5(_eo#I?zMNO()Ld5mRLS5$Po%Btm~0{Bw976@n=NF?7OKh?!cE;m zX(c6JvaETq2+&uxTVKQ`O`R=rfRnyWkPU3mawTnJaExv z)w|^a+ex-oxT*fGbqiGIX11g7=h#%Qxpk-DkQe1{iqUlZ%$>P4lvpS~aH}FpIDWCV z910X$6NMJC3H!soNHkwx1ba$+ij9n0;DJ<%%WvT4$q$FPAy&r>AC=-q zM+`jEKcl-J%@IF@E&2{|sB^?GV!6nNY#s^9Aih5J0SiJ; z?i?VUz=ptHfTVvfV|f`~e-|YBjCE6XW!BdHn+j67(IAaJOpfkZ@@s$5{R$_rSX){? z`_I?|yX&&nUrY|ukH?vNrBW*b`<=x_SdVyWp;q63r+}u2Z@-7AXQVE0_n94{QU>1T?X_D4b>r z*x`=#2#5d_73^wS9>!6juD~V0N5oh_DM!QR1WN%eXz4nh8>)3QVh-E}X4;Ex?jHpX zNef}N@pN8mz#%GR9(6!Qy8XAnMNJTIg^;3$zC99Yfj|gUV`sYSTObl0^tPC!u{CEnfxzpcXfiv# zrS?YqpwzC-Y(OfS9dET^%;<=7=;GAWA;l`D_4NY{n8UPY(1u5}vDt?7Uck+4XS^YU zZn6T}g6y^+Q&?Vht0mo#@m|aXX+WbJ@&+0SGFmCH%R#$bDoWulRB4|w_`k$g*?=K# zy$l$Eey{);`$b6p5rRvj6u+|RPU3=l%(hmsgQ%{2maHtc4|dnR4$JE^2oE{jSNHp{ zIjFj|3*U{Q-%wedk6xAEF68{WyX5Q6;1gEqOJx~ZxX_0b!H1$GZ|nMfyS4(o%NiE7 zG(c5g(Sr`nh4KKpbQBdGMWuYtbuQMbFfxodmn$yxq4kH*PK$u&){BoN%&lT|v*&1k zi7tK9wJOCL${SKAJE<@a!A4aUBrD}QB*4a**qro9%d$YRHg7|XcVfgj31uEGR6!Qr zWABYNhT%%2QHpBb(Vl>msr$&L0~_q(n)fz-G%uC`ymlBd5sxw6!10hl>bmY(dx1@Dp91)dh80a5cmyMc2BHC<=w>TktjPgNmRP zf8T!@T5=$2DUAh}hcJboB0BLWL@fwl!YlG;yHy8o2+Eu+pgD;aP~c{^t#Acc{dF!Y z5vi6V-S$X`DX5z=3Oz^^FxbqME3h9yJBD>xXhPGvfQh3GO#ps2tKFbzYQDHb=F1+C zc$7(g5ExVTfGmZr*-b?NX6=%l-Go|v_>sjh>TVN`wE@CSQTZmYa8|Q3Qy=0+jM z`J^)+!xy%>5yEXqL6xd>D1oD`SEZ7>A%i7iXN`@JC6@;v$WI?9G_dbX17GzeIOB4`0NiVn*gElUd zpp7adkJ~q!@!rB84{iaL_l-lO;Zt-llhs93>2 zs8t415v3q)BY!HCB8v-y2D&VNfV>q%MdV-~tJ>o|k;H5u3MhXIov3ZJTQEw=T5lG( z8-=painCIMK7*QyoVQXPhI+kOs?NtM4sSRMeTGGQ)7VB_rP1Eo zTz~AbNC=_BPP~af(9}qG6zCHC2}|IXX>AH3{f-Nuc^)-8mp!lMLAkb4Rkgywtp*6q zZyOtHexkeaIF9j&QdY5g@znS?{4n!8zW!H`B(1P`I5WaegQW)M2Fv?CoE$_XXO6gm 
zFsI!(gS@ZCT5$>CHJn7mF@)Ff77_OlUPHetP9nTUVR04VH5^IAVT9MHC~hOXhHDA+ zzx%vB0I={zr)9}35@F00{Bp4gyE(!cld{4J0-{fwC{^^3cNJFwACeJY@u1x`*dga! zRT>}fO@C!h8UjP{a1j!JnMOY{5kyJQL^?DmLw7ZAD1qXMSK-tEF9NmWX?I0vlWE+c z3=iuFDiArzy882cC#7H1!)*D76It&@8PL9sUhGwC!q!K%7^}6Ri zDA!he^SFjXReW|tHg7?k8k8yk+i{=hbHMXX)Z^UBB!HqOV2Gt3>PZ{=-P~IIx>%(e zExsxWoS4#`H+;xAXFz_sttJc@#uK*eY6EE%w)+~XBjX!xyGT8=Lvz|ob9uM9?EH?p+LVPO*)c`mwjt~tn8;Bvk%r`?F zwnnJOj?Zmu4G+0t^bVXu#QSHf*2ZNHTENC7(*WAzw01&T`>3#CBSyOrYiI1_snes; zN~`8t>|{P%EhgoTIX5fiB1sAffI$**3uI!ew_=QZ>4Xo7O^L(ZId6E+oCNB`|cG76JelLjV9+pNu7E*TK}uKrkiM zLj;er)n$3t7D>6A6C8Br!EPj5Oj4+#qCjF2=k;{ve zkk7YLpnn8tC~0j6&3DGoH1yPlx-#x&s{%wR4jupTltqYYuqvdz0&1#g*Dl(%?I8HB z7=rb4*VV0t9BfQ-MZ*TCHgH--Y&m{CrsTXkeN73}`kv2K?^SCLtA<{YMTWg3yEEv1 zvrt)aMVe7TW*}+A$crUSRhVZ9KRJCJiAbskqXIBC!lgTZE`9*O9kQpre`~F|R8i5C zinYy0Yr|@xR#e~=SWbalSO6wYD6?RbP;7ymE@m=ETnB&^?)LI0x?v`EDprZ?QTx{Y zcDoH*md^CQg%05Uzy7@GgslRzs81HRwP?q%V{g3;tbqS0{Y@F`Ej01geKH*zQ^?vYo7ccO<)-Lvu;mce2OX!0F*s7n?otDt z?2hwTYO(R4*=YA7?gs7Z1Kq~ifIrT1kkOf-OF>DtkQn!Z-XxC!Q(ujhJIoAtK0BhK zIIx##w}jtLv!hwin;hl$a!(DRf|WXm{gj5@sMmfCduY%;lsaRt59lsRR*6ty8nw8Y zWFt|Bwfb2qlM);0ylf&uVjCqem+oAkPM$tcKtO_4 z@bgx6U*rzx(t(2&+tqagh2V#X&5w)Sw2__?GY~TNqWAkl#we6t&uu4uHg0&)hN_vY zJ+bgj9m@v4XUv<-1#xi=ssLnP;+95rUq^G ziRlsa4@X$DM3eg>nq^duSw<>8iNJx}MA^v;n8%O`z684C(t~eH+8PqCSJ(@i80{mz zGYpbRvY83G@_A80<@26{rj?L&7@l6efQ};{b{vknnXGl>;V$Rp^Z%R!B+CPQ-*=5` z8Z3|8iCjR_xXDE&Yvl{Lz``^G0Z9O@&`}^w(Y~T1!Y13#-!QP?Y0KD3V&vhG29Ofo z1K|?#MH+<*p(?g1t}HMLfdz&E1`Tux*+LGS;i&U#n^LCW1wiB-Lz3-Q-hloHK}Ho- z9^p-J3p8n|nmb|{%oYpM_uRFa4&Y*_=^r*vS5H0%!6_W%3AK!FNtq=MG(-^J zf@`>s0l*fZM=kwE^t4kku+iPcC<20FAGeR(Kqe?RvWbYuM=JUTx z7zB61jFDj97W%q{~#0uDDh0chjwh68Ze^B}N@>J-L(Jp`POz+O#bZ<{@8`3sTa`fT6oX6K^Z5KFHq%|HNjqCs-Yr-S%eYFE{hh{uB zzy8M}NKb=33-#2o{4i^D!^X4;y){&}P)`WCB9FI08?SnW>yfop8V`@!98=A!?tNHR z5?VzvTXpgFkQ&r95hN)Zb|eSwRKptUf}`S_>ws|tL)vNB?VPiVoHH&vEEC$P6>(Aq zhnKvdJP=2&AK^+2{!c~DlDL!47||Apa6#5uzLQ7acnii7uJg1a!u5f8S{;yx36cqe 
z5@pq#1RTn35OFAl5cC+!QM0%aVw5kjkd%y9kSrcH_NQDty|}P|`y3o{(dFm^+Lp=O#YA7Wrx_ObX^hw z7k)IIuoi_&v-!;A6E9Hcq+Hj5C-r;|%KiLet^Syf(z1eR4*c+o7AI%ngg7Cl3$Ax? zKGEuhBPbbF=>;haz_5&9+j)iRJ!fhpp%8>CBTNROdC{9Mj^$*p(6UunMIHQCx0;dz zx77GN;VkdYLKEoD7gj2elaw`%YpItK%x0*@doWsOb-&fB@C<|+AOXFa9ijnnw`vh+ z71V+UPxsk3re>w2we@wGYz`K0^_+s z)xwIcCTXUDNhIk5(Y&>2u8|cxc_uW%V8>v?7()zo*P_`Cy(GN9jcr@(M8XgMMkkZL z4?h}kEcwUBz_Gv-s=~b%pSb{cYC|-HT&R*F6cE(jf?I`FL)0nIYf>SSY9kiz4DoVT zc+njqXjrX@pgRqM-q$<>A(~!)SzQBl|4vToLzT@Fncj@47l&41wZ;ZqsbY=8oK^}t z!@U!9MN2zq{mjC^RaL5%+Oh-@5aA{+goDW{^l1np1*Pf;u!HC>R*nH^As5|`%kUg7mqI3Eq&O`i$(H|$=6z9VO0Nd4F zfF&2`-(A(31jH;YEGjNJS7HOuwSf)l5%DK;4fs$5%;6ZMOC^NSIZkTG8UUxIA7`83 z;dse#bU>R82Y*31Aj7)L{xGj(BC={v)w2?n!kk-aBDE2Ijo{Eyxt5(v=?8GBpIfQm z9G)DQ2NX$y%bDdldNP-BNnpBYOABDqlFm68-_06|D@8?Px~t9yGH5w|UTOi!Qq*hJ zUBJOGm{nYJSY13PrY>G~DmC9URdf+?!}s$AZ-LX);=mzChr2j|L@ydV5p#eUr}m_( z@k+QX%5#AyjGPf4!Dkz>lGKw{qTiv#Fdi?SxmRA5`O;>e2)wwRs0&v?A;`BrGMs1y z)Qks=v5e(-KRl5mfa5-%2oITY>W;~UC`KH$ZH zMFTyQbV;tQh4iJO;DtI5nXc5EqAnOwkkA%uuq1{-C87qo`odMOxX|d%S{IGikC9E~ zDbPK2yp^<+yQ_iHo|Nii#O5%g3s##9-K#T$t3q#q&klo@kjkJt}C3P%A(k zH(UC6eFbqn5U56Q9|OMR#TyRcNoHqt*2vS%MX^4>r=5{rn5{U{h$9=EbDFun;kcD- z>~qtAXP=}E1i?r&18rOb3wdkv_%*`{@MIEtJ>5z^lAk0@rkJAmkpzAg1Uw1<6L@(< zk5V(oktdTBHkiP;lc(WvuZ~2->YVZ<7Yha#wd3iL6$xJpx1I|ZXw&qo6^Q;AC`0dE z&4@~U^cHkF!q^k?EBBgLD3yWHFS&jZf8N0P>?N#fZR!uw%{{YzfZD=27VQ;wfsqrOcc~`zW#9bX{|sU>|R>Qy?Ne3^$4&?G*ob_}rq4 zmU||-ytp(QJ=gw9H%GKQ<)Gty?oAVXs_~%!BuMe4+EN~(139PR+c7sH3%hxt-x+a^ zjpeZZktFt_bf-f`ESRWsQ2W}wmF3|m!-p<`=gmQfaI_67D6R-`4Ff7x%D5fCb@((( z0Iih{oZ zpgW1$W@~cd-O>sMP3gShq2Ly#kiC%08a9(M$qaKn;*cb|zU1j;BkqZ}nZbK9|2%p^ z*0vG7Z8A4W^p&_44pmJ{=)ZZbY|$2;nmbACEoE-F^RDM&NKLM>s-c}#RqT`RMdy{p zdVSfSIDUKqJAGviPH#&l8kaPdXm<`V(vEwhxtEgnL#Fq!HvZY!d|pR0RsQ@L+xp*tVBSVcyV(bcx_v(J3t&p!LJKX@)^7v+;FNb_TeXo@?u zvNeFeRC|iH_%|TLLb+hn5(88vz`f#K$dRh3cwTZ~5`%lZ=A4!soMVTdmgam)a(Wvz zBJlQrMuaL2C@pXdO1(5x{T#ZG(Ds;6f3S-6x z!+%_8rvQGJ^{zJL4};H&9;D4{4~y@bz**a1XQv;%ZoVjQYtR?wSt){@|N-VxS)1xc_o 
z8j2wkK~DlF2AzbQC=fPzToG}K{we51M6QSdnAsCuCfRE-v8gS07JX$0VkOl~uTmg0 z?EWSs7VL<@qJ-}KL)4$?3xN@_CdjCa8T2G%%^8_$l@(|DSzfQDhTc@kNN*;jBsHY` zAm^lAI`{rQuDyT2>_2GK~7))1$d`8=+t|T$ss0UtdR%*R3jT9Vy4Ez^n!!U z6uaT>Zs@FdBBh?p-P{61P1$}UZO;;$I^UM-8tWoXclB?oTBP5TduOojjjdMQa#GFDC1V-i@VNf)O zhlHSGgVIQl!NlwqD+VEd*f7j3tZAHc;Y@i<`av5i%*G8P4ncWAp%w-|^wxt3r;Ib( z(%SVWw&hAS8KG#97(v09iq(SKtV>j=Ss~%Xy1h;@A=)Za)T~o9wyP%e0?GVC=m8bR zM#R*jC17xK7AGp2Py}itPpov~qKcaf6%w4U1o8tgkEI7}ZYZhZQ}~|9F159KCsx@g z8B}F!;enZSi=d0~77Qxbu(UYGB^XrkO8)ZeYOhnqQW;b& ztPHAH?F@t3F6^Qlt1t{IOnCr88Pv{*#}k*cJeWn?>kFAZaC&gU2>C_B=7Gy3o7Z93 zyx0PBmEx~1JUU~9?43s@K=?ZEhj>NH8O%fU$WfBG8RzBwFpK;nCY;%r%ksX;pI>0o zWVh>zSt9Spc>l+l{9`8ngo$u<0?$wI=g}yM7?J-#I{Xy!tu8Pj%6z1h`FJ+f1r`IY z?gv{ey_mHyE94j8EbOrT7OaK$PkBS^lQ9T#C2)`u{{-Qnywv-A$PbVUXy5?o(|R6X ze*#HM4#3cUEEM<}VsT()sFAP3dLJ>R!3WUSyvyT&SAG(-pE}}@EN&hNd)C1z6-LR% zc}rZxs8r5hs(h}PZ<>+2@V|y|ABO5exlZ&erHtDXSq3>IiPNc!RC4LU)a|i55}f1; zP7hAwwEyk#J0F^QZsG|AgaSV0olbGcm1Dk#X3aTJRXh)PdEeK}p zonmYoQ4M3%Fe?1z0%KFD2g(NWjnG*L=!YV599xSC&jjKT&dVszSR22SCfNKW<0~+3 zZw%W2ZIUcUWAO9$5W!ZAbUZ1`N4qVN2JZ!#EncODX^Uc+DezB3q_JccltNTw%m{6Te@rt(A^J_YBTv#Q8KbKH*#{tSo))^WTrNMpv(nIG=(i z2m%hD%I0uj7RTj32Z<1dHZYtQ5dp2{V>%J(@S6Axs`3q!t}>Hd_pf+%?Wc z#J}>(^JOR(8C3xXnH+>us1?f^Dw7+J4Xz;0DDEQ$z%}T+SD>UW7tJ!kSjO)-pGK69 za&^SH09XO{0~cY0WE^)3IO62rD~~wWWf-rQ@6{@`2M7eEBRGu_%)VM!Aip~8m5cY( zc6xapFj4Uiy>$yPMdt;TSHW8?KgiX*h2w=e|M=MH(U)?gV=tZhWUQxZ$%VurtYTXY z6q~Wd2TrF8ur5tHr-ZM`$fh<8CnDg`4rLj&U_yONVbCK~L=H|ys_rafEq^byNzkc*4*@sAiaU6SO4$;o zD35mF8=tZtp&|-E872AjR>Cd#so)mWVR{#uGL#vx-h}`RJ=y?yuBb;`f6}x9#TLk< zdbQa0qi(6TJFEsxqdK{Su(Btd?Xue*;ZP|$*hBr~yQsd?+RJ;p1d85>zk)IjBU1P6 zp^&HA+sJP13pHtY#R^%hL1nSLG#=afCl&2Fa!M58a?PzGLn3 z*pT zZAI`9L65t-46_h|S1+F#>6fyiKTUXl2Fc7IKoR40VyeBLR?lGj9vsl@ zN<9Tn{w||F%h&^LyoR50`T){BslL>nv{9KsUDk!`wB3!<99;}xWUn9Ie3g8h%q>~X z|4!z%8yCHYGaahlYG7>1?w`lkzky^6#+K|Ycvz8u;EHBE(*(!RGY!Z|)LiPl_CEZf zvBNvCv3k4%Xt?mZZH)|X9Gap*y|sxOnoy3dRXHjQHOF|7isUpKW4jnGgjP#97;RLk 
z#EK9tA^Z|pYw1KQg9$)IvH}kbeTP|*ggxq#{C$i2#3s!FYM$6bh+MsD;|+tQK(+GE zA=^gwtpTot(qH|n&>ofgp-W!u^bCGu+1LxUkAzkl6R<5qNVaDKwBTzaCN&B96FL2` z)db8N+caEeVLMl!ZIvv|?lG#W5VLcW56RW&4N1v(G2rf{Xi7Ix8 zUabd*e^2A^Z$`ohl%UN^^o*+Lt+`Sf45Pdl0Rvz+;7n!oU>|9C?XmV^^c;7}!t4H8 z7K#ca477vRT`;R?iD#`4#u?q>d0{+%YBP6_br-y+aHx1=7E4r>ZN?4ML*K|kjhorN zDQ_9;K=~7}7}omvX{=gZud!|+WJBe^3{FIt$l>N)JH+qieL`5vVr*UC6XO(f44?$= zGq7(p=>`LPPT9h}3ARwUu`dxWGKk*KGkKZG=aB>*X6GjOMA7s)EOD4g3yzTv4BWt~ zLwu8^zLCitB!~zu=Qw31eia!(8ptomD4l_=S-88PCqZ|Do@VQ3&_?v+{W2T<6(&cc zBx2&efEo5jk#F@FnPiMrQ zZDq~u1XC+)p7(c~iQw=29quT^+CoX!2Gwo?`lH?L;l`JAJ`gG#zJieQ=YS{Fu zh9s*NDj?s7_7Ny0+@H#!sq_Uwi*`w{YCK1!y1ssAB!>YF;Oie<07~;ct zPG*TZqcDZe1^9N2%2!FVi?L%Dk8@@wkknp-YVYh|^6HlU1ZA9PBed@l+qVR&0cikI7_%Q{voM zS<{GyFrO~f$|i8N*npdcm*ijZ1QD)OYs}8REby^OGb#1*{2D!uh2%b7TToOlS62LR zu!ddR#xHwYW^Ho?lR3W@9w{3pSB#gvojJLMX6fT#=*;q%9Q)+yBx7ghGGu^&GJH3| z1nKI8i(vHDxEs-HFqKg>i)9ijuCNOXW{WPQ$wU_nWTULsRcI0rC6t`xO^pg`&RB3Z zV+dlleUtb0$f{tSxAJ1O2NxP%yhRN-f6E`-cj>5$+xSZ$Dmi)iY7U1COZlZL^rCs5 zL_`3AVH1e$7~&c?J}ns#f&wB8JzNna@Xd&`$_X+3PeyP^_xLHjW3KtaP>IPTaWXlB z?sBcTIC+ZPcdY1{92-9aip%go`FYR1KM5FoX#K)io4C|gjy@a%9C^&H%t4FDos!=S zYXm7Twu4O~7j6X`K;Hs5^W?c{Z}KcW(Kg^w{|ZIJk++0OsV?s;Vvfeu6z7p`Cj?B9*|pJTs|(<^3&!#g(%wjaI|-< z?G`w7698VT)I7Xxg?5aw(2j8vbA;n4 z6RdKZiAI1+d5aSBz`5WZ)|R}U7QYdi0D7cHqFkc-SOXh?+3zskq?!SCyVGPqLyT81 zAw`}9f1x!N?_h$HsOCegk$5lWXD_YrlxReUP7u(UN=9s@~@B+(5YL7k1*fr)!)p5WlE<9X}dKBqr}s^_>kI4V?mN~$b|RiM>3RahLV zep3aW$B5QV+XCY{vhD%v?m@9AZ1U!JEW5i;%Y%Zl|A1VG{e=zUCGM2Oo-vXbU$1WcHv#*_g>fHKn?pz?XrMXfe z4-eCa2$3<*m##23dBBDFBBg0uDQv8;x$EZ^PRQq%&aLm4iZBCDI7@u3+%>d_bSz(3 z-`DciXRcjZf9X=KdK5h6%JOoJ=n=A{zBNoXE*L8?^GTL2#FIh$93n`ME&SGR^7EhJ zR?gq!3om|Y==Xp7fBx5VK?jbzmMgVtY+>jDXz;Er-UH8fMPBwQ=WRatOS~cWga6If z#6ByEUzK}Kaz-yR5m`|rObUR3@udZ(D!>SFm?i!N^9OvG6JR)i2>~x&6<2@nM|k%P zlebu82PWBb7kEULZnh#?B0NrQ!TUwt?nE4Qa0QSg+lBAbP>pyE?E^+|oKqL#PT%mL zFkHYt7@_@?zYW@jp$PgelnP7HL8YKGJeq_zd&?=5+A_RrBI$Le!*FC?<1+HT0RYYP 
zB-TdI*RUjlG@kKH8=vva4&lOtyyW44hFm=Jpp6d%;rJJ8GwsWHyss@tHh^@uJ)dlQM62UsK|mD6f^<1_V1Td18hLzeTBI|w^g7sWJ4pSLG@PuFa`AxkUnt3r91y7 z1@sGz7tr~8IOou3t^N%`7eGSj1pk5N)qat-FWo6gFuEObkO1$A1&Wmp(Gn0TA^bxk zb>N{Guve%x+hLVa1n7E~#J@C%4thED?}C>1?B$Dv<|;?5H^2(7n%}4y#IpcfT$ru$y=j&9_53B?1EGx=$J1rPhCI{b#~{mrV-Sr((h-}?j-u+4-sXv1+=7w=<4P7mpb zJR#TjFgWDUj+U?2@Ge`#i(^%n$)7U$8WZaNg5Hu>co?4I^M0TC=b3z(Njnmq%sggF z^{Z6dX7?(ET415uEYx9y;-Fnb&Y50)>MF=m!7Gw*&>gQK1~m>PV1zlwv;|#EMf9EN zE-mLx6F7jvbrE#SqRey1L6_+cWX#SZo+}FT1wk(n6!Q*Q9@AmR(qoVYH784fx6nzx zKnc^}QI0?o>U4w7aLG@CCG)9NwmNo$&}Hmr?Y*$TQ+dmf$s~RZ3-eU!dHmKq{@w?R z{0;i5L?H!@! zHw{H}|L2MvOZe%)#d&=FF(g^GfA_n>x$FD7^IXUK4#C%2zANb26#9bm8?689182*Ug zQHED83nU-AMps^TGof!xkRFKDsFL7~4KVN~KP&*tDCA>ek_4Ev43_n1j2aG;qXly+ zQ-LvZ)T(JH`}z4YU)|UID(Wlc)u#SwwSk`@T@EFj8O02FpM|zWz+l6imRI5ut`OY*RRr8{sEts(i0C@yLgP#1*(g9%?7R`kX|l9(tVm$1OXM^2R%`JBNO&zI z!6uLjHDX4X@Gvc%4&=6|i^a4YzP?WDeoWe5)w?-(UB~D6?j@+K8Q*|!C!3wsm=;m^ zilquLk?w#o1h3fQ@UN(<%t2qhmgB#0&Vtr7JnkPI`pq{r96L}_G1`2zVp9dPhLBHa zYny9cO6*=$2y>B4j^?1SRGT_S*YM_vDC#4sk<+)&4M#C7 z*o~vFVmTakoxo#yQk-B?>#eVCGcp{EAt4--(N!E;pv4fOvX&8+%D!xvErw{woocvO z>Ud34E5?B?OqKyA9vdhv?FDEpnw8s>gqc}{lSkCw zW)mY1#%#Lr2`6+)QP+vb+kE5yuQneWZrps(7CYFT^vwwop;6>}E)*J*`bMFKx5Xgj zBfOX4g3@?2dHB%l*RJC&FIrF1Y-7FHY66lej1trp=ww(6M=P3Gxp`}EiWCy)pjEv~ zTc89bPCDF%&e>4&Cz_q`Vl$M&ipdSa-;hLBJiK+s)c~Q28jPYIuUAM$ZZH_p(4@)7 zcsLR0Xg;(ZH9t~AwDj4KJQ<_R0yrUj{^;D5>8Y7MvuPF2MhDBtZt;5&U;jCqG6WP?}qTnN?Ds)nFf4#Zn%7u&4rAJAjR{DdF3YO7WIEy`&qB0_l{ zm)=)I)Q3_NS_;UHCb42+IbDbw(4QW63vG#sJ;#;bIwbjVn*Fe}kSbYp+Z zme8#kN4NN|h!c2R2LM3B0YOx5MiR0PRRmHTPZQ@2N^Mste+svdT#M_6lE1_enzU-- z$UOHUn3xH8r{MJ|ZV4ytIu^}Th+LJ~tCIb(xlaUk7sGP9DJA{k&JiZ@VPIv~D3+K`; zDoYDpijE}5cO&@3$^=oC0;#NIUrnKYl+Q?~4aX@m4lW2*5~4cKRLw=rkEoorwt zuL&0hMmv;b6e#J480dOvwUlzoZ!k~}YKSs8DoEy^HyZeG6J1wht+cE{qh6_iBlZaW zqsMz18Zfbd!*$d`Ah-&7DPp%3Nb>ueaoa$ijCo9s|3h=z@;p8+UfVndqVLv+d76DK z#B|+gy#Fvnuer`<9ISMQRn@|W7Mcp*J&_=!&24U6?S?xRKh?4w72cen9TF`^ z$&bF=iRxd_A@KxNN6YsRibtJEL83a;IM)`PzyambQ%1N*5Q^>wLID$(5Q>fSy*|7# 
zPlyJ$Dv5?}Oyl@3$XZs$Tg#0in27SiU8R9tNEUMFEOeaiuVj*yGKRZbo zBJzMkG<>KDB~L^pY^de@-RP!)O5h~OF?(;@`hkt({LoZ_==)D1DsgZVDnYx=d3=42 za5HU^`Vg8EAL2$BpZE|J6Hk1=X%PRhRP?>eo#IB=3H@}h_bl2jbVJoCc$PRe#M7jQ zQ2jEi^m*dnB8{3`|Nknmp3spooK2-FK)0ZDF)2q(>5t(=jEOk~rG9i` zq=9-T@}rY1Tb+#Ii4iQ1M73?3OB^!MktSS`G2*`lM_@h_n`6SpZ4E=a0^b55bm4W@dIJKroCF$reA1 z(h;4i<{Lz5#7~BO0Z#&|lsrBIpD}PDO~+^G|MjsQpMjxXycRY+J_A>V4aaB11MSN{ zv+?+hv}TUaKXVwph1etnPw-09k6D#(`=` z^pd-u9%;}Yz#j{@^|&;KC2!n?g*!efLo2iiRk+W(yI=Gadd>wmS{-URL=K?LlX1Y6 zi0STA@@qWQqQ_RAZMjo~^Mi@`h_B~6XeN%s+vstRW5~-U&o;*jgBFKaykBJ#v35a3 ze200WSRhx}91CX$8NTOvgEn0?@%1t9*Z4DL7k9+&KwdxZOIPAi_UkP98%%za$!{@{ zo`0KHzs%%Ukj!-560-y_WGja9(_|wEC`@8YQK=)j~5DNyVjp+IioR; zxp_!F?{^5$W1L?hv-b>vqAzIg3(9xKCs?vCdVh`^V3Ajrme;%oyza)ab-(68MtmqU zK65C}fX)^{ma93c%5!)f2wc|$VRJ4|0H>l6?$gT^coOsAbnMctf z=zImv&MOu7EYvZUUqyn~0^{FK{5zBm;}t&6->$Yn*p+ueKea2}|A_<8OAWO3KimJ* lp6tN$y$AbW>>V8X%FxhIcIf+tUL9H-dVA;>hiXGF|6ib<$Ql3u literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/extend.cpython-34.pyc b/tensorlayer/layers/__pycache__/extend.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..597c2844001b5305d825e149a0cb25bd7b9cce4c GIT binary patch literal 2987 zcmdUx&2AJ&5P++9XMb56a1cp=geJtno2+*+jv{3R77-v6iC82!f*7Ikc&2xEmf4x1 zyBF_Tz8NXUJV73S$H+0S(3c!?;*tl*DOEl8_($e~l0!!B+UlO}s_vTVul!eIroO!N zdi}Rk0Dr=v$3guzHnoW&z&{2Nz*CzQ32MAzyb$?5=0KHmmnxZP=P+aD8v1r zO8r;3uneLCyG2;9;%JSg&}e0U6h}(%s9Ar*&{2~a_W7Gard8c2>G#99{|5W8YEIvq zq+T4{4M*yMHx<&<*1||w)u$QzPZ5>d*wj1<0kF>K0PDDOMtjbVfE#~>UAz59&9W@x zT(5_o>l~l8f#6*~@|5as9P_%t?Klr(C8So|<3TtQu?mxzCq2G|FLZ*V8pjfOO>s5w zQqi(zl@NSG_u9f9#Knbbp~-I!v`$sG)7jeEYG=!OQL@!eWWVF3p&KN=>ZDUWNaBts zr8jkxRCTOb5YUIQrYfe{@89rq2%5CxHg^}_SJqQ zQYl`B$yjkaw{72flThI*JnWIF2eAm_Kuox&`PLxx2OMLPZI5e=t=&9@D~!{z&Md~b zNwlc6%q@Py$8q>#EHcNOX6&8m-ozWFkvMuHyKZG=g-`e`j$uwC&liIv3WQw1E(X=p z%QsqlVKs?Gi!WWj-fZS;VpC^VAIP$Lp3`SZ-PUUcNrr)1$Z1Uf}`v3ybkhL zV9YMh=a!Dl{V8VfBnRNFFRz+9 
zWtVHiZXEi;+n0_-lk<7i<&<#U`M@}&m?9=x^QG~VFFhH0n~`vppQJ)o=|%0{#22Z?t8APoRNB@u5SpB3iV9NYfjSd`oQF%JI!(cYViOJs6g z>&+hUkoXw=TBgp>HYl$f=gMUtfAgn=+HGv=3lt|~+7=tkQmhcKuz;Xaprnx%T;ctG z60Gxzu#ag!iIFH{jBU)JWMb-5zl1s zKj-pj9O;nA*xfr+4%6?(O7l&@k$ps7!f-E%u+UH-^faXl5Wr{c^WQp28>#?pp-W4Y7#zDv5fV`w710VIl?d8MEv~A|m%F!r}l;Cl*{)S3^ahjIT6_F>b^J(#3k z9NZ5_>Y+Ck($u!XNLbY;i=(HA$~|mq5rqKgGTJ~FSI%h7*%ol&ud!>F|F~I}WwiBr z7`e{z*%}DG?nj)U6dZgV@%!&nKa758`$jzp}&B<4wvui^`x;Hbv2L@p_=23{&! z)~ynPZ|h!L*p0Y2Q7t$5oq^V=TJLmrcX!*_v|g0#wiDU!cxmVciLW~8R1cE4<4NgF z-6T~V>sGm#5SLU;)6ElZH@01ux9c|Qcb@b{LJO&EcO`#pH=k)aoMg-68gpwGPw5KdbgVOvF)k84 zDlK!5U-EGrz8s4zFei({Jw2FsqcjqycVy>nY;5oezsoVrY2^80kVJuy%h<)FdiwTS zi!W~`v1swtTeq6cd`?VrqPE?lYJWrPvZ+{}v~1wJOJTh9Pv&Q8WI1l}wam8NZu4h( zJPi8WMl=^rd@SF=;-iOt=dpc-P5psF0}L7L+vB&7Wn6&$A{?*+!_E*p0B#Yq14BH0 zMd}XAuwMYIJ9d#s`d%p8`CISHRJAcu@!WGcabS z=eDI&wm-xSo<6~2_5Gu#`U22$8tBQcr~!J$j?5m7%-Vo+FuZ6Liizj|Joe>HQ>XHB zZQhMTpLp-unRIf#sMa}E-1=f*98ydXE3J9SlRuGNDIFkXoFMeI zsnSRwlQesm)r%>6X)5BN=@>_es3&XaO(Ijl2K)u&2dMwG)=?^0>pV&13zcGRb^^ce z#hvWTTTh+LUq?)EFleV!Ig1l-Vp9t!p!_~-6t1v~tjMmj8MaiIXY*|Dy)y}GAEjfn zS)$xfRxvkq*BvFn7`N=2hU>l@d(q*Wic6GnT^r4=^@R8k<61t>&@!k-8<)#vAAj?w zgxWo9>QfZwBiCLWSaC@E*LWGAQlRfID>%dZ{XFpIbU#9_pTtOqv2UrHC5`V)xeg}VY6kSMo)xTzeB-;R~$JAxy} zh@ynlUJ~J@L5a}Ql%^n>33mAN`DYnniRY^;_xVE+_w|5}4xLya6MQ{oJ;meE4qgf9 zH>bHmH*dviyW;;1tm*DJeGlX{NJxLSvWft2@!QYOXZ!7p?VHA-@;F6xLV>0FkEr~c zROcDxb4;H>dPZov80s)YPNQ;yzNO6!R0PGntNC6t-X2%qqB6C-*KA1QKt_r=(k_y? 
zM1t-v3q1J_X_rY5yW|xTS5Y(_d5v_-$B#*)*k@rw%zK1QHBcNg?}oECpD!|JT(cyv zqm|?&J|c001o=4Om-s6!$p|=KU{gf=#=B*wh%jx|jpHX$7?1=wyB$+M{%6c?$RH+( j{%)25`j^q->%1_gdM9J;Mv-FCM(T*|MzP3h&O+;NQQzMH literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/flow_control.cpython-34.pyc b/tensorlayer/layers/__pycache__/flow_control.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6402324832725d934d27be256b561c3cb425fbb0 GIT binary patch literal 2896 zcmb_e-EP}96h4$>$99u7Tla$wZO5S~8W*V@CtDW;bI@Wbh5~C)tR1>}C=^wqZ8@@} zQuN{!=~ep@yVwKtQT7VF-BoY)0K3{DX*-gZVJkMlqWN?9osS=Rbol%7O5;}Nb?@gD zfZrhZI9R`pZ*~uh0DlA`fJcD^fejIXu?0OFNZ>$FfyjYg1%fIJ2=uD(b+DuwuW(is z*b=<7p;uFO51X|&1o4_gk{fR(dAG8lyM;0?B6U#{MtL?TQhHf8)9@0f94_0Jz0 zE4T5@u3^CddPK47AuV)mgW-p)USU&nd)Tb-jcjXa+XlPF-Ph)TFzKNa2~%cvKlR)P%}S2E4?!|kL; z!_Xhjyys-o1n*A0F>Y~@fF9Z1-KA67rJ~=;`C7wfKT15YwYg5Sp_j7m+I|wVb^68j zXU%oJy6*Mq_2vTODh*gaj2RWkI%eW1;fISsY`bBsyhN>;jQ7-jYpvI&b3Cnc4)cs@ zdpt?;K$wYUe4QRLmUcH<+jE=%jXF7;tBMfaDA3s`u-RjA#)_b^i~A|E?)JvUI`w?f zMM>hg#aT3&`$5!=w(7_`Q>04ZWzGmVqx^XRO7Y&z@je5p$P71!a3B0UHXQ9br8*%xa_F0;gpo*Fx(7)5f(9CP1^e$n)4b836)|H_C6U{vKRhs+c z6*h1@(V@HuvhwG4%>-ZfM}4wTD7D}x^D~IEiz`quTsRWnGW{($4HTTR_&(f3on;Urgih6~0*o^Q-_E5jfGw`vmi(1t%7~Bo+}^BUn>ot_=gw6$hrD zs{Xwv7vY7ix<(F+DsW=MsH&{Vvu${Md~t_95tvZ9Zbthv}53fGl(#9~>S z8_B9MmQ^oJSschEvqhP-L|;}i;rWNMes}7#RD?+^om6!!%`Q9A31du`HQXzCE|=(J z1(WI4rnE5)%j&?x?8Rlniy}imYgVL#Lw#8_p<@t8)YCSG(QDvW5ciL9`@0Np744yy z1W7z-?U(!*$0= zFo{%s*>#^yyl6JE$99u7Tel7w+KxjJG%iv*PWBT7bI@Wbh5~CaECsT8C=^wq9VN2l zQ1s#y=~ep@yVwJ4Z?ae5?XL0)yV@aXIg%D&D>lKR`E&T4j~{t>baP>$L3_XTeqI3h z19Fdp^@sSTPp}B^M<4=t7FZD25D^$!(6fOA4g?j59OzXbsKS6iuL@rWbE@$MXH|jC z!D}0OHD$NgtX&YqYZ6KBUs$GAbE9N12;;#8wqaK;J{?CQ97Sxx_&44e{c*iWIUW{4^Heg)IG}RBclb#-i z{^8VnPBu;O@x&YA78eQVk?rkmI-y-E`mLO=6>RpS#1k9qt290IMy$KCo5XCDezEy! 
zb5*addv9{DIm5V11J(~?Mg_8tnK(`O@oW&=ZWt>sQEMvW6Sd!3>y=4{r*+O@UNLQl zCnG!%W}+EirN@kox@)b?3@1RNP7dd;B1G2;bk+)Nc37OUB53U5eu}iaxwf`SJzsQD zk~nU07L7AMh`QER9eHPpR0+Jy83Cu1KPx~f-s?HuS3nhcBGB8=%wro(It5KS|Boh} zD>W&Bmo@3kG?{H9F2B>hN|O>)Q4<9EcbXWQc}64Za9na93Mb5~wr z1BYWB%CjIVU$-kJ__{yplZ8U51wYTuAkHqXK*ez3NPNfiU2qvFIAwM}JV50!SZ22a zH^%<}Sq^CBb9NMF;h|1JQ38_{%ypO7;BTCZp>5!^1Jbz+?J!`m2*p_@hQUYj8oXD> zmAYCzU!_F#RPe<#4qxG$Rxr;BfFlCuI(eUCp0wcHg4e_%0&4_oYRt7^0J`G9*5qUp_RnsCn1iF*bUXtkuN>Hq?}H=iaNGd?3lKt-VdDzJ zA7TREi)(N+uf8UfqXk{s(6`{G@}y?Iqa9t(crNPZyKwGk+r?}>7E!CH>dyBxU&J$f z@ylSZ>FmnJh_j!JTGCO8@FgvuM7o@AVVs(*zqK4RZ^(*1R+7XLoEi;l; zV=SxQXvE?`&Y3Mrr6u~Zk_yj1mi5OIpN&MA#L^k5j=AZ_j&#Bp(`60!N?yw)I$6PF zy0I>8OvAD|@GyIE+3=#s&`+Bc>EKXbR!!&_1QPYMjbZdF@H>e6=Vm)igZ1`4p5PRn zvv%MQytpke@Dr}5w+%=eQ|qbgC$Zp3gx@T@fzv-%$LJ$0fGm;gR-G)64~b1~;s0&x zw)FwIbK~VwS;kf-)-Do7<&@Z{T*GyTNidF7ecp9X#$GfXnRDGB@m*KD)UWzBI(P QRTQOaI~rt>%-eP6FSUjxEdT%j literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/importer.cpython-34.pyc b/tensorlayer/layers/__pycache__/importer.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8afb5dfca8d1828187508c5ca3ba796cbffe478 GIT binary patch literal 6818 zcmd^D%W@mX73~2)y!e(x+OlJ(Wk(SqlK>^#Df3x=Af%JLPux+F*Kwjy>b53L z)Bol0atn`|Lm?R1&P569LHjH-W^}a)zsRsUUNTTMFAM1IY-98?_i26}wVjAZ&X!Qz z;XNhff-72HAY9(+1Cs2zp)7Ch`l-Cm@4 z*y7lQ6QtUX+)xnhdU{!o+~c9-D`6lGO@dy%jtC&Bg=UhR8e%T&l0hm)&j8DYI2Fls z{4(za-qW5)Y)d^7E8W?3I$d8Jv5_7K4Hmk_h|+8FrlW+h?9H1u`7Xb~qgJ)+J5A9J zeOJgie75}1iLPCp=c?^=#f{(6BIfy(&*wQ!dE?UVrHU3!5&{vC48N+kLRIevFq7Vf zx!rz$kLr$%8FJG(NSVi~$Zz@X62E#m9cwBvSkIVvLw_Ojyd&fWHV1Z;d`Du>;3(V+ zuxZAjv?hb~wq)4hTXY5zeYw}^hBD%p(*@}+k@iNtq?dMtBO6inx*s;T=5{MfsSt8L zvh8gdcHud5i`DBDbU5CQdBoiH#l?BK2PK z?DuE}5}KOF{ThpHW@)gUW6=oP%oEYzbF;u^0r^%`WSb=;kp1aL>|lgsPo^*8#hRz_ChGNF_) zL!6^yyFWQ};5fgi;t|yhyP`4`kLXhp7j4@MyvVlWiDd<-CSSZGWhmo9f_`ycA7vF6 z61ItRUH}{$gGu8|)QTsz9qBpizOc3Icw$3Db~E&Sp+Pa8UAt@Fd%XPU7Z2|IX4QVO 
z{P_OzClBwe#%0I%jj}4v(T#&bz&Ds(ybKYoDzOZ(q_h)R!}dr0u8*03fn3! z^;J+`F?j*~{^Qkth1IB9+X>w;*r?qOn+Aewk7T$B%~Y+s7qx+tJE7cCU0`8t2moIC zu6<0^41x$*?e57*%=c3~>TMLPFlLpk^VS*s8?(-#oUz^?Icx2o8`_C#-{_SQ+5p-q zY(UwzJE7Zy9qo9`wx9MKKWQo0wi`BWTTi9eH&lFpNi_#gkq&tjV;A%Jto2&By$Es6 zN7*CXt^ru_wYv)xX>e#Q{No6m(9|zYYtsq%I*2dq1unpvHx!00=%M`Mb`*8hQmqE) zY4_Hv&9IYf09a|KX5`l*DMYQ~D8vOdDHLF+*{+e458?I;k#s1;FcQ_xhHcMl7*ay; z`$1D^n&nTfabakaBMNEU)AOA;|Gpx>f%_bC1)%H5w*R(K8CHSvw6hXM!=a9zYJo2gyij2m zD0IUz%^~HE8VbzjbxWf$0G&pI{4i>JDzyu}h7nbV5^)i~!0#eZ=mm(s{k>rc2BILr zks|G$Y%>VS?EVo#D&s8C@zjJgi)cO}6cEz8#6w^pfIEl=v^3zV#UZW&-Dv(GmI6J2 zYf`Q{%0plh%|n=Dv&dQ*Lr6#H=XV+6Brr`>>N5~1iey>`1@^4KHo$ioCco~pTb^1q zwULhK3+l^KuY)4ny+CvXs(Q}Y?a$A$127+m4ZMZa38eM(@9eVyaTRqPeNSm3BhJ$a z*=nV*vVTUqirz+!J)u}s!@oxS{d22u^p$>o16EtYBgbLd{V!KSo^n9~HUJ!KG^*;%wK;gR@d3q)kg<|Rcvh^*BP&yf;H*8pZV#DUJ0r_W zbckJy*jMGK#*j7%*J;4`wn;gzVumD~oQ(VnZjhuGZyNjiz@KC%BN}C2IV}O~pE@l8 znvcH>$fqBqH&Y3U@Bd7n+~r62fxqV5O3?|og*(r2*MWNsx=>1PiyrFRCNDrl=(tr) z0F^0u3D*5Ku&NPtmO7`fYLHZ8Dj})H)JkChr}87z*2@FWG64O3>dUF1)WZ}yDkp^e z3m!!x4xzN4(xDhNF;3eGtH$lfa%wk0Z`O(7_;bh&uLPqd+dHMIez-= z-bWmzxbixl@8F(>qRKwr4wl<69QvKcFTH5{XU<9>}*PMUs-RQ_}Q90Is2&P&_Vv471@{=4KXO+89$-*_yJ(vRQ<; RS*w(ry*2v@%rbx8`Y*=p(VqYS literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/importer.cpython-35.pyc b/tensorlayer/layers/__pycache__/importer.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a66e380719a0602def7f9c6986c7bb0f98aea4d5 GIT binary patch literal 6744 zcmds5TXP#p74DH_Nmgvf&c&N7EOa)6SV5MYWP#vXr{djBLMp*zV^;!#tzk^5p6NQU^i}{RoE!MP{<>r{1XMT>YxCQ2pFh9?_ z1=buv3wjorUu4Y^bMaqh?kICBYz@t2cHcckwO^uBiHTA6GS8Y7)Q(X{sx7Crrq*_A-Yar%4X=7)^S-geGH$I!)3{)TuJm2{MXoxjOX<>tIzYuDGHnMcavl`^qv^ z*0onT^H#R~aBa;C)|TQ^kDTtR>pXIHg^b6SeXqMDB4s`+4~2A+_ldhI^14nGO5N6` zY0_T-FL&^$85Dw%?Oc?w9<5a-Zhs(Yh1y$k`N% zJG`%ioOMOV3xvx%{h%FrVSrkw%~wRALV3>*x4F^3Ua4^W9y`+MibzPMKWDR_{xp-! 
z^ZB;#C^g^GD{1K_Jy?n@FYGI$OkLM^0zRKjp7mT=7g?oI+UfOt4{Aptz6B5aQLi5< z9(Fi(;TWm5Bi9r}yPjT_BX@Zy`BE5&1CyXvzbgVr>Y$k^2bfTL#YFw>5J+bf`T11Us|Dwie%7v>tSF2hyNeDzlGW@#U3RQm)z)X4@ zW_Hs49@QNgGvuZ-kW#~{$Zz@1Jil=*8*3^tSkIVvL%NU}?+Uqw&4C>y-t;t}$9T|4{CY^yqU+#5#p^W&oY(ct9q`gru>9rl<$VQaC>WA&knVsr;bEm$gt5Ww^YK|FyvGvxqi3{jQ zuyhK~r+C!AP`m(!F}7J|@(lAB+t0Izu?>s800yH3+U3}b0()uYEc!s#QKlAHlw-d` zGhoov{4%Yv*hZcP+XWVlu#FOU0w9 z3hJ*+eu#em{-~jpd!z9zl$)vtm}|K0H795co$!Ws7S%9NAY{F_D^FwSPw}V=C|GIC zDq0tLV`l93N*Qnda$DJL-^Ta1YYtUBxFfg*gmhe}~g2k?c17 zHwzWC5r|_bY)Kn;!#>}30)$&3q6VkE32Q`QjpO9-ZP zp7}@=b6~Ec(TmnkZ{`WYmZdNnj#>0nvwU&jg$lbsF&LI<4k>rkP=r;}Ev?o7KUyvF z!)V=8nO*2LjHo)42ypl%eh-Y_4-iQEyTcL;+CYLMMbBiGRG zPXks3awJMOFo?d}o4ts4JRjjLVak6$^*Unj!XddUBFFPpbtrh?e$F&@Km=ZWR^DmUh? z-4*-IiV^#!)YKZ%CZRYD7~eFh z##PFYgyWNmui*wshVbqnKgIhbwHVR(`o=j0(Ej8(1%Q0~br?SNFuPPr0DSLf`p_;u zegNn-M^=h%xFy^g$2A46Am}bAx#)SMFN(YXF`(ntHTG90C0#sy6GYWuI!&E3L^U9) z;gmpB!)diNKvG#lZL>0PDud0O`d*?!gVO+^2)h1)N0G>b;P}2ZZnA*_vVpS$JDeB| z30D3@bLp6W>aE566sH`7E4r9-+7#R#xtdE(o<6n@5IHHXysqavxMHC=GKJc~a$ANz zztzyC7i}MZJ9tvudSk>&l{gVxHBXE5M)y#ZTEgyU_a7Rv%oWD}J7i62dva{*(a7`s z!RzmD1#-pZ)Is7x7W_$$-F+=^eTBw@z=ey9G3x+uy?4+QCk8J9nSaoH1MsR&G(R+) zPgy@w=V(0)>ix*`>uC^&dnTE~sJPHZY#lR6qjlgMR|Z3M)Ewi|gmOLGFax=@jvpn# wrljeIusZ|rxco89hC7(sdGyf6n6xU^q&1e$BdE<=qlM`^)1SaBwTsq&06llOs{jB1 literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/merge.cpython-34.pyc b/tensorlayer/layers/__pycache__/merge.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e78e37242fe5a504b6dba4fab9a7616d14e637e GIT binary patch literal 4800 zcmb`L-E!N;6~`C+lxR6|+Qf-6ZMt>hn9RtaKGHP9L~1ma-N}ridLn1s(xZVOVo5>) z0UEp1*CI!g)v+&y?&*-x_?SdrM~`5vZP6Oq2;QQwF*{aaV*eiT zVi6U@Fc()~?iSH*@_?q{s_!b^(ep|^7zN5727)UQ$|#oH?R28pLxfyKyyqW@kh>?o zwC!4r<9O`G?m#FJOMQEPa_CzJ$jf&6IJL)2CLR&G zO4!=WbU2Y#s-%{O1{oj*ZwlZvfYk?CcEgw?DdA8fXq&e%`k8~q8|mGi0c^A4I)>qtuArd zcZXvA*-jLSCI0(QKic;UB|8#kI3EQgH5l|S~u?R+Er*ysItmGZ8zRKa+|8}@K>oDi(jf@_={s%3pk;PbMI`DLnEh7Zo>0P_j4zahm 
zUf+z}RM=_4nYj`6%JT9g4YG~!LtpuB;GYSn7eohc;2gQJ?;ar4>XcKtJ+y~0QrsER z%O%sSqWGRY+1|slbw7SDx5kTUK5Z?dBR%TKD80{(aSu;3&xu8Mvf3GMSZOLv^C0Jq z5+?IRF0bKj)p&z|m&FE2GmqEsww8Iz$3}aTdAce#>JLW)`T`kEvH72@{cdWbkaMZ% z=AFAJy6%UcDbc@1?o5=Tq=Qn^!K4eD6@2U|6`Ulx;3+52MDVLTLG0;wBo`Cq<5W=N zn6#^y3Q~JvdTfX43LRyf3pgL(l>bDd7(2AsxvrDvC^iLlUSuzlE0@Zy6qJ z?JctJO4M~&V24F^USfwO7&7a5gFQXH!&I3aR?_F^s8_~O!6GLm_B32(B~@kL)>!;E zW?5Eh4lC_(#{4g##r~}3*ddB6=7LF$!D+X^Zqt<1aYILe3ljyZI5KZ- zQlc--**nv3^^Moqd{2{#I~C?%?&dvi)Ldo0b)%O7!IK@f{{g;+Jm z8ARSFpz8(4c|LN3^hwomyr|JyDsdHqF2kY0fgg$~9?Fx8fu_ED1W*EhNUBq2|4nH(_>f~K zH{`xONf8Z=Vasf%K_b1JF-hqgryBx_T>eCZ-lHHo&d1k@v>ES? zLNHDg>JJ8eNQK?A`R2|R_a)!o2Sg0;Mhr%S{eAu!=YRS->jx-~`!}~Td)M={?N{Gy zXSRkrPw~JFJuV|o!q9Yd>KkVoV}25VY9)v(QKy{c96)7cGa1jWC&U0Q#j@R@%!;iUW zXaA(p%;z9X3TOq?m!=)EllU}EtuddbxB0m9u=L0cq{zr`hH+55^~&>1!nq+P=#^E< zv{1f+n2fg^lfWAZpLwDTzJ@mxaV^5n6B~%hc>DijOY`jdJY+=3F$_ci59I`%h@g{s z3TO430*$~!Cgh*&2~wu1GC?8QgjdD1(|#lUMAH}A6y^?<7doi=(5ZRoAGo-LhX^nV zmeSah(g;8ZtW;u)C_kUYy;TeKk1| zki;J>t-F$UL$onzp_#E!i)(aEgb@FNnqN{wP!qp~CYkTyeb#^F#=oLYB8Zr(I7;qY zbd7(;ur?THT#J$-KBmUOJM^4D$21xOXd+4HEi|mUSh!!fRe0OFQ>a*Py?(Yh8~cPs z8dWp@uQ4iqkDA|5^E#TOn&!yW=s|F>i&K(J3-49R6~IrUmXz>65s|Q%SXnPWsb$3{ tb?!I>caG7C5=W!P)VlNvX>rNi=LU{8inx;m-8Ji$wNNZt^QGI5{tF2P()<7b literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/merge.cpython-35.pyc b/tensorlayer/layers/__pycache__/merge.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2ee959b1cf0acbd5fd8b1fd20dcb6c1e4b1d82f GIT binary patch literal 4744 zcmb`L-E!N;6~`Cgmqg2nlcr9TY16F}*J4Ko_0gsoHd3Rp>`rDBB@?-$mL3fR5la#Z z2+-K2zDBun`VxJCPWupjf<6Imd*xT?P5VE)AVE-atxkdwR={HS>^c8E!1+mcZ{M!- z_TSsTxXakT*wj(R^=CNc5gH#q77H1B>f6jOvCv`zo3%^K^2^Mxu&~V973No&Ut>Ky zsIuStb9Db5CRUlKu@@!Qor0|Q#;VDGq--7Pa1Qktva*vs&cbhv&nfVm)2letg(f}=djR$&|s|P&DCr3i9(#n&m z+t9@%&yq2`%s*LKS;n@i4LB4Rcb*4A@tMc-&|a!arBjrya}fWAORQ*8dn>%X4bDCArw zx_RfWimv-%XG-+1kvkKmDCwZov@=;?vx1K;WrCALS1c6-nh5?XOAved9Vz%k`8eg& 
z*e7i&p@P(2n4a6AxMa-)}6Wl^tD^=UkWGWYbt-5K| zNSe++KI@2~LTyNES*1_QLDY?tc_^CJy)ZN$$VMeCOA%tKNx-z8?Fwp}8El1rW62_> z{d=`3rN7qPkCSiY5Cx{`cY0pbG$mV4ZyF=bfk=A78J;J%FmD~Fd5AFBgxOitOcxN2t?#ut+fNzMKBtqPE*B!+ED5UEJ*L^393JxDa{lGlc|VwM+`qM*kN13EkNx5gJNa0Hkmq>dMLw4?Ct+wgI`)L%hf|Jz z;i*2~`s#E3XBldWqhknojB<=Pr#S(TQ&!t1avjMVL+)So5OR}k0W%e zz8epv(^rG=L%Q2MbX{&%1?rGG1ZsMduF614U-O*Gh3Z2Zt-dj)iR62+q*j6u5hcf6 zDX>ruDHB+WT!5;Q8>m&Uf;MfZh-~g_A@GKFXHc-3xGMQ8 z(Ulnojxh%5R+G-8-_kHs-A(2Yg~o)7d$aE)Cz6r)!=rUy@@|ASAt5yTHR?&7t_cs4 zx2gFNH3TQg8)(w`9$ses*J|<;>Lh$fsDz{5{+O=uPZib%tBea#TEdsq7AZu6)fVlC_8t3O>z-Y;-hT69an|<ityDoije1(f|29O*5@KE*ep=5fPiEY83DR7n6J?F&jH$SEgcMvl_o*SH W%@Xb;!E@cZV=a_Q)_nQylm7yeCA;_l literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/normalization.cpython-34.pyc b/tensorlayer/layers/__pycache__/normalization.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60635fb87b0683937bdc291dc59cddd8cfbb0bc1 GIT binary patch literal 8481 zcmcgxO>^7E8D0SV5Gm?gmMz)1AIPPr_SjpG?X=TV2Zwe#>BTdhOmpjWI@3PyE=Yow9NX!%l&}OA zyYGHK&-1?gyfjjLb?(#UKb~OhGnV>faKC^{e1gQmzs7vVZa6w~4CZTWU1!S%)0_-* zvdqu0`_B_s0}=kK{!R{xIn+05A4vEXyvPLtsjH%IRQfQ8P z|9F&B^kJ4Q<3>lvb3BK7_S**LbJaEH8@?^Xe0~4E)=i(fYYwd< z+jiuILA7Y&v*fyFy&g5IK~VK+_!dvbZw2Yn^|}dry^=gY2DK2lo92xx=7!5f&n9*A zrOGAK3)-E?jNCw=AbMn*r;%%OZf}`k)11S@hTDijF3jg`KMY!Cgqj#$WIEnDs=;FU zglPwk>DrCeq%c|PcFlFrh~u_hBthhFsWH9lMXO$b=33r{8zj|EC>xD%9jlANWXAC} zJO?&bwq(njuGd3`(2T9Ntw%FzndH@vVTlGUT8(EXP#nPDfQITWjQ+f{>0w{Om)b5YOI zCF+`)bj>_jxl}dZu>EFb({qrIkaDNzrAYjHEs&*I@fI4 zv=biQLef7s+Q&p-y0_G-s!{=#0}Y9Y_vYq;*$KQ4I<&p$d{BvG^%j>4J`XIJ-=MQf zANroa^%^emDp>73Sh%=|O{c~MOLm*Epq!60!u6ZH2ot`z?=bJecT$Quwh9>MD=g1^ml&@qwwF#9l1oDT9{DvxkFW%jiUWAd015O&#Zka6pZ zJywW@9{?{o$&!gyLV#Se15~Os!XUu90m+Fw@U#*79pbzMPXXKya-X!lhw`X;m-E&` zxhd-PN1?v7t@`x@ps%{*xDAlw9stI9r2GUUiFfOl6U6gu|IKg%NP7VsY`0u=kH$uy zrB18mf=r_jc1o-SN&`=02E20%3mAoVsB zg)}<>9>Sp4G_B=6?-dDwKjieN7Cm7_+y-CIn=4`HyLR8oRbj5UL?YU 
z2VL>QO;cv|<_h9V^6lwU=JSr*v^#!Ortx+2=gufYbhcsV@mTG;n@rAwnnX5RG3!fV zhc{gF>}nLX#eA*Sg68O~RH2V*d@I2;bZfH$Js7xAZN(2)YU|KuF0ZL9VA;94Wpxcg z5;ty!*o0I|*@fAGTxlI7ENHKY+UZwL&z(AR=9StinsGu`RG`doQ|YZNq)W6~k!Q0q zfTQY#Lg5NED0?Ei()#Uh=<~PptuIzTyZg(3{PoHaHu&2+r&EVQx7c`6jErNN zU=U+YwVsrpkmYCC+QCFQ?UZ1L{Xl0QGxo8;w&vLmEF5Axy-C^vG#_s6sj=`FcZ&S>f zW*=c0CTWWKPxYNinj_WG-6`5GG*pb_a556=F7hGnFlCUFq(A8u)L&59r#wcuGqHd9 z#E2{a-+OmDr5}&5wWBHhNW1hYwswr|OjCPGPtGhVX+<(>`Jw&Lu~lvS&5@@ zL6QPV!#FE(Hy%z*(W$@EGb{xp=%SAU!{CR|a$J(D4fLY-A>A{DOPocbj{IGX-PKu3 zit2mM;5cGlblO6gl;mDY{3d*MCB~)l))vbKCj^P7p+h1b`e;x2s}0W;@emx)Ao8Lu zDbUIp{tYTS*=d87UC29Pc8kUl&kW{uM-vZq@07$Nbr(-l4GS+U0#HlU{2AT|h%Lsn z8EyOJ!By)i%<5Sg1`y|}``NhvZ+yy6^Kn>F;&PJiXs-A)B?OpKU2_w6aiQf#mRh>Q z3atsO!5pNgt6etdB!}fJ@k3NA1FaFoSq{v0A@bcI9=5C`!~m^3LCYp2*C!+&m#7^% z5ws0T*T~fnuZ2O=YsEuR(^5NymVhERQMxXlr6%)PgOCoUw0tLOC}tqM7CK)ZUT9<%roaUZLbA>W6{piVNL2 z6Y2fODN@BxQF59R=`oPo@*`a09VBd++?HpxF>PGOpLWzZMsAFr)6`!6#0q+Zbv6{ocAgAdu1i{*mcPy87)&A&#;*D0Zc z#ZOW~2Q{9>j&Z{EWN8K#nR=h{fynVw!y+GqDht}uB~v*=to0Z<^ETA-3uBzv8q z#0>&#^n!SZUxG|?@OAQh2$k#w0AAY7*%L^tn=-to{2h|$UJ_~?bbB7fju`iedX z_9JBGKI&b`Q1UFEK;A!=L-BahRz=%-vThk(OU;{m1xuLsm>l&CG5^FavOUhvNjuLz$ z#Pd)cvn?nX=ph&yO}t|YYSU}ok;)Dl?zXP)4s}gH>!nRhrAMtgewsy6bU~^huytG0AW-`ur=HwW zAt$PTh^@_%V(UqU>9k~cZqLS-s$q{3qB|7H7=^Sn$Ib1HV3ltBh+BY?zTrrMO04?ixQ^(jh{2ERZiUZvy=lK5!E z;Rl1ao&6B#9F;py$pRHN5PU|I>H{9dqeNbf(DxCuAcC2>am9N5`h~aN_|E0`m#lX# zT)%qZ;5ZF@T-*j1|@U|Vtqr%+2Mzt7T*MF}DZ@{=Yc7r#N9 zi9Qs6C0#~)O6=}W6~m9D3hCBE^{2x4*mgH@5Pp}C1UyB+;Jr2JnFCDW*7bt)*r^EW zOWL5&K0sM+S>P|c>pGR=X@-~`JcN=SAW7gfZX0ve({Zi=x5?!a z*5Z-ASh1P`90wNx!}vf?+or%(#8b)74(Uw(0=59ZK?#9nJhtNEcVT(pEYj8Sgvv-{ zH)WNlpNuuWMaHFflTQwJoKvTJxhTJ7Yy3Sb_C6&&k0OQkqbaoiJ{6LvE^+^d^ho-g zTa?HF)#=Wrgb0BzQ}QtM{~RyH0unYt=>L>H2K+BUw09-F{5DGZGyCPsQYBq-#KFO( z>k)(^+pkO#gx7z@C1#M6MsgYYy@H6M|5q->nMTOnm~rxRrAHbU`)I2iar`(!KN(qS v{c=>0oB*lNB`c)JvF9hmLub`*E$2v$h-oB6{A5!yRO^MzOl9WQ%!~g4zU*Iw literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/normalization.cpython-35.pyc 
b/tensorlayer/layers/__pycache__/normalization.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d1dd6c41915e9fe60d305b7a176a58f771ab413 GIT binary patch literal 8431 zcmcgx%X1t@9qyUghjz7kKWy2KJqb9;I$o`1$0iZRmJ{2F!4bhSd5Fhov@^RJc{H=> zo|P;i6$csYoP0@f;vWbHPH^SKkyKHfs5wAU;o>TaA~%Yn3cg?W?Cz{3$97V%()7;s zbpQJOec$i5lLG@qbMY68e>}<9r!4i!;C>F5_y~!Ee~tN!-E?&37|hq$n$8vtra2kr zWSO5~i&^I6n3HEtA9D(2 zB60`F9OV|;xu~6G3+2HF8oE~2V(r8OUETFre%NSuLE{0Q*?c^CJ*?UO4OcY7K)CZE zU%PH^x;!3y&5mj--RF^Ofr#v&mM$Voc8b)}=5x|3y#xY5z^9M7Si{apj|x#F6$HQyFuc4_~ll9>qRSGfaV(Bx*H_bPAVIXSE`;-Ib#iT9U751!%UK++Fv=mPl5QnnL$qzHElAh~`kN*iBarF5SL4i_S$I zOP8r@X3{nDRORxt`KImHD;u7Je2j#qmu9^^eR3aQV1W~EDC^6fEnv0ohj!QQr2v)d z!i~(06%XrxnQMub?faW%GZbEgS(b~0Ui!XiuZNzarWQww=Ju4bZTrm?d#`a*VAr{3 z-KL%J=oXUxxzRo*0@J;%R#lY>upDSeM7%RMm&{h+ebA!qMdyP`B&#>SSnzpZ$^06f zRr=8P2(H(0iQgcJ7+clYHh={w4zSL)b8J`BH9V~1ficTqQHHGo*bI7H&9NxYwsjPe zH%9v=)4s{lO+Cvd@ufw5Y_*_lvioP%bCIp~OH&dRU2zxB=iF~mGBX3g|An?=q^leUJslM9(- zVm;_CE~P8P87drSWkEckPc3Hr-ecg4n>y#YOBoH7;1Pn*;{se4G)kU*|=b;3Y~1C>f+= zh!V2>G2=(^^{-c}LO7?Z?}q%AXxcTm>eL!`P*uPs_eE8vqS|wCrkk6b_Q>c^}mB3sSEo&`wS{R)bmn;hafaLzk{3=;hh|Tj4rT^Ah;fZn)?kjg3AFtwzHIF-9S<*9AHFHgCA*y6pqg z$cTD&?8pXCt9ouzc%aoiUAWTpJ<%2bfL#a8m}sJ8+@% zbLvtmUz+PYOV)t_WvXGXt$jIL(t>8Lt&Iw_UEoI5Wj|c5u0dzHysEN*Wfzt#t8EbSwstGTCZt-*F3k4iO6wq0L3>41 z&%APG=JdI9uT)pij1#(|0+ofEN@ryuL!y<6Je!ps991tA+E%DR*%R57#wWj{&)+UK zK3n#-uU;&pZ(#j<)w>@L$V2KA>s6bD$6GD zeE^q%OBhJ^;q*Q*>gRGgSQr!x{sk_Af7|p;ylb#kRA+1lJPwGQAeH|UolWfkEG2^a z1-4dXJN;nyexm7ZYK`50Ti2oi77em!h$u{P9W%^705y0J6dE;gJLBjcDO7{q{6ttaJ2Wcf+9dLWTN+a=gx-`ClP zjD2XZ%~`e$3kTWuFuTV{XgL@Z+A_2cA<@*U5q1xYm3{@K6r5+7Xo}q`GJaOqH0>cX zy9V29fHHOVexR}U?lN{)V|R6S*I;$%urcP0v!7xOMrn%q&-Cq4nj_WGoiW-gG*pb_ zP%;wgF7hGLFlEq%BqZq-WL!|$r#wcuJ+goK#E2{aCwgZ*Ed&p<)gvh(NPF}dwtAFp zk5hw67*5VBjYN8A>($qK9Nk0+f~{tc*Uf}Y=oV}+dt89X724yTlXK0pNZ?@)tw|EI zY{gcHRA*gc&!q&mL;EDj^)MW;Q4NlEUdSg*tTPGq&z z(As?2-~=G?IAln~Lmv$)*R$riBJP7P8AM*RDfL-7!@o*pM_WyhvI})b#BR|*;>q5; 
z_Hg39_MK9AB<|vIs$t=UMfho{nm@-I0j>?g*`$kW`;=d|aY-N6A+xp@YRwQF0zhJcS+Ogloyt^ei&ga= zw3o9dCb(qENS$(bNTGX4)UemFCvq;J=h!w=Bb~O$9G!;GSC@vK}A_E86>J*8#j%XN9i-s5W?0l&k_Q(Y# z5e%$lT)5@BO$#wzaT9WY!hiflXau+F?qrwIP$p|=oz@vDO z*sB)$K4KQcFq1d0T5sIA^!A(Iy7K;l_0FXm*Dk$w{mMd@WcU>_a}`Osze_Fr8s)x8 z37vviUl($A_&jCZpk#p(ishuRu!HWuMZP$Mgyjj{O??z`shsu=ZC0DnMzv#EAoQU< z3)fK&^FTQhl^+e#j|5=?$6w(RbW18{|N{o!7>fYQa;SLf67T=vWl6=SaAAxj+3UKUqs-qOO59( zPfO?(>Pzm0uAjZJu`!*JLLqNd?WRZJI#E@+s2bI)iJ^83QQDJ+$j^No{QpnTUmAT% z@JB=rIt*;0f&^6i|2|7M=Ou_BU{9iuT>KhsCi+nPm4q4XDe>d|xuXBETp`_h=>Akl z9^38~4#MvfmVmPe7`(qKJ#(NbJiAW39Xk~PddV9UrUxp^EeoIpH;^|DpImy{K=Onh zw7xDdOSC6P9R?LJ543?fxYyhK`j>7Vo+n}yU?GGG6jN!%C+Z3j!Z5`j=%&jNqmqa~ zcx}gWIL#4Ly~j`z1XKwe$1P)K`b?aw!EXg0N=qy$S|Jh$lDaWig+yf z4I!P$Phb=9o0Je-#zV_4e&v-%&LU|YkEo19c~e$-{ONe(>ttLyIQisw$2kQQi^c8% z=kHOm_bKT(6)C(QOyT|asF1{UiTppHN0R8=qC^g8iSBGlh!XfBC6B`YzvHEtL&63K z|DVx^zyKxa_O_~*-$qG)Zohn4s-#PnIMBOv9g0wN`-N$O5c|)##3Yi^KrTbSSP)fo z|H`E}QwzBpGfu9qbV}o57jKm>j-N;9_a95GUycfrA0S1#6uskJd!3C7RS6LY5F?>THN;+(N2|1nNJJw+h(b`y3TeFdj_q}KcQ!M# zw63H!=ZUibi>xHEQwQ?(CBj53p%**o`~IdkvzU#(mI-o01h9S`77 zSZglM_pqz4aES4bKnidc*C2KvC6Lu1bb!Pzj0uD;d=u9J7cbG$1zv{>2f_x#4fvt$ zT@v)sCKdVovbLNxSt`ckBp+Aro_>FHB1DSwZ=)HPLo~Q`Rd-Jf+Jbt9`iioSUyvE(9WuBaud?^ey7Aw(@h30S8yMu3s z(j^%Rm+@Rjh9O<<2&s8YM-uPdKj6GjJXgH4e?;$3v@TRH2*!z?l!NY2WI=pw5G4UV zM`v~G2Ytrq^2Qx{lJZD#6iewSO=+U(U}hh}?8wr}L;H^oPY>USF;}t3$CF3|8mS0r zgC!WGVi07J!kD_TuP2F&SrJJ+GlbZRP;h#)Xt3Pni08;`Bo45;1n;I-IDM^jsVu}j@=74r#ifQYZ<+K3{oVYM@VEcjq znQfU=#=J^B$eQq?0pxN15L~?hPkoTT0eRs8nSZ+SvhdJ7kd@1ZT|EBDR1N?%{MCae zIe{k&gh4PwLJ}75!nJHHr>PIqO>0oI4)842kK@WWIkmiU<(jyefBahApgXEZ@9xCA zbSG|Kq5!7gY_7>V|M}6;@mY`F>(c)a9h83ArR$IRmPc#1s=6%&Rg{-Bm8*CN zhFxvp0L>3ci+n;HvPJHYTZEF2>hlj?i-7Kx@W<`Cc?A;;r;{M0UEdf literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/object_detection.cpython-35.pyc b/tensorlayer/layers/__pycache__/object_detection.cpython-35.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..abe26d3c840e6c8b5330dd4ab63a9ca8a76231fb GIT binary patch literal 1875 zcmb_d-EJH;6h7XW{mCSOstpkc5SCD+E+ji@FIS;$L?aprmMBC@J3?}1*WTUP&WxwF z2b$HEoAe!c26zxIc!as-DzCs5$KK7dsoD!9tk|(V$LIU}`JA)co0|cRevfW90sexO z=HvVkcKsC&9RCQU0H?eL+=G-rR)fd`!hIMKhW zmV5s;w|1tqOS^+K*1CJ{NT0h;*Fc35KP9r%*KX~;l8MGsrYusips+l1;)v-R%#xfM z_@r;j-ZtXU}Zq06@cEn(#Cy z@N|YS2!=>V!r@)|j*a6q31G7B3~J5+p2hmns{)%-^D9@rjhpEwZ`2LCqr3F(4&S9a zynTfN*n+dZBJ1?WCr7>0F1^>G{~#7Fw;Lmul_sa>_r);i!V*axEwI%upH-=VH;81?|~(vZKiB&HyR#N z=(U@bKafgPpnPlf9u5^cvl=oL*e&}*vf|9@p{&QMwLd5Gz^10Q(gx&~x90x?eLDmu literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/padding.cpython-34.pyc b/tensorlayer/layers/__pycache__/padding.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0431aa8b8d0255d737217ba717461cc4a0ebbe4 GIT binary patch literal 1539 zcmb7E!H(ND5FJ{wEGyn30g9mMp&(oYjf<{@G&ds%y4Y-s0ExTEx`+Bgs}^m`(TWO5 zt>J}F-F!#y`GbB7uRZOhAJ9{Wl;v&@J(Lj1p~#teGkn88dq>gBryr-R4e$rtX+Fx| zVOOtkNbw_(15DBuq#oo1E?Y45fTTX8ZODC?wjm8*Nnjel4{66PKA=_ryaP8LOhbF_ zVi4XEv^OAze0|%hrI6)fxy*{?Ev{i~!t*43oou>DI&?5!2Pb`1#G7v&q|6r+xZlj@dcR3T1PqvnxKR4Hg(0Vz_JYgjPlNzT*4D+Y|dt zn6U9R?II-RVxJ>jU9)qD{S>?U4F?UdCa`ma-62O>u=C-Dw8##r0`RU2>M3Z);u`|L z;!y%?&nhq&Wdp=3A?Q-ESOu){?Wgz6a}` zJLy>)1id36$0o8@Q-@)E8|d}ya4~8nsywB?Alvp=M8$&YZON&-o}uH~|EAb=|9oFl zNRP*A)X-dw9xF#~oK4uEFuuLRT8Qc*19mttOFEov2?OR?s26zZxv-U#_lmC2mPb@@5M{HN@8QMa!9 zVY!t@=Old$QYqxdh9J?dLqU!b5(BT9%JvTB;W6J zm@Rz9oN*V>o#AtIgGFoPre}E!fQ|a3q5s6S?=~&LL8DJXXee!BOe?bL^S!Fur9z~aI5(^{|UQ# zjUvU5Kn^fVJCJ&i6FBcc;sHs0NV|~xkaQspU_~Ga;ODew7avh80MUbM50cQHI~|2L z1nrH8A>ZG0npw#5a<$5e)eWv;YQhtqzTq1o7k1sYJ)S4{M*t+mqDl}By7ZvI)u8!^ z&=6LD?s_u7$F6A;WFUO@^#k|T`S}s&v zZS+}L#CjQ{%F8)p&Qq=UN{pk%GMD^ZXd#tbKHWFBY|u^ip}2I--BufACB;Qn)~bo5 zfx8P%(FvV7WdG5~3jO>Uz2JE*6xF3oMWg1TpHQXc{`~Ohm+9&3a5@|QK!@*+U%fd# zvQtdg@btI0Z;xm1ULOtVlLcnyX;vtkGo4+CQDd;c&=A9IgNL*(vJbV`CElLcWx|B5 zucVKVn2UXZbal;+A@(Ee>Q59JU`=4_2)jj&bYSblHR+HoQU&0BAJmVa9gD9C{DDUa 
ztUas1e9&E32Y@WX<+=wt1k}P~TMw2VJir;zeOO0s-!b1>4eWmF$g`H5z4aYf58O%5 z+92p12{|>9Elm@K@ok`2^L=5|Sy|^P{TKSHqLtokDs% zR+Ev&~C#nD@v6FbYnkzi6Hu3;9zrn5sD4_RiwsJ3VIaFk{ZR!*)x2jc(kNV;w5{_6+U@ zfk#Gt<1b1n3`qNs$$KbF^zZW^@7o_}+9rodtL@C(GHtUUt$h=|XrR6HaHVX{B661u HyuSYr@z{a` literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/pooling.cpython-34.pyc b/tensorlayer/layers/__pycache__/pooling.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05545215ceebb72ee3b142897fc79ff3c9d2d975 GIT binary patch literal 13785 zcmeHO-ESMm5np~&lw`|xqS$fbtYteAT}iaaBxsdbuxckkidZ%3^ovD7UARY)GIhu7 z9WB|gQK0r)3j_#IAV7h>=c&*A8;bq|2lS~=28#B*FYRw;?@lC5%95>oxG-fd_hxrz zKW=_Av$N}eO-vR(So*5^_Z1=jA$mUJ$X~(N>n4Oy_?HlEA*yjcA*xBC@Gm9WDY2Cn z)r?4}v``t*&WcY|R;Zj%d7;LHDhM?$)PzuzLLCw6s8Ca4V_Z~o;zM;zpz1@g%?dj& z9;8GS4URMC6GFX2{i>IRnidZ;q6$U>MXv~Tl8ey6n?lVBbxNpLg*q+NYvLhBgcrzI5>uG^Lq<|A^shm3qguJn+RKl)+Y z{nTpjvYHAByA1oQfYm^ivZ~XtqfZ{0EBJcLNNh+D-X~HB4kReW@7YL5M%ZbwnGu_i zoJe>cW=TjOWjBlCItvC?kP@OG@%f}_%jJ68^1S8scoEhmQ)J7NmLw`xOSWlw7P-+c z-L9-#j;uFb*R$pNy5CrIoW(6`r`AF3^>t}EDkfXEHzIPw)v|m|wwzAam#$`JgRLuS zM0+kE4waC@Vu2alu(Y*h`?mJ@aUuTjBM)0H$CKo@yv{|@{5#sd-EzBL?-Xx&t-Dwm z>32JATe=O5+3#z_<)Hgm!0nc!+-*r?v#okls@9h6crDkVhE%~!*ay)Kmyr5ibA21d zmcK09wll|F7Ujxi`IcE`&(|$wdwY(8t3=&+?UtffbvWY*yYC%Q$5PzEvP5CuazrIo zd&AW&zquv-I~{vj&fHwR{{9T!wVe6c>c<~ln=w-(F~Pj(hX&l}I&~lG6|?-Y!;3yB z8|&-yL&R#A7D-@_46$TJB1m*I09yZ>He~k`ekmWc@&2OGtls#@lkYWszvC^JO55Ap zi{{NWP-)TC8>QQjHH_9PZCM`Vu4HvuHRaa5Qs<7}be$5g(Db+3=R$@n>$Mus%z>O@ z*1c=DD!WeWv#xDqPg0Fgtl!_Uwjg``Ld|Rzq=%75sJ-esx68_ib=Y)GrvY+J3NJ|0 z3#b-oC^g1HJqI#<=(m9HReZfKk@!MvCd56cR;17`NpUYL9wd?pJZ$1Yh+0bctX*KP z@CY5_=fu4vDsd)7nR{t*59%i`8W}N-=LDXpE|e5Xps?_We3mLkU8IRO_pmu8HVa&z zxhA0GZWeQEL4iPOCJ>})mF~WC15@H@<5gbpmL=n7`GQC5OSqwID3+W>b^Hbyt{3FO zT0t(XQydR6%nlZ!)^b{Ytrq0mt`Ak^1zAS07vw_Q5u{rV;C=$D7No$5+R&)breL+h zl}0nG)3F^@%mr!BZa4IC)YmUj@-ijU)I3eEhBJW6AamP-0t_bWP0MlYwg++X z{5sh?`j7>U#r=19Fycs36qUe_Xu4`y 
z&ax=YlHHp1fa_VqKv}+31MashS{97yF@x17d+Tye*$u1P_KU=6JuYmtwCDF&A{v6L zm91&+;T&Qo!joHF&zI;Eu0`xkcxFUo$6~f5@3QV{ZF?QpQJ(Z&<9c}J#ZY6fX?1Kl z_bG9kC%Y`|dOYk39xzQHzC$`_!PM-nf z(TxztRV>j?Y{Zuqu7x&rq)aliyXEVV!_QWvM@UE75~H}sd!&K&cd?*PL$B!9D0!Wd zIV3^WKroo>!=OGzW$C&LHT(uwvv;7xRWTo=fVyD3XCS-QLN5|qGCQVy4pnQ8y*;2S zgE90)AXrjALS1hHi8x(KEF_O6#u6tI`W-w!FPiZ;qcx)iPt4k|d3*6J+VL?y`_hmL z3oJn|bmR*iY5a}nLPsJ5ad0JB*$sspOiS)6tl`>Gm!OG)V~+ul)tg>IA!|onMy{CG z6(YPvN$f!H_F&qOKRc)n3tO1A(T5I-AJ>Q9r$)z+RCA%`tY`2_-=rV z=nS?+U`!>vM~X`FyjvO*vHOWIAe_wyiOhX;NxP$Q>z{~Z_4YrFxw zdxfG#;arPw?A&f!$xm%ro|mCdQ&t>_i{(IC+D3G$(Xci4lZ$36%CQ29h4x@-h91ey zC}x8cx(SY~df1baEB(IK@Gxmt#qNzRQtl#E%?yPnJ1gg?5+}5ki`|p5`T~z;_c=WQ z*W^s%7YY3?Xw`fK+2;v}|2`xjvd0M2L_mJpjn@k(9%%c}YZ?!T#nnBq7zX_h1C06y z1SC2G7(if36TY3N+yatn6fCHy;b#MXL?sg}B`0=YX9I|UBG_pHsmQP&OcZilMI2Gh z(J=u9L(TbuiKQZn${GYw+3=oCEzqhXOq)+&i4Z z+X$#AiZGiPGzDn%tCS3h>X>~?Z%C)2%uYq3MKPz(QbUgSkKC^GJ4M_n9aS55D0M0| zkt`)IkE{;Ny#iF6!Gub9mOsgtLDF< z&?9$n;?->Oa5X>epsf8za-LfI4{Rij1$e?aaTv#Wq5oe*yQ7n%=Pr`-Oa>tK1OG29 zKpd<3Mohq9=;~0+z-jCwjTuP745Ugan1K^f2=42(1Ei?`JJJr2akxUIbh0sI2Y}EO zpYtP{^)ShfKV3S?>Pb|$KF6%+q$}Fh4qTGzyLGg5O`2i~WenayVpnG5wVy*)=i%Yy zfucY|?{1hF+sev{+>w_hj(0n4oZL6vHqQO#W^lR0@+;+;dHJg~*Rkj2(xprDvU2Iy zMV>7+bI|M~x;n+-fVrJ{zAkckw&l$BR~lH@ImZ+)E-psQ!Pi8Rsz*2Ys#+NJTvB!H z&iF&W6y>>!uSZeSAub>}c%!a|B*SC#te}%{_V7#ql)O4_xa-v!FA$eR*kd%8KKi`6 z;{#KIZx^Jkot9^AArzkAw(&V!LEEZZw`-VckEfRE@zjQds@P$=7Q-=1a6Fbeo4-DsdI|6pr7sDYJMkJQhD9lkLB-)*_U-ZNPvXSn? 
z@}Ztz(8Wu;C+em`)S-5Vk7>V~usab7c|0lLPG5mw9RPOY5$wR^fVlf?2<}b|4(X|Z zM*yjP4Zu4}!282+;F+ri<$m0izcaQjj2C}9w(cMXue_JVE2}-R*KhG~&w8lEgS9(6 zi-*fz@Qn6n_j0|_jDEX^2y!p}c(adV)xAR&Wc&0gC3MY1OC$&uoTWU6Cq@~)KM^$e zUSMeSW(SR4c{d<&P@_i}`8{=T$VP7pe626Rc!-}FDzVYvLxpeE=soY~ix+rL_zjr8 z?>7Dh4B*oP@P~xAg8}fbLjX8n{hk4Vgp|MZBe0m<4JslZ#}h}=Px1wgps;&A-rRT- znp7%>P3eZ4wgcSvrEAeOqpJw?d<9MOy$d?;)pXdzK}bzIP~I?Vve24Tg{IMJI)2h) zl;kNP-83we8RXwHw~kk66#O4%CUCKNDp5#9H->wE$I{c4)8|j0Kl%E}Gt=ei%Jc&N JEoKw+_doGVOZor+ literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/pooling.cpython-35.pyc b/tensorlayer/layers/__pycache__/pooling.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9fb70d23c505b4670a67d5dd5193971ba722e1c9 GIT binary patch literal 13726 zcmeHOTW=f36`ti?lw`|xqS$fbjAc6#T}iaaBq)@`uxh74idZ%3plMkY)P}PZDO0=5 z?9!GE8wF~=H9&y?1p*Yvd;8Mo{)VD|V1YjM$v}a=_oe;LncWphld@$i7Z;|?;qIB4 zGq-b_Z^pCJ(?wbPbL}4&h4`oF`%L2b625-V5<=mhB|1XXl4ncQQbOThT6EH4J0ogY zVX2HzS<%UfPgG8*yif(9CWI;qH7V4TP}4#k66&x}Gh%a6)bip(bwr@(1F+2rry%a9 zMGX~>GUsDLy+rM*mxY=Y53-^LMnhSz2z8vZ(7>BQ%?ouxs8@wLDb#D?0eXd27lbM? 
z{dJ*E3H1ikr_r{geo@<#aodMLJxyID>QqcU4C);9o~Y9?bq3TkG$5iLim69HJxgOG z>fxAr6x4GxY@*J@)MLUairP!UnG&^^g@YNH70$G%VNjO1UONAf)?@u)3Tep-3-yk- zx!HC%A7Vv_V4V*qu6bVPWBazF!-=c*4!y0Yuz1z6-RL*+NGM*2A*zrEI zJG-oABEl}i!6INaRHULB)b03Fz{4ec{Z%9mBnaz?6oLZ@O7l7!3CRj4Bet?)3z8EH z&x0HZ38d_LX;SCFzz)+w6eT{Nv>dtG=-9r$x{*x6hGdFt`_h&~#catAP0un{`lZ*C z4cnECmgo77+}H@3%dWe;ZST~(D7~>EZC52^`{rg$ZhBf)uE@6A?FG`)%xt)JRgJ07 z8N{I)aab-ggKM_7x1GSzKEEy{AAaR-%hhC%yviG#6_3BGottg1=l2ishTpz}nUO)S z+i|4VM4y9!N}LaRfC;?Sc9nNaQr{fA(UPjY?YMs1bEzT~Far)i^r9)Gw%5JDLAD*N z%8uhMaFZo@@qPK0nPxxGZRPlT_JWJV&G?2}cCmci|=vnuDVuU-9M4r?vve!lkcM_1;|&`3-$F8ZMkH+ya)zzB7}A97c=yY0I28h*KZJ7{@s8M@F4wmWAch8H*Lbsm`uImM{^ z=Wb2*-1g@^$H<wkqL5Ms*`cVSwQ!oH-$-JG~@r7XN`;YEmgS_Eue;I8lr z8x!Qk-4qJ(Lz*7$X2e~XpMq#+#Vp<}yir*gDdfOl;T6w0Di}ABA>Q1=)`Zw9a(U)z z!N^@N<=4X^DXAGin4wv^^UgI4iHD7)Jm;@U)}PgLKFu%b4LwF;$&Vz62bnUYzmo}r~-PJa^m3@HDpmVIAU%Ab1rhTpXtPFXcJ zZMSTMkk_!xDdY`Rp+C#r+xj?azKyR>ODZN#SW{Ldbv}FNXfjR9@vusnUHM=k;ny+L zf<6U)VXJ) znR+dkNn7S`!g>hLB*N@X8@4Q*ZnSLIbviy)-VYk&y68jBFp=Ef;l)TUiEUN{kRyvF z5)Vc+@xY?l%ovFb(qfCn*vK%7lU#Bs5^TsN%kcel`W{R87KB_7%X@f$;O0f0QwTZa z6_ibG&_c%bQq~50@u{Yz*X$F>J(>keTqG84MeS*0hHXmFj2AtgUgklzMLv-ube#aU zC$<_a>3g|BIFXYTLabZC_T6`E0g3FnsmBoFvH|+4jg8QBhMjQoM0)?Rw<( zBG(y-xb!faHu(s{B=$Fh)|8@|I0qp?%Tudzo<(V%T+qA^6`eN>lpWZ0sPMK;(}F%d z;(G>UZ$mC9r)l>(L5VnRB$>^&_JcmBLS68*ax{%S8bcBf_;S1F2NG=}N=O0&-}H!F zQo@$xT{b+e9lz_j%9nv>q6ptC3|019cGr;$pAol33L!QF{KhOw?GLGxTO_WS`N&SarZoD)5nG^K1JK)Q>cVonH4O6s}_~VtOE0KE~E0fIZPUUJG zyR$>+6VkD6!Pwx*K51Z6Tq^35uq*mCN?xaA0ZEuMY8*}v=%7A9d6|X>GyDb@b9P{? 
zRjClBp>^S8Uq^Ngzg{M`P-SmAyi<4<>-Y$tC$f-BODsVzY~%|YY2uCN!bSp3*uRmi?xGs| zvy!_8YqWLLWmux{$Ri5K=1s34ldYq!;Hgy5RU*7aNfHb1wqWW|JUeU-i`y8rv4<|Q zAGL=+q)JDSg!#yFHnMo55B-w;yo|5^1riGTEODbC5Sb$iyqBU#(gckN2XCZ=e?bJ; z332rJTWoR=BVv2R@Epox6LW%ovf>23n;-*a!S)7RDTntuaZZ7EJrg2{Jptz-(!6hF z@1aTBy^fNfIkK`SqkyKO7*R{) zK-$^?%G7K++Hr$rv!&!cKVh1E3N=Ts6l0WfVH(YZht_=G(G*92pf$ov+EQ_Bqf7L3 zo{DBi>Ly1jXQ&V-w2w<-lZyHhuV(8xJ7wjpQ`Rpm{T^twLaeXPjTZjKNiJCBc{VpYg>K7JbpmHBNm^j3W`pXC<#DSD( z;G291_Pd0yPab*1NrE|LViyN5W=@|T;Lr$^33#a~O-EYxPI}N)jm9{t2Y_)@)qW!Q zgHpP;jnjDo=SN2Q1<92hczR+%D475|UaB6I9h`fd7vu zWMEQ?V4n!!2lfe=(?F zTa&{We{S8N;}m8jKo0B&A^Uw=xHpi)8wi*v!j=vCOwf#em68#7j@hyFgE>`Y_9zlH zN_l;nDl%w4cB3*F5pkn*-fUu_^ojITs+_t|xN|g$2IHoFPj z0HN>t93^j3GEWI%{eAnQBpzea{C8yf6bnwhnoAw5<)^)q{p1+VQ(OO`Eu`@Pk2&m( z5S$nG{{_w+pBX)O7|t^}fFut5zwiJFO7*R{fML+;K-|Dd>?4gENWl%H%W1fQV=?&l z&H4c{)c+mp2go^GqFg%D7_kEYZpDZ7fU+K>IPj;NLpeQ-;`Z+`DmvqeceO(|od&KB ztz3~Nn{bTbi$omCjJ>;az~($Uw%iv4H1Yz50oX2HyeN0%`w|Dc-40IZTV4kzehYKB zAz}yB%G{#-)w<_8i*n_{g+*Dt@aqzfma4g^_7UBGVmM%7XOZuGyg%P|=LZW7P3(ee zvX_^aW9Hy%V5HjNO}=jy;hih0f!!H@=$H1Gm+|!pFC7sAQh+z^jz=mwFwY4(1Lp|O zpq~`gamC%J&P0K@>A?}Bxw$bA)g2ug5@NeBWAC(mbN`_Dn6Qmc;ELK&y@pfAQ2Qda zbYG-40;l4D=}MxGIa0?X>C=Tf$A@~9#Af>qyht@F)BsKP)C^r{7zn+dR^{k^g#G1c zm5UIG6*W%h2nmUIryLhO7Cko7eNZvf^Q&~p)b6QuGm+Mzbe9ikzn^M%EE4i$P|!QQ z2Ep2g+D*o42PXT}yU$1H-HG8YJ+=tdcpteWeQiCXRKm6zAN1SJCT{K bo;-WtQ_X6s zsws+$1SHmYjyUVVt}{j)djb287_pNe0Sp+3?POsmaSkMZBr$BD0|!YAB$yyb{Fj3) z0yw|l_v+Qr2gQ*zpP{HPwlyJ6Hyvc9^GKc)F9t z^64gJZRNLj+Rkvi{ zvvZz(H&?cux#~Qh?{5BlwAG8TT2D3a&e_%6V&1#M5_&JLI{C#?A!zTm^QHPn9h3Im zQG5#=0`uv@%^Uc7mvON%5tvR~0d^Snln;M)S}n8*l%>@pyuv-M0Kf9sb*50xd)}F8)4*v<9#|#M%3HIg`*zXN<6*gWVcB(Ur)rfs z3)XDKwMvUiWqT3NFpw4owp1?XEfh5%<#Q4StT(TH%gQ@N%dQr(UGn^D-pw!CRonIC z=V0)WpStujLCa*~rsWkIebu$^l`6}gZbep}7clv`S;snKUAG;t;$ALSRxCcWW|y5p zwN!C}?B`-hDz44uO4=X^YZlmf;Y6ZTAi;rLuaRRxw^6^DplLMet9H2{HhF%+zwiKw5C{^owYg2VaG~N zshR^=dFWrUB;Coot3i>RMZ8_+G*I;7O5lmR(Vx?%8t)&1COar~Dy~J+T 
zw@(8i7U!~!#p^?4tVKDmn#Z7?u_h{xZ2>enb(4G+wCu{Pyd1Em{r2fR(}Z>Or(#JF zx>PLuW!K4JZB$X_Or=t`^G;CGjd>fraIqwq3gd%RmF4QvGXA($9&o3B$q3r{!aRz$ zmvqS-a()IawCB3JY!8@X^iT&y4Oj<&6m*A64$I~$@-T#DbWmzL5VR;L@p8WGg+)f1 z!s1qm`K(|V!%|xm=U5f#=Ssyb4p^mGYqhd$t>l3Msxn%IJD5qmggmR_U}`ax0xbim zMN&z7Vq=QxbECy3qxub}ylNFHj#nyTY^5v%*Vf8B;C4DPD>7@-QhJCRf>DT#>*z&) z=~}m(oP9q}oHbyX)o^=2KooOdxI^%~h}CQh&av{dz)@DI+GOJ5!#vich9L*w1!!hX z!Hr&B^;Iq|m#ZZ-N?|!&^Rl6x(H6Uq_ zWKiIBEJ*#52q2t8ztq{1lP?F$OCwlA3BqRrd&}*r_JkF2#3|d%4m{VDF zxssQwvZ&~^0^+J%n_L&t2!ous4g|kRUW~9V#LIH$_2%YV2xTw!(5Z1D50DLB2EzIXU>^|rc_6QcFdlrD;6#*HHI$qmzwkv2$!%=<)OAMn} zs4N1T;ufPh9TqwLRgLXdN<^S~Qy0`w)FhlGNr8aZOmi-2ToPYx(vm^zGMeT+2Hz4% zq6t^8pSOIpWCeu9T&X(0Jd;IRhjdc}t;X(W%9WWR5SkdtAwU06bs6A_^_N?!&g9Bo zbx|+eV0~qmR)rpkv=TdNMTVb^nXyU6Xfo~W*|WOOdc?=Au~Es!=O7p~R>L|w9z4BQ zDdcB@)WgHW!5fYE%&XDkCxSP3Y8TD8>Q2B7Zq612`aqdCCX<3}XLk=6~m=vJ11m1QUD zM{5Txj?{QCTmWn@2fAon?{#Ll#?6hXr-*V!)~N?{Qlt)|RZ!C77)k8J+)lEt<7DMr zWzJObo|=7hJl;nQ9pk~!8!AUgs&Rk&)FVMKU9W}DawJB-Y$Ks(FC2*V8u%;+Vr-p( z2uUCfcur1^15sf2fmo9bC`#6q#(EbfCSE0|gh-^cn4iPZnrM668i`wAckjv$K728<^;ppDX>gVf>@-&dLLPlNH}1P8Bjtu z&FUjlAwr7CViI5m(*V2yj})diUs)~}vCCU--nnCqfsBeRn8;CO*3RQ_YkN4X;Na&} z1}u>0dCwM_CEyxGW(CBs)OpPo21X&;7~$~vIAVIjUUqRpn6N=pyLaT!^Rzq`3`&k$ zSIBG`w5B!PM81q+#Z;EOAtVJ6v<{&q86tV3WLD&{&)o@&4EQ|jRV}Uj9O^gaMjAnK z?s~`$GnAnNBUfO5lxxKIY+0H#bud<9#We7fSf!k8H5uNc{^u2Jj2+zD$XL3 zLW9D@r{Gztxxe}I{QSe$KW)RW%e54yps*RY{OrW{0KOOS^|qvydI098Qg=2fcc&^T z^)RIzrK(CTB-8^ij&TJiG^rkRsYi)q;sN-bI`tJ*#eE7-)lVjvr_?~HkDE?H zy&J0oVgz=beEYo*H>(Gm)k3dwzm?E854%k>(Uxvqvoj`Ah%KtTRV{8)54WhqgDq;I z&u@<@E6CX+%|dQ=d%K#$B#7*yx(#lkKb**}sw*vjF6P@uY7E$or8*vsMm zu1cm^%0Bh5S2=iuu1QjPmk8fIfXGD8qm_Kns}^2h>mfU+zNi+yg4z-+U*^YIT754WlNcU9Hm)S!(QZx_@DW;(TM= zc5#U3lDyYhZruIDl!4&c6afdVF!_)PS($HleZHH>_pY<3RAuI;?4cOvpcaX|FXEVUFQ4Yi&)KWT06Qe{c=B!V-rHJdM$z3kd0v+G-j zz|t}2m@|ipW;5q)$;n~+^zVe^ICID|Rr2=J6foD?*T`-@1J(%J^mO~Jn?`{yPcnSGpw$|p_cvI^nT_vP{t*uyHU9xMNeKryK|Ft%+P+79q`lJ>oC)|88 z?++C}{WE;r{p?5lk{PJA$u0v4A0%*)YvgL3m+lwrB{IPHkQ0oDDNI|f!xsY9I#%*7 
z*+{hxpRB93%c0z>rArRvP`go6tux^M)!H0PfmiEX0TU%hN~hKr+~)FCNcg}tskPn9 zmzV9@&Tv!5)SnAJKkN*K#(tqb*0DuW-jG;eCj>TIo#t+Q~(b`GH<}UQO=x9M{^h&7-Q!&RW{D%d_q-cCf>&!ydVj z*_C10WR{luvZV8gOn02WHb;2+wJkCFSWoL#k^SVt8F0stPo~Gs@a{B=X_IANOWwn* zS3&Ly`=Pd3TB2bFg@Z^mW9j{>a^FS9A1w@h%Yz_tXg*&lR-Cz^i*%V{KY2Xp;6nidDKy4bKDL~781r=h;Q6BUOl-kmr=uGZSbSHZ8FO}Gq z*pk?p>`op`?CVG+_TksI&KHxNiO$51UuOjVGTPl&o-{Z;7_U(zaZ3fk4&9XB@*$*=v?jf|M)|JaaDz^-v z&DFYdxx35xvcDEPa+GK0a_&i{+XwI~Rs=6#OW_t@Ffj1$K{6 z!cFczrMWVS++?nUVQ(aYKZcU-*%hF6`4fy`*2 zW3*hPeH04`|4t-Abvg;M!^=riBXHq=QzSH#pDC3~)m2L~=yMU4xK0^9$%KG;Wfl@@ ziqF7Fe|*tY#h`r16X_~}kIDd4dudco0F~q`PmoU2h=0H;+Ov7cbt%9c(XEK=bSUMy zPQi1X0>YJiL>c7&Yn=j32gOjezijJt3fSM-mHwHP45U|4%>rElF~KC&cAje%XwWe5 z(M;$LN?)*)45X2~i_*&HqFE4`y4Vqg=v)|$>r9@XAyixVlIa()Z{Zn3V|jL`4z9mQ z^@7ifp`WGZnNz_&_;`*!hlQ#P;K2xC=4+*dI%K|b=zJb3G8GCI%&uwi#I1iBEjB+b z5kjEivNB(yHWmmNG|64emj#7jBOS0-pv4G94saqGszVVYk|%7}hNKVElTcBC0%0>X zi~JqX?cP##s0cZ`SH@*1%K8tVit-}PbD<)|gu5N53Uj1--Gzd!5;o4|skWIC+1)*O z!p|Ojzm9JTU+*Z+A|D3q?iW=FTwP&qpZKVb>zxv=HyF-az$!~7KEx?Ub4#xZ*PGl= z!}V@gcH-dT$bzU6=Z>?J-<$T|0Q0 zAGWL6O+LR9ti6Cq4#u}|x51&@3&t{esXfXaOz0c32f;?xtah-sk+aulW1kRB=WfHs z?gbkgoZ=`O8zsi_bF>;c@6ch_Ebb`h{Rf8g-s_L0a4mh7H<}E_rRFi8Yy$)MgQWT( zr9Nm=U=C}2{`Zyo2x9~m?xSQCtlCcUR;zo|tT1d{mBbVF_#;dr?mBS?rZ0HCI_+L^ zTHTkK3jdQx#n)iTW2S}844KC2cE$GWBX@k^MfH(VH=O;!Z03{X0OS2v$RF45r4In9 z50v^Ktv={bA8b;yfCZQr^r`m~>LUnB056+(^C6h}!d=h39J#kRjc6@-=kKRuE!eNX zY>&3!kK-+n_ga2|QTfNI)mgH}KR{~$80Z$P8FncQTlE0zz(ifdm*;m%7lhvr?tU}_ zOa&g3!BRKwK1v&&_;02#BC!%+57AfxY9-LrRknAXq?WpgYTmtz0h#ujIE$vkz@oAA zrX6BxSYA9PHGA`Z(3_u32fbl-7;7|Uuj>la!H}b6I`-rJv@G~Q{PG)=$t=7~afV)d z#g{18SU6-R<*;%9qgp0*SV5BEW1{~h8LieCk4Q$rBe^4L^+f?@EeSv&FexEcNI+V1 z!#Ns2^&4$IRA;0)@q_|gF_=6`0ZnvSS51DbidXqk10+xYL`2e2SgP>GQZP=T*hlyK zuqqyu=SQ$A+P;%g5avAGsUBjrZdUH^wQ*f?NCfNtDMc121Y#UWt=u=;SBIo?19BbY zTXqgvGQ^+7!)ixdj`Q2?24*j``~5X}k&%KUMLvY^3<>n@py!fqDka0^M0%UyOo$|^{{il<2Kcw2G#P( zcpfrR()j`2>j2yfr#_v5v=-DNv@y5C75r;V_RH!ax+dans5G#T3AN`AXcLwAUQ*p{hp08_=8;|lFGVAnWG$H;t|dyi 
z=@vGv-;`R15A0!^e$@3^x#U%;S$+0W;RWp&!_$FEIY&*LFeXq4}}sR zG-V_g@bmj&bN?J$@z=i;t@xxxD^eIIfheM1AEJ7Fh{CGzAt0tplX?vq%h23tdm-E10L68MS8EmZUg-6!-m!VWc@IrQ?c{U&Pa|;62OPhP|93e_nT@ zrrs3p@oot3#vsyX31du=dMUZP4TAmcO*UA=hccpl7rqOWdyALv@FGj)Ht#qhA=VDJ za}kpN2roTS(%t9l<@HUZ_9-NMgE?3a#=NP$I)wG_!F)ZfY#d1NUpyZ?!V@?@z)um` zzXu%&JQ2qSRJRni33EZwPaCmsLJeyt&&|Ln5~g-mWrk`Rz**o0dcG+-*oIK6xmPJK z`;vM}=D1?B6NNOg^TWSMd@tZDv@DcNK^38L@LHs*)Y@0CEj!?0LWaizhyZBjs(}~Q zThRXz%lBEgvM8+1xzasY>NvE?#ooiknI6B`D}` z60H&Z8krYDFLk3p1DA8T|7g&^O0Ml-FMVVLa#<$`$QUGiY!469ETiAcP!uce<~>-h z&=XtLJ!h|*cE){#^0Dm-{*Vnm*yU?oQkK|!{Fk7;skL3v_J&D5Q;W}D(UyJW&R-X? z!z4ABU?fWe6Ldq9NOVCI znt78@57XrCfl~{D9O|$Dy}kUC#2>KeQs5Eb54eS7+$J7^?b-$+y)B^BFUIL0i6A)q5!76fD?SEkXUAVzHFWH;!YwK$( z;8*Y=n7x6o$8*j)h*$dxp@zFgyzpBiI5viL;X=kZuJQ3Our7d7oY)Wv<0b_#+~Rhk zw)`Jp=*2Wpu&qU_F942FO@$eb7hO|(9=xnqEIOe-WSq*+wLoGC#nc1AC- zSU2QF9=sboZ5Z35z( z7f0;iQM9kefDu`hG76$uYCF2AWoTLIIm7O}=vX|vWJR@0Y`|QwZ|3vx@Zv&%i0#&^ zV8aDRrx^5Bcab~)8Q6|jOZT+#e1oSK!b_9)x+mDtMNtRld3k5v!aVuO7y_=_zylQkUIY3 z#>mog(3#g9b&x;_tgqDC!7IjRiWaHP(6downF+*Cti4Lrb@ns@CZeQE_d3))ii}%k zlN`8eX=+nzObKn@V7a(NgHZj90B@~V&vu|XU)!7uhZN<>)-Q*!pGIaWVQO=`dkiQ+ds)eMr<9k9(gJ{DEiC`Hs*IZK0Wc5P@5&55GDEMq>y0z z{p=mS0HC!O8~dZzE7y3eN5K3V*g9~ddo-Czti3Q1D4a>Xn0rI>WYLE2BXNtSaNju` zY06g`n-ZmD45durbvM2O_@~e+-yAEYmiCZ_LDL?=9*AsG_3;jo-9h4vjTeAVo0+r% z`Qp4F*#Pn);=@XLlnUQgu>vqHZ8UIUPh~YgW=uMWD!wS9-Q>p>CUg?><9UaAxJf_D{H{Zo&GSj~H%G<<$u)}n# zgIJ=A)}jp;I`DhLvqe9FE8{a+_F=?7$n-||8)>q-HK`#G+2$=l8xji_coudt zCC7#ZNU$8;6_ilx@Je%wl~U3DYxo6YI9$HR%Vk_LJYghN1shAIHVHiNPH(W*29^u`_z}v|*g|8x4I6v=I%KHqsIhC1ObjtxhCZYd40Z zkA`Kk{&r7hw&&jh%yB9BhyOE=p*)2sqMZuJi8^JkV}V;IP$PMZ%qvn4$1V=|ry-nN zjP0za$&$!^%JPK#LY62Uds(*J22xh%Ux(1Y= zok(o@++ZG+NwPBUSFK?lkplvVWV9GAp}24RmQ0g0^$O_6%NP&l7J(26#^sqg$h79a zY6=l$umf)kE;9f368`B2?eRQxUhj)90^WW<7S*&{Wi-fv%r0r9{`pRfaKA5 z2XFz}z@oXmhoTTHE}ZTkVK)#(ny5?!(x(BWu61CZ2t@cj%ioE71V;tJk!e(8-{(=11Z4OW@dGlWH z4SOvDAl>U&XA}A?uKVczGBPylWhw0CRGP%yRQo%5_?x+}&-5bK=m3WgzThC*Si)h6 
z_J8TgbMIW5N}mVRiZR(lrb{^6wXSf|Ybj)l?3dk9i#)v@(%20?J7jmO?Z6ML9;a%o zE3h_PoN!OD<-tB$OLM7Ab%l#fxO7uHs+B6-En(>7%$WndU7fGB3jlkXy&!#QUzFa# z_}%uVI;9kD|Mf%jbExzZhvpz|)V_A%3ip8r-Idsz>`U%W?Me(J`Vz;Ir@ z<|r_+`H)h317GiLTv{>&HY+d$z;q=;0G8I!ZNtu{_cxqDgcIwtUc1OBz`n;*Klu>( zAK@T$F76wY6BsgENyWr0qG)R*T-50$tSvWGzs$S$OZT+b%o%TJN2h%)VAVvn$FrKA8qUH;v6wPTDo6;Q|@O>w)4dlEY z2!FePQ`#kB#5$fz!?t2E9nKWQ@G`u5n+bQ0r*i!noKF26)3*j~(y+i!rPdpcwL+7e z$Yd;_2!R53*z*2j`~1}7KuF-HiRH*=bo$v)-gB>?e<8ho))BYFOZu>{X(KJ;%7T9z zdcGYuygZ%IgU=)ammn+s2wM8{C3tj1ZlVjr6y+KW;VP28&P8WGTy)N(i~*|v?@T^Q z{XC6=u+hU!O~4rqI;cHFvJ2Xl#_veIr_lfj>!m0DLmP_Jtks)eBJZC@DAUxmN}IH> z>pq6}Wl7ktWX}#TtIsesZcS;IfnkfCDT487;R^IK`vMILG!`S2>OIycXdS3up&w?N zeS*Yo;sZ1^`+*b;c$;(qR11F#_2i z!bRvw1)MWsC-69qqcM3-aAbtzgRs7GDOJ^O-Gx~U`ixv-%@4y77)%5lG300shMWG0 zb3cy8b&iJ2OR&PoKnolV;dl>^M?VnT4;2ks^<3}sVZF72xsQv?PV&6!>bQkc?aCS-w}s8`((gh;8mFV zrZ;E;{?k_wGMM|uo7pySPPUP{{tnoJP$GvLDp;+&XuRCq{l$cM5BiF<5r!L3ghv&9 zB+M!I-D!pyod83K{qwb^A$FLiuO#q4#OejQs`trgaeqmC9PO6^53n}PgQ068j4Ec6 zf53U#fq{)Eq9gw)$Ow$T7t0(LAmybM>6`hMgn9EtZ9d@tSv+&)_N{_w1)R_A>SNGw?ZC7 zSU@Y+Jo-c#oJ!@WEhk|)jjr`T4_yds?T4pMA1`wxqKcP6+eJfAYvH}hpSyV>t3s>Y zi8x!d>kjf+sD)RjfH%gsr+GQUODNc;hnMgquC?V(@!1=^oa5y@F15}#$vyhIy$bIw zfrA>YvUZ7y$fkDRzZhGs!KpX&TWuEg>$9P@H`Cbff9u_X<0Mv zHR;O>Oe_N=y_)3D>%4r6mv8fOgBRf|hs@`sI z_yb50_2hB^o?M{V<$H2DlRlg5q>`6t&G2v|3nP$zfsMpnNG@h8`XPjJGHp|QLtQMb z^F_Y5o{9Apzj(mE&n|FCyHB)3=S=^LIBUUc`A{mA#`glg-iNqQ6e6-Utao8dC*zNx z+5&feT42Qe09X^g)sUE~FvtsRkL)5qA!^e5tD>@ho@SnP6l_ ztZ@gy%`3Q_$--a*Qnr4SDe6&UqoJejnH+1=4zh)B*uNIO^ve2O)GN7o^`b{@5TSGs zEHe>pN5nC01=*X0DC$S8$XO0*k_q5=fd|fW8H&FGy*LRzaL=B^L%1mF50XcG-G+Re z+#Ec}gLxS^#e(vQ=LLC!D&Q&A-5MOuo*o+>9XWm)+wpMr_}Hn@;Zq}HxAmLR zg36hvE!#toWdJ=sXu-C_~_`dQ_M4RdgR#YQ^!u+mf>R28h_jxfrq4uIu&7aG{6DH~l;|BH#d`JYYBxM*v4ch*hXQN^B92ALTf}G1G3~9ynwJM}r;A zO36c2|8LX;-#n9pXxz6N8OBkWji%4IQ)wfF4!!w_dSD8Ws;L}N-zSApYlrp~PJVEP zc9cKc1kZ`JL9Hu9617e6D=2*aTAD7ar#8W@w~e@04Js|0Q3_>69(56LVh5Qz~o8CgPYs&tJ9gusgY?0hIgPjo%v7urI z)akJg>KmMF}xO_`Iyho#9gbo_=q00rCaM@!%;~t`66Z;;$ 
zI6#F^}f`;EbIWG(Zlo1s&a-Bqjb7MQAM29f@R53 z8qIJ!@GHf}Wl%Mr)Z;@w%gR0!V#I#Esg8tc{j#E4UA!>a?LZARiJjM9m-2JlrSx`D z&SDvs{r!$e_|_=An;-MBo7kaH6Vd=(u(kPJf_Zo<;|b z;W{SSm=x2|D#Mtcf%P1h`$!Gp4C?L7Z1vKYW0AhJ#?%^BpNFj|qTk7?oi=#7nZFZ* zP0ioWj%lYl-?Xc5OSD977sk`T1Wq7S(W!Nb`$$nFfSb_LoKx$96B#Jv;{bghdVn+V zDe6D)mKWh46*LzSFKDDps+Q)=)Y8NmwRVk{>W^DwvJDf*^3b9VZcQFc^g+QWz^}x! zROb==dJH!jzecet@M#pW0$1{<7KX}+0vdw(x3P_E7+r!`p)tq?@ud$~Uw#~U`>F9_ zM6E$)P{A+zTFd{)eFabHG39T(K$%5cr z9|e8Cp`d6H&2zR%&cCOsqTmK%hFWp(>W-hnYxEW6pzt=C>0G;TO`E(#j-qwqVPWs< zWd{;;dK$YH)kmDUfOD_8C1nn!5Qbs^LX@?J=q%0lurYWe1r`D^f-`!k8?=AzotEiI zw?GARPBLmO18s`{z9foqj?g^;D4-y4s$JYak?D*u-Rhvipvb(l3SG-eH3zmtzASLz z{w=f}ddUP1QIWwxu3YP7lOr|PgAEDN6@txH+^96I9mny_;@g{y0TEaz8h8FGZdwA- z+YNIsjGE@)BRuU0;H*V5N3&^=2~%re`9xJAUJ#*#4=niE1XxZr0t@5uJ`IE%qN{J1 zbog<7TLH$!`eaWA51Z=Yfe6C=U0(hPFT@Jb*?0dbAN(_3>X1Y)p1mKR?Gem08IPE4 zybL}>CvV{Ekw1#lNK*sO`hdg00Qx_KHn<7&hp>ndlR9*}yxAqR<^Rr(CT^D-*kzFb z#3uA!XjJ42A!#KeYaG#B{6w@X4%T{0mYM7qQV1AZMsG&=s*hJ1`Z0?zOp`#W*T|4@ zW4?}KTc9H-2Yepn$aW9(k+jg?IDE$tgPx1!54DYa5qn_5z_MgwbQvL=>Y1t=nO(!@ zD+b)n0rU)U6PotfqZLLv8~1wCA=(9pPT(@&JbzBqwL zJykWUcp6NSX8Y0fbp3(7u4O19YiD(^+^0EXH?(h=VTUco46=gAhC0CMBF4;Ngwi@z zLS4%=Rm?VBp;{_OT6jkQ4@T(>4rbUL!8o$(s>)D5S+}9p*om@X z2ldv|)?C>zLI4^f2z;Q!Vg~V_58wOJ_*NPKCyq7I z02q*i7KF1zz~cv`tOKDS2A(u1;t#^rHValR-KDf3`jLW61ar2nk<-fL8;xpiiVn^I z!rK~;|7>1`bW8_5rzO{Vv2It9wp0MkD#vmOtlYG7zRKVFvC6Gi^TXh%AN_{2 z+M-BggvF(}@iP(EO`jrv^#IuprS~pRu%gpVCHAJH?iY$`bE8E+gmI)a|MnFsk%JB_ z;z(l)Qs)kCWX)VyR$c;Clr(#w9&k0d_+$>wM?SnTS^Z*J#|Dhdfpp)p$d0ZnsY zT}j7U3ew~)g7|*HM>tb$gtp6;mR6marg{<>SZ8&{EmhZ`Dlv!=iU%vuYlti{T}C7> zFgfFKu36Ix!o#Q^*N*4XSS67Zb7MG|4&|3hj1*@g<_#H)>g8~F%?Vr2_J$yKc8jOb z>bh2B7mM*$Y84Z7p?UkBTv5b~bQs<^I_67=h^!%G;#0Z`Hma4ci`)hg9g3@HQxMT% zHyE4e@dc9=KA0eHgra;=tEcM##@*CJ55qi+G=zDealFoOnUWb8<0Av*kfKWo5epR| zq4!uH`$3rJfdW7Bfd8taFEHZq51Yh(_+jYsyGiVaA8sV}1DX{?a)5$<-0eA4T;0JinY2of1g0d`A;eR~9Lm!oT&~Li3-=h}x`Y|3p ziU)XDlJE+6FCmc;0|M{GY6CFyL0y0cm?WUZtpYs2u{AjLIw0~Nred|iPjoz}!+(v} 
zD(|(e2u8wAtsalZXn?2tog6CIDzpgd@DidL{C*I(81C%qVl?b(RszO{^YRh`7W{td zUBoYjzk40ofiVsc7-9pJhQX5meOM{8BPv8=_DGG4mW*W$S(9F+X^C_IQ;q#%T(OrC zED$aMf>E~dU?e`mL8>GBxqdmldWK=32|KcCIMs&*D#oS2P09f@;^7fL0h)mS&}qJL z_m_h(5I8H6OK{ZHcKdo+gObHLV7#-?BsUt*LA z-ByH=U{79>pXgOLoV};N1I;po!$xpxWJDyB0Y-748)ID~FXB++IwY1fSmqEU0eZw0P>|)u-Toj(!uKSmLnI$SnX$N$Z-0Hh zo5=UB^RrR`*1m*N;o3wX2i6%p(zt*Tu_NWuvParOw6IHx79~4Qa|gmqgb}fiNCbsQ z)}TpEv&sU}F%kU85yKoHmOCOtJj#d&9D1=#65E+$Y5{9a?0>ZF%z+=j>up#&VH`tOS8iHsA0~;B8<9VfE z)OeYhrZ55y8Qfr4uymg+Y4yd2@BpBGM%HhgPl^Ooh?^)21* zGKtFzm#hm)-8ZdT=G{(S?(wq1%Y9y?8-Iy+O&rGSo=};w)_z?ifD+9d&f(K^4X>Tr zZ;qT$B3|98J|4P**CulRoX_6kz(lUL zGtz=k<`D7wKF0Jf@bZ(qT<3*jJ++y}PFg1EX}h^!=R0BqXg&$DnCAmA(R+t?f;%R7 zCyglbPO!*L-o1j${{gIfzkwV2ai^#Dt@t*CQ%`im&D<&Dy*>3JQbr8$(tA=(eYtmt zFZa;5J5r^R@Z+AO7k7GKABKy{{qWsRU+-t|`)q2D)ZPh??eN*2*pqHg?Q9D|v8T5u zwj-y6V^4SLfb4r(p&E1Ad$=w`0T+=_sj;IxA!378~A$HaKS)-Hr*f4T~Pl4 zd5HhG2c4|df4YOH>ziG=(#!XtWpef=Z`$bIoG;Z4MJ)I_X$N5+X>9{q5(KJq77Q=o zA~sJ$GtE;I2Qai8c=Ot~w8Cns`Z$XLX^`enZBQXd%i4YJBghuLXfK0364!;1l1ZPO zizT5N0lUQlf+S2fXwgs7G~ZpoJ31|hW(;(=S5{WCA@4GBK%6=PMe zFYsI)=(#%3b9JE4Ngb#@5DnHJ1rX%KL+S-D&_zK+Pp%nMAJ-;Q!3;>3XHdg)-6Ja^ z#liTBC-=M)Y#h&Zj-Ys`Wliv-EITVq3Ta{pD*&Hz@I_kUmtOBkF(FoyQT>KfhD&Dn zg@ooEU7eyVqxh<|E2kr~GEJM;X(>I_A?Cg-s-4;!lumAq25wGoIXU}&o{?Z+#|~eB zW{-pnm=bB}5p^y@=~f^x(_IKR_A7zQeZQ6s>K(0D?INRG(PEC&h>_IkikqGTFovH= z_b49sDN>|S)T96qg+8?=5}eGMkF1%`u9RfaNm+EPjH9xZ{+X3bG~5>2tLd`m+DZj@ zUC1<&HV#xl89<-_s5gGZj7tpdcnyFLGF3yIxZW($sR5%nS;b|V^F(6DgW`0=H-PVl zj1*{tT9ISlhlY+R#6Jzp>bdh12l}#)oxO(ALp0N{Dx39{n5czoGmfK4e_|yX$ z2-=NE76oWuOaCjnh^O+1_T#3s4iOUvsZaYxv# zEiI(#JrGtU6JXx^sco91wUny=S`(@IuLV-|Uuhy$|Fvf#Rqy=FQuUF~C{>4u3x4S! 
zRj*PCj+=)t#uUebMM%}Rh93T+QuS?+s&8wQss}lJsk->ggH#=|&jpADAr+P$x(?eKB;NfJVKgeiavKgi|~PXls3w=an&M%s?MP};83?v+#{Oojh( zX}ib(eQ7(|vrj>k4!P$?acR3Ja&Npx;CP67<$Wt@JA~e7aa`K2JvQbb5EWmIIV&Ox zK9`~>7;g3?Y&!HQI4%dI$$ylj)K80o@lboxlQ1Pl6nrj_osUzD-MpBCsaP?P*h;A= zLl<-N?l}=^hk5{Sr7=+MFQqSs zU%=ixk(9&~ds`PnEslMv+^-fh6gMX*l-2U-!$v-RL{UC1J*0>lY)A|{e!`U^h8;H+ zCBxODlu|?N{aw}Zos{@u+^HZ3c3jHs9*NKfeGBi8$h=_wA=ri=MU;aY^^7#usD~Ie z9%@k|gylQL2c%TW86Q;DtSTOkh|B@vdc~n?h{K^X;yZg>g}53yASDK@^%GK1*bV1k zfO!syOk9%SJyPi;T2lkE_OG^8M^rJx0%O<= zkdS!aod|^Rf8SJS@>1e0)fQiEmiH#|zU%ygR0OFzQexUEh(`n+G&^`iLHt$)^$tEY zWwX~>5D&Zobq&91F=6~0!C1T_!uUw`plLv3&9g@Z@<31ka){q$b>@y4$vmX*0D%)~ z0db9R6H4Y!sfE-2WNFF#z%!G~zuUkaub0h(;Du+bMR>&Iow#(N5MEfgvkq^63`Fzd z*s%brAo!$lw;kVSB%dE_Ls2YzZJ=RB{$fh#&$GuMvHtue^xt7il4uAd^x2J#{+B>T ze>-e-A*v}r+EoCP#H_C=Adys_SVoU1f@XX^=bEO&nh?tPW#LiPMJ$hSa^Mb|QY=KA zH+OzWdY6Lcsco^twL5|!%ODO7os;Ho;}X8(qvCk$Y-)RNV)%pDR_3$G3ximPiB!>izpXdWb(_rfH=}>22;K9fb-r| zZ#?3>CzIP${b27#)DObWHqk+F|25k|g=?3BYH`n}H4a4CK>C=$i>XfGA_+{HTfou{ z=J|DAo=WxLPqDXnoCUS*iPR2~ds6Et9&|xPK~xV!u>g1Ni9?D0(s*ou1{6Vn8V z-w722{;PYM+=D*%w6X%Q7e_%>c zmphd2i^>3;bK2#QKU2srRO);B|^?Ng_Yojy5RDB6YN`O}4yBPZ

+ zKRJv$d(=L05>JmI_3;y<_;vaOQsY`Yagw=^Ze(m!|E>Q$hV&=}B?M+4RNa`yKrX`? z36@$F4ywL?o9;%*{ghbX6ZWzTf1?vRz_b_kye^*9vEkF%F?e^?!zGhFXkCR}2~4pH z^ET|0gn#P`Vyx*4^ZDuww+hD^9Uew(CC3*3l;YSln6QYq(lxJ$ka6?*%8);;g9Tkg zI6;HrHs#!M6(K(%%flor+D=iNq`Dsa2(Mr8o&naiS6Oz^ZK({LV51<@0B;;6^AZCj z@VX%_7nn!Ekdfs>feXS4Q^?!#m|2%3Nnkt*N7*sOTT!i%lq8?E zaQ`@iHeU~2Ziuppu`!a#rA%zY$~I!=`K**}g6i3Xjp^D*7U>u%p+;33^8Y#3LD9rh zMhdCj$`EQ~6nt9h%b;?@Hr2=Y@_`O(L{@WXzO1R0pof>yWt|ohP_y*4^GEGk$3Seu zn(#%F8vabyb?ecv`egOQNLPPJ0XB_t1%XN^kf37_(duP0-7KpJkj3S!6j|mB;}h@e#Y`&(LRX=5t*RqssvRh4Cz= zs9u}nx!yr*y@O|^5g?0lgK7W-cOOwSTWWeGU>-FdYa#Z(qV*v)N}!B4vWS|M8Z6+? zUTUnZ5kD#+j#{_O%i_jekWkZhMoZjSE1Q%Uv{5FaP9h6k4S76hy#;kzP)BtAFU~a2 zJS)m@sW>|{VkpCVV_EzT2+%>0e((gQ-8i7e>03wUD=S9^tRqm9f#*OEP4+BBdGE zM0`-{G7$PA_&*%B7{2SFYXMLF%MMgBpzK6IE@ziq8~(bi*Wmt~y^$RoG$0-q;KV!n zD0rKLr*tY&xQL;PC{5vW<1+K?dZ9nU*dO9LSwpBuezS6gBJ|=?1<|dK%23CHgupMx zSQrN+Bs89Z7mi3_5nmk9d6uvnk-fP;8tP*sjy@uVMXHDb!BbZeDXg&ydNHAAk=g9i z^dbUq2QqND5dcxP5fUOHN#rPps!jNjocIdwKd{Kt?19V~nQh}3LVKz3PgkMxka+X} zUE)z)Ebv3(EB-Yp07^5C2e1dYhyDv!5Ws=BhJ}p+dJ&>n7DiopI@UPBkDpep4>DmW z;5Yc5r~6^DF>81Z!`NR!PT7(#RveVo(>eR{+3H*!!p4aH3qUx4F;Ga8q>O?ZEWF3Er{#T&Gx)_b8-j5hM*n@Cs;j1PD=-3glwNLjj zLo4?;;RJ86$S^~zBI745-2`-9&UQ@~tvU0om&R9fLOCPN=fc-D<~qZvVcZgx;-=L_ zmL!>sk;O<@AwzOJqERfal>&b`8aF`Euqv^tpxIR|RT1`(WsV=w1aqW7J11VVFz=6d zl!ZuKV48X3ej~Cz^*vxkHmC8(J&u+_hH-l`&v|b`wt>+kZdi?1Bi%%L6^b;FVWhN^ z8Lu*1Ma&6VJE+=JA7T~LE^*)Q2T<%6g>GNhx4E8L2Zor#94M%f)qMot3uIkGjZ6ZV zY9+L&f*QizL7Elw@Ioy#_>V{#EwrzR7MjFurM?CX%;6p#p;_T?AHH2arWP84qcUP1 z6rvd0_3p=k{=-j4w9v#^dg3PDisuJtmw+J-m+4SofL@X*6W$o04>H@PmAtB*bm0Ik zEfoCm?;;El-a=Ue>CrqWRH0J5)?r$~VTTtOXkbA{4AE74#QP0Y$xw@UO@U$*Oyl9h z2GK_!B{tLF3Q7Zaex1?+cQ{u>6Ak`z-~tB^>#Bf}p{attEKmiNN;X4#gpOX27+M@# zOkh%Iw4rTyg0|tUzDG(t7cVvlKg;L(zE4%*umZ0{ONF`M<3c|X{|kH~zF(L|lyvxH zw~Q*(PN30(#3)U&b9V3=dT-$iDaE|lO^+{B1w#%g%)CHRaL|FQbo_&9&{|K?Ht8pk zHWd}3CG0fnCzO<}?ezKGbZ`u=ufHd_v<+R`yM!1}a@-3;V$agQadz#sTSsr3O{YQU z#4n_^c1dW;JArz1+|4@6VzD-+dGTfUo4D56H8!oaYb>03aU(L>(1JS@uG0KfTS8M7 
zDAv`r9nBDT{Zn1>RA2Ozs~Z)6rqgNT@3&aR6}CT3`-j>lM5@XW>5IvAt$le3e!*?A z#IE&~p_c%vX0dXQ#LW_-ePp(YP;_bwF_6Ebl^p%k-;GL6#4tJ-&8b~}YM`I!?l!f{ z&sO)`{kLrBzvCqota98%oI2Hm`r+OCj^Bpxx8GL^Od~EFUct|uP!93taU)_-#5Y4k z0sKoPwn4bKGkG|<6+%baLpT2GjJ}b$n$i)P4soR16rPK30mLtY z`BAMSmm>t^H1JY?#_eF%_R7qHU8uU7_@tAUpW~&A7ct@&**?XdZVxY1d6t+7Vma-y znR@)=Gf~_08cs{gW&11};=O?j<-uDMT^Qo#|2osV{xkmV-*u&LY}bd~!`(yOdwTKN L^=GON-_-vDx1&6dR|pGZ&9K|-J02%l12wxBAb^)9Xr&F&ROkhxU(X6HAAi3Y<3sPrkd4M zRZ|pc2{CqNcV^bvOnfZh6ZsJ%@Fs}uA3*>kNbDrOav}cV7)S*P66Z%SNq#tg9Apt- z^E~fYUmbl=)G=eltCqT0UG?4H`@P@$z2}~-Te}WU|J~`A4x7e*HDaHwxPKF0Z{9SF zBL163*)XQVd()Us8Abe08|AdI)M8Ax8fLM@D7G5qHe;&TW)$0vVuw-eG>TnDaf?yx zHi}z~;x?n$V-&X=#T`bm*O=dGOt%|v6?YoQ@Bs>MGwcrI{j@RNiKn|*2A{U;r(Jlu zo5k{JhhcA#wB2~Rhc)qOr(t)?Q&hH>%`WaUiu;WZQpWT)q&vXoFkP1}6U}*v4aCD~ zwCiQIl=o=h7uaOp_sS5w!sdT~o};-a;Z=5pKX;*Ai=9GJT!S9$#?yoBC!g-nPxs*I zA$FZld-YRk=w#-#PdNh>zrx$N(_nD=t9ORiLcJcptBDk1GH$27@1T7}NNf zGA1)y+%^=JpW9^^T^2r53$}HxP|kbaxf$KS8A~2mCC|!RbEW%s(Ng1Kxpra2b#13= zl{pL6T*b9YOUq?@3C}Q)76!IdF6S*2)i33968f#TZhYO!J4MT`7P4LP{GGg;U$U#V z>&ef7@FPD}>F2_h$;8dbD>nKa*S=S(taz#wS$ST-zp-ZJ6^@TTCS{Gd}z(B zIE8Ad;)K~RB$8BIo6VK9VG`CdU`^V##Zj>cMxIr2;u&9Gs8*M~bAyActE<_n)H#Pb zvlVxKFuz>N6)OdAaCxn|P;my&&*XA7WlvP9wl#HiI6HJghDL4&tUOv%EX~c?9ObBE zC8t!)0jxaquUL}qqyOF!e5Ez#GWfK(dh^#EODt)w9D3V2nV`>Ue#V^ zH*4Fc01=CG#m3?dATrUSoL9|bP|sNt700#!nw+{xJ_}oRZBAbHTQfoXRGt~aI{H(w zqzF|i7XFIs;c?R;SY zMcd1&WDYq$0~Xpd<*wNMx)?RoVNw0oAs_|S;gZ9$xr#gtVHq8i+75*+3QN42FMCmu zv8J%NHDW$19LA{BCdD~cMf$l~af|&{Y0g@!tXQjgpn$53R^bk2QY|6RsyLWh45dIz zKWdRw(w@YaqWauOvB9W*)hVx8g^J^qiWpldOT)Fbx&XMHiOq`4+KiMQ;f8P&65~2@ zIas>ZEhlH+&l6|$TY5Fz?iUcn+!yW;JTGH4+k$hf{2XwURjM|axa2U8HmG6H0eAtL zSyOnUR#$D6ODpAS35^n%O;(D{gFx+7-EgWeSXi8Z3pLOdQv`VRLKVB%>`K*ch*vd8 zY9tvJIE4kNUJ?OBa~PC5S90>@aCs>NOE8zkHMN3vCrUzcAPBXVYRlvN=+A(KRfIW} zMVBjixhji_Pb(m<%C*ULA&t<;iR(b{o8-j^t3tdicV2HHzijtqta0ns8~xUZbv&Bs znc9hL-U0&9j5NTj0>aUV)rw`Sz0Nyl&GZ%et%Xcof!7>s+06q*7XV>gEQP#dp;fUy zNSg!D64X!vkmb2YS;ZAs5Tu*O7&`O)%;i{fHr6ag0!xk~5#X+9fF1ijhDjF^Oj-iN 
z0e~JkZFR-Q;XuzB{{n9{>LiytI_w7R%BwEV+92ZCCjgdo)?Tgv8xHxEjD`^VPya&cp75E@6*gL8@mV2woGA!KmZ4J#V{$wiF!YXR*XE ziiOG&uqkdanln+6GhbBLZnZ=Nsy1~&4Mj!5S&|e8cwIN=iozwy)g~<&u&$zM-XriW zktCY%&eTOKKucCgSj?BI3oEl(v~^H5MbK*Eezshh9R#6?ksJ*24^~$Iu2_G${XZ4!VT9~c6m+ckytCSqgG`2*_c_IWQ-!y&YwT8`m9EL+!`H`Y0zjbsx46H0W zQ6E~{Z*io?!{Gv8dpXcW<7%(d!!@pNbUj6sGrU1Ppp#;C5Uqle9>+*xC+2pNbsZ-w z=PG@sO7>LmqvOdwD(Dywhh9@TLQ;(f+ou`{g6T#re3m0I^1O|Nn!RWsHfrFr9Ei~k z1|lMX6yP~IIS#~uJpf`wHlQe3R|@N0nwU62P>GO8X(>OCqczd?xHTNZ3eh5!7t!L> z3l@(f>~fzAwr;`W+?AIlFB|`OV&O*>W3_~h=(mP9Km@Y>)rz>U*HFpXV6@0!7%8fe zBI@Zo)o-23;4kh+@E1+S3Ucl8iY~6;I2*zH+xh$N;J~5F)aB^E3RaepVxvOhtr{NM zWUDp;zD>7kqe8Bh^HpqTF@&RfGpC+!Fv>SV7>>2F{~P1FwIVtQ;`j*-K?Sq zZw+9|W0MY?MqmoD{}Uj=kgOaZ8N&DmME)ZXUT}a>N1y<6s}VqpHt2n9MIvFpHL5`g z-PEg(OobRJB8y3Y8B7E420T)j-a=)iT*NMKxq0V~H3~8+v0!3Hl{q_)!>#S%u!4i1 zQ|Y%rp65MVXqJ#`6q^+g!&2uBTNoIHcw>aa6X1xM346uG31PwpP3_*1L(kLlSTHC# zZe0C=mSl+Kjgwii#{qXIDl+8rY*e+h@-wJkmm6yY z$+@YB9i}NmYi5SDA&>OA@)DXdgL95bCyUF{2|c9L_aQlXTrvnJBKCIPc6f%db5)#0 zB!vQniBG|^H0FQl7y0?^H$H8}ud9914`m^SXT8Sb}@ zwH+*UQ8@HSVBU0IHCmAVPM6_+V3?*^Z8a8!Y09_nIi~S0s#$DjmAMXsZ^6o!pNn0_ z0~FrISBo9SBDnCl=}!DqRnN5g#{F3=_hjHIAsut%34ZM7N#(0R=kX&pr?z9+gF%>oMARB3-Dm8(q9RI%! 
zh*EM6xt~iL`yXs4L;ag6?GunjWmrLBWzOp%eEwnc z$}qWD4auwmy*DwiU%T-x29k~MlL61jC<3h8Wgs~R$Bo4k!AP0NF_}3q;dd>A$B%;o zWj!t29@lrjqxK$hL2)DuPAPsGvIT!zl7qCqf4Q=(xDer~*P6?h%3gN;ir(j~BVgj_ zlgqgyMZJCVw&dinVFq_XWSl$V=_+~qDX^5W{$($adkmy)jap@7xG z!ei8N9}Z($`XWpb&$GSEIqOIfva!y*>*m)WZSvMn2iYC&^~-jlG*<%iC@QGmT0urD zuh|%Dw`b9%OEiWaRwLEB6cbGFe`H$O*>7G^WjO55#_c`)b}Lp>t&+l9X+ayQY#_tVpbNMP{df=1zt@rZf72Dq( zZR6OiD5S|^@Z6VJW~EP%L7`GEqmdlcJyTb6Z{4`?&X>M&<-N(=H!j?`cH!dnE0ca2l03g1G#y!_ zej8ybU-h>LZ#Ag83lamh@;qPe)g1e6*w|53X1Cws+2uKR4?Ebd*Wos~k=d1D>Cj8d zeO1y0WTiXKUt3~ae1B_#B39G7U8Fs^Xa?L-A`F^(qKlVL13( zq$LVwP&mj!J(k`N4EJ4R{NbrV&nu1$-URLGErWM7SS-xvodjo1P!C8&E%5}DxUV5E z_71~?GJs)h?Kay|`^|3r>oL>j4s)y7o9a#-HV?F?%>($gqx0odr`c)lN^Ljy;CE+t zoB5J?Lh`(1cBb~3JI!umyE$y`XxW$An`%pKPqn4f<_V-7Y)zY4{2fbmw(T_6cWA7Y z)hkF-RKJsHcaNYoepfCBk=hF2FXwmXa(7qqDNKvO(voVd zMC^9Or<7RFHm_h7qRnSe13AgStZ2E2>L}9@=9`FfY7`1&vzB9`!o|Y8raWghKU*r7 zs%w_wuIFO>Y?U&4l8FHG+8o5olzo8>{^+8qM?v9^C!$dTA0_$JS*h170KpSzK9J;6 zh`-+|+H-jbWhs3eR;`F_S}5g(Lcj}!0K%1gM3dwHYlQ$s4<}G{uxx7-0@&ZV)xO!) 
z3`9?HwE$HDF~KBtZeFMbDA3UG(MVzrqFu0*45SvDi^j=kq81RFy2NRN=v=sj8%$o1 zA<{W`&Xfb#x9GT_uspj{1J|FYZop^7jn7hT%&A}>d^9_r!$Lg-uv3IE3sg!X1+YM~ zb0H7?mI|c;de^jgcvio(3R;ks2qDx@SzRbml?nt5n&d9!%Ys6%k@j1wP(g$i2AB$U z)uD)C$rH6}Q=)^JN$8hA`>&CXME(xwc5k^lScH(=E8{X4=i5h5Me`8nxkw9Q!rh5e zg+5Zf;X*ss5RS~%>DJj2dDYwSgr9x*eg)q&zTPW1i+mLFs-bBuoavHjd>rRkr-fq; zmho0_yi(>zIN>OM=QZJ2lZ7c9>ox`hlVgpd$*~reYl36_-5AGOvC*1wtUH9~ z)@6Lu`UsA-sv6wLkJ^m6j(}qsXFh{zEu3la825uIOm65l!+p(EH{|bu1+3WQU`-=u zPr!m66XoP?&4TU`)@y_2IZpLlbEusw_Vi6B;+p@Q=9>2eqbLIru&dEza2^$l z_+%?sxF4pB57Wkntp?b;N(KKrhVe1R2u#?uR29tDPI5=9`;0kZxw?O0;t6~FF{Tf9 zowx(r7Tj8uc0YNg?(P)c;eQIL_!{hV%&{WL<2!&Z({KPR9lZGn&;fx7hX2*r zy~SxnYsvavYe}@=paEWayaoS0*#dd5zzL)B57KLMWNv?G8Xp2M&@C7s>@hHG)kCZU z6LlG11^DkPUOkxe@eI)Je?$h$xOsQ2MRT=(D~%CJlz@d*Ujk}Xy-p-@g4HWM^)4o!zmM1AQEJH*s5mUu*J^v0PD=l6a~*c)bteMNotnyxS%Y%iLoLq9}5 z70OM1Kw-+_tCU9Y1|}V^P%N={1k)qZlcR z@s4y9mLcG8YNPsn)F>X7=f|)lTECe#Ad7jp+jxkjy2WsRyOoQQLqcTh`~?LL zC6 zzuES#vtq1`G^r875G?PY(u6qX&2W%^qj{zMO|q+DgcuLK&KeIeEVxm<9x|%KMiGLi z?OKer*9e3+0Z#8Tsw2jOy&OIz&BFMP8jB}oz#~!4DPwUgNTx(N0~0R!hEu{V7T#_X zL5|CvbtA#e?TEeiRHB=Jg+VD!z!IRG=jr4x!kB(y$%}h{U4NFsB(mHSJXuPYNaA?g zarsEZ=-BbGd3?_>Co`!D_X~I)F?};>_c|ZE&C5jiimYJ2!a5k2wzG?OLLJZU9$R?R$%V{!ZUA;3|y|^D`g54iSfg~ zG+mNjkl~G9fI!0L2V9MgP$dA3eT77ngY6x!%+zG2Wy0?fZc)Tj^0(J=nIPxp=g;4e z5r8P(TeXkX+;|2BoIaX8$eV~08v;q z0R+X?d`gDiH**Y}dl2 zk=VXQST8>_r(G&!DS6;w-Vx)vM|nBMOCK*Hh-G`-6TDC!@AmUDz{?;nL%fXgGRzCb z>+T3ISzaiQbkF1Bca>n&25Byg)BIKuE%+@&PJXM17ce|Z6Wyh*n7=KFll_zfEds&2 zl(x4+&O+4Zw?iHxH{Ea3x(YmrOL*+Q!CKGp@S zcDv~w&I5Y|;T7~;yy;zaokKDKhUpG;je}xOcc4>p=iX%bvNv_Qv%D9i9<{o_XF_8L zqQ}w1aBW@$^Sp+;^ISWBfE!~wgzXJP?z!{#ZxJz4^hTv77N zc=`prXE{5tms8Bo>u%K4lg2&X4dUG>#P}>>lqu4$ruMc%n7^~Z2CI2bTAc5~*#I=d zc=;wTvQ%#Kjw2Fb?Pxm}!T1mH(jz6^eX&+X-#}8IM#49lgY{s{8yc8HK>q>k&07o` zCldS@R|W6m3A`2Hr-ZJbq^U4#%G zI1BtgFR&d4s}Nc-_bTO;Ksrxp99L|1qR@iu{P1rI-%I!kEepj^P(`SmI-jg6z5c}; zD-QUUklV2UA^_T@D&U2=71VdcRDI5^ED3vazH|>Jb^@%~^`5Yauufm76emCp?gS(E 
znznMci&tE?;$~XZ5)|||iPi{yjm-<8m#R&mfy=qvf70mRNUiT;F9T!*a#UBhg}gm5X+e-fkrkk~pMO&L0|s3hOalA?uMk8!_#$As zwgMzu^J%o|7{|iwibHdJT3vEnfOLn$*+_ zhBUdH$j29fNMNo=NdP~p9f{(#@CVSuS+S1A3<9kC>fx=S+u`jnZ>c628VJPN8uU%1 z2kfk;^uSrQ=r^jv3Ky{uLRd&72>kANc}i2MRkCN=)JhX7f~1ooAU@*V$Gjw=Sa$aR z$vG#=F*<>FQv+rah$D48_`?v&rUu)78#(k3)iw7!tmZ>rLW)5V5K-0PDsoxv ze_;JxxS@(zPfKrWZ)*X+!VkghO?*9`b2dP{%DD#>(-q={qZz@m39JjBFW|8$l&kPC zur7d7yv`5_<0cI--0F6sw)~%B=*5;#u&qTiEdY+zEQQ^SSY!wSQ8U+69cn;kU4cwY zAp{y4AvjRuXTdJbFDTCitED1jN|YbrxvGM20g!`vy0*sFTxY1&?92DLaKOOXsDP+gtYudw!lA+Gh4^eE53Aj(JpPm94Z^d1})Per!BvQn$QYOLQ^+fEAB_=4|F|akn8uTsSefN{Vlm@JW-zP z`*H&N>&UE1*b*&1LYqz;wC}2{-o#y%hK`-2DwYhM} z6-*Ef;k@uj$^8`#!lcL~EXoYvig}sD2=jHy*aeQ)`oT$_FcOQH^~h6EI#CB+@G$=d z$7fseCG=#8Bt*d-?1u()kf8g6>>a*G^P+Hs?LG?_8iUz!Lt z%%on-y(W0FXv5DVag(Neh*w8rP5DB7Q{rTdCX=@zTQ|M}^=Hv41?hn1<$WYz(6skq z#zP*d@yRX`5<$|8T^E2)gO?Tqw2SkSWCO@Uk|EUf`|0Ry6)OPK(n{+T_Ec5_?4d{m zQKuJ0wCViVzl2C)emrkC9(EWHJB^28NyVmhQtUt>D(<%!d(o<0(PNP=@D0q8#BOTW zURpn?2e7^}r5aU)*hlSxDHLC$N8M(1E5$#M@c@uu|KyHya0lC@;R;|<)@@MO_P=*k2vmID~_PdNuu6PcbE zccX=jZbfJaNVa;*(0|0r1(t=8OUbcez7eiRcNHc0?OthqsZuJs-@`BW6JCCfm#esB zwlz9@_-4U37bJo3PknRIH|Kn_0?P;+oDlVA0R|rUB>-HztV#BqAhhknjgfj42i~5R zm&_wA2UF)v{_4eHy7BT>r<>kDSoJTz5`zY`d>p104-n;^9$(1u?HYQSvA1#{yeew<=Z&|BEUg` z6`=M`nyd(5AWS`%iGUvET{vA*Y}#h*_wLI3_PY0XEm5&hsZOI}Jh%x%zeMF|(tbdG zRWv)Wc$R;lWT?g`|J^Y86uT2vg0TAc^AAk@Xk3*tc(4F~QYL9);jq=Rh-u&k9fU)Y zyDeCUtcDhH(0~B(r~k7EG3R?fmBI>g&PqEZ533e2FS>};@W{NtB%V#f!^xo0P_#fB z(O_vK&G%3u7IfHZ(`2pP7?NJG36tfwcQUgx|0O^i7ea9GKlcc#QwSoOn1Gb1QSLe! 
z`d|VjlDEjXBIR)Ma({3V!l^|Yx_XKviEXDWPe?ChiPNxGWyh@}V^#hQ2+j$-3-pTh zIZ5RqCCLQ{rfn^uiA_v#C&WsLgl51A<}sP%D)WBP8shOdBz{Oji_H;=`$k}dG)XJ1 zkaoO^@nCKd29a1?nVp9`YvGH!5K;a*bX_pfp->Zxp3P$BLiK_O8W*avV^OG4wP^R1>0KFu93L_HPpa?4K3P8Y`O|F zqKWwbq-g3bbk#Q8uhXi^>Vs}wB2;u+j%&L zxv$RiBGYIGcMp!+Al6vML5Ze&Et41CzB1i%5l}03U=x`x;cEL`(WLunWQ*;V-Em_& zwHeY*4GuPBZ}WHI2Ud?$)z%bP8!k?_V{Cc2i~22GD$`xjViOMC^sZ{93g1W=IyrCV zKyFtT{5An#Pq7xHFKtWGJ6NjQ-gKvw!p*;SXnp~eUg6Lj#*J~HO*q26F!$;*_osSO zd((T&ezVs+nK}#paEp1`beW^T#QH-*?M-~W>$o&!25d3F3;@HG%mCO&L#qw@n%doP z1Q9N5zys(alK{IOPyFOT(C#Cy(*GH4wpe0S~gv#E3Qgl)7!jVmcfth~Z^;)ix9D98cw^Gd!FI zJ7!?W*`Q&eQ%R*KoM?q2IFU(PJP`r~?y%+k)ppRS#(|K)PXkMl$Mn!ySKbR3oqr`= zbk-2J#7pX|uV^DB-^zk_8@Z_+*L*yc(1U{`0+%2w^$1$|3njQ;L~f!B%@E}pjNmGk zzQ&(sAN*-9pp1U209Q%@O8qR2fUwcSr%cEd4LhjZIkF4Nb|&aZty|C#39F?i&OYmk zRBYAvo+DSCS|HQVluDVfuAmq)jmjRhk;>BoGHTbY2vr@u^o4Y z1!@ZsO7tG-z_SL_uhD5T%{)QkHgMP(nfX8hhH-G~+;z^(yfLGFQB5cUmAn}(WLE|r zSEoC;bJO);fixID^l3omhwukxM> zia97{g*k=0Mh`+bMCEpfdmwnx0m}l1c~GOM6UTkLO{33g@$l0to^!wiQ5G+EOllaz zT@Dx%+eG_$H@YJ_>Y|Pe$3n19P_B9~&hRQE{(Q_&-lmfI{huRu?Fj6d&zzbr}0$phc8>Z>o2&@mWbb*HIeezh` zUlO-O2c^J6tWB|B=uimbiFxE7Fr9W`NMriv$bUxi)aQg!J3udy8F}aBxWHLUX}xar zl)qn-F@muGx{1M}+!`Z?yCZZfq&lHAxpz;*bhzKrNJhwa(Z`Mvtx%{Az2V7yb+k~_m^ zZ}M`1my5Xgoo|tG^c8yzK377wGD=hJ3KNk(?Y_m!mw36x%a?J<>`^>xV%X^}0RcI( zO>c?Kl{&uj9IOUP^w^|jjg;1;FE25%43P9{l0T<-`8qF@_(d9M>hLa%o7eE-w|H)6 z+l*~=Z{rGXXR@%%QIPZy5xV~G%{lonu zv4$@r#6Y$05NuS+hx*r{*mX;C;RY^p`>pj`!}`gvelnt;jPOZVt+j@#*pRHd;`H6B zAU8u5z_*|VcVBg3c8 zVmltno*F$fGIVBm^tO64a_Z!n(c#k;Qk_0MJbLQv*crT`H~F(?PmPS6Ji|Q0XNOOo zJ#%vGwhR}GR`}!AFs#u>qNiwqbt@RWc)k6JijzLjg+#zb(g&^SPc)}V`dAyK{IHQ2 zOa+b_tb;|_)VZ#f&b$p@{^ImcR9vU7!tos<)b#Phh`<1d#(?33zki$tAyJ{$D6vH{ z&XeN+$IQ5)8{dcn91lt`CnXP7gTHZWd;Lt#ph-7s^Oh63Z4@q1HUUm5`G7~0Sb%XZ=q}H=?=Kxwh^DIPMKv3 zN}-s@qt=9RbZ2TO*!QoP`@ps@Xh{-Y8%!trCuKou|HK}ME%B3gAiKrxrnXRJ`Y-B2 z+*@Uf-!+<7e_AQX>!iYT?Q5_@4 zNi;V%wq6uy18UyNXC+fZr@gThWkNKDRwT6tNElBA@W&PHlhe6_NiTUI7%Co72oTUg~@gY 
za;QO&yS|!~pV>C0_KR{B>#!Vbc*MfDM&O40WPskp{)8Hk8tB5kE$AZW2>TgkNv`T% zU5ENHzKv#Uq@VLN(ti@yQOU-nn2u%{MuQBj=ak&XYKUe~?Pq$gm%f~g^`$wgR=D~i zY(x>JPFC%VM%0abn`m^Z|2{sdo$h?gu3ne0hRPm{$AJkPL8zSLcZsV=Q6hkM&(gf( zcfnx{6!CF_z7G|^S-1iX9(XHDaBB*piBo~4x7C#FGmQLh?a^xf?!YLMj_ZZTE&rqKQ(3rvgA)C2$drRlmqkEv5#yTU4mMn z5y&R-WdK;uKaRZp)OfLe$pQNNu~@ihLgFbTdH8a4qnBf#K(R8zVo5qTMnxl^A1+dB zw-O+u2)8D%kq{>^K@b(Te+;;E{|PRB`#deY8HZGMI%tX9vEARn8|ZQVE>pc5UWDC1 z-83+Y1C`R<1${2ZqX_K(DSnXkrnOQ)Adr&>ZDlBUz01QmPU5`c2#%|$sPRaRTmt_} z(+#Xh5D-j2MyaVZ1twV#oZ+LOpRFq>UPR-Zt&;Qa8&%P612qE%v;$Y{_$l1RKt&GP zZj+hL^-DLDsY~oES|b`3*1lSHph0J5uxn9u#F-Dd_KICn<|qnfD25+IS?dU+(r6DG zh38RVArK=tq(^!|2iM&KTFN z1}Zd)%sXq)vaD2d;7jDo0vGPDqwP>h{$2bL^%$Jw%6<=<9ILq&Y)Fx=5^T2P#%Q6H z<2b%qe0x#}AOa6X;m#l6rYR6zubX?})HDVk;cCYKXFZlVo=t&Flv)AH_l+t<3nGvR zfCWDr0Lz(rU}50gr-6_oP@Z^8GeXP-o)1`IV`_Qc^@Z^|>1{Xz->XG`nM z2xIlp3PV3;p@3-$NYxq{GH%RMIJSlQfpWs)y<-t0%u5bmI1pLHJ1IXL)PWC>KIRX#;$AM3gZJ? z4DMrvAqZ6@(j|3S~mm!gs2hCa2sG#06Y=JNg7x?O>eVZMTBZV%nw@TA;VM@2T5Zp z3If3dpSf8o7YE+HZe2v^3B~CO!g|A+t+-T{2%IBB!()R(XBa!m8z?!j1YwkMu;x|= z@&iG@*L-ii(0RUPK~Iq@njCV!v*p@y6LXN6_5@A!e7Miei6T09acWO_}wv*M>N){drlEx z6euCwW2Wjh?Iv^+*BV(iC*5z8j2m!aBWu<=W+F~>FG_RO^= z7)-1&!^W{BB!3YZ{JDz^9vsx0Pg`R}!x#amiIC?lGPvql2!hu>$H?HCsEUZ!yq3;m zpoMA)+r_1!(+7kr!i}jI<>Cq@Mx;u_^C~hl)_q9Cy$)FF@Lv-*ypn3KJz3DQdi4W2 zsXs<+(1wB-Y0~5)W2IA2S*}2c9-31GXG_v(UNu{QyAdx3e?suwf#}@!-JC=KBO@R@ zZBptr;`nAN0AnW`r~vfKK?~AZBH-~uQr4kJ5Cabyl<|jgWgA5mm+n$p5dTO) zI@CDZR?lf=^38@H*F}eC0O4(oCx14sLOQ0RlhcZ8qoA~FNLwm^W|d>PL_luXxj^S{ zb=-8zE~9du^D>PG zNUzmkZI4H@-S{v#5=6S;tTrhU8DVWH?)!9Tbls=eUo}9sL-D=K6RhZSQ-QtVs0W3j z+T2Jnh&3E5Ex3J+3gn;zi#XEQg4DT#YgscFmX(*F6eYzTs0CaFE&-W?^HBgVOjf%X zVnK}Yikd>gQm79ren`8*ArR6newxOZ~2G&`dbxYNC z=t>M=gpz>?)EXj7OqCG};7iVUl55tmg6J?B#IWPJG*L+`#r!A^ri1zA5~IR-Ixe!t zs9p|-*PO8RY;O=^XSa9;t*&WBcBz`=LP@@;RMS=>|Jp9HJMREs;4#Jw?Qb4?LK>uGyhUh`q0Wl?jQ9+Lm z3jxCb9TNg}2N?KmIwr(+QPy9Khb3lNVNMC|5Jc=nSmF=kVH~zIlEZgY7zaey{ZAV3&GK>T6;8Y<90E&2cEJ%R*@gEw@ 
zH=Qqq4gW49Jv1)nZnBuZ+G1qRSOFp?z%mBm!fEAWesYgiFMJND0yaWqZ0R1NNT`M) zOa!~3f@7mg-EimL@=YKo1gu8*X=FsCl70qipp^<&Fs)Q*c+%5a9iTZ3&1*!*o6)G# zbm-y!wN^%>MqWgU#&uAvX0X74$N)Fu?;mr{f&AV@jf!)jK3eLM&_8 zq=s2#8iKF~L3D^Ajt~nTmLVTu&;yRW*dU3W%t>Pr%S?=Yl%33>XBP3`=H(=HT^sfQ zC$C4vsSYUIWUvF}SmLW>G(XblW)w*p=;C?x=fIem4fI&$6WPdY!pP=H<5njGqtp=C z$<3W2OxQVK2gla;BykShpFjyiD9^)RWBqu|T!fY#aVH;tsc{b69NOv5;4+UdK4CZm zi4g!lo3nW6a=2VzK0`TK77?_lsIb3R*k6!EpJ{}GO?mjg$(B&_0|5#8H#7Fy!Ae1^ z>@qXWUIZL6xWTYs={{Lhg-x{?K9Y5t6RjY%DXrKawPF*V_&4y7fBSQ*39b)ypJE1V zXVDR*ece-Y6qfkjuEqBU^(8YFvt>KXr$=}>%F8ic`gq|KDI>Q=_HAO?wy{;)T^8W- z!X@iMIWRDzTH)PpUheU-%FBITq#HlUJJBw0WD!TbMZaxIgnbfk9Dd(h=>1*!t=|$m zjKmzbQ&l|j_^!<1{w1G%iI)q!T;t`-xMcQ5hHVZ-kIuzQ+BvzH}|!)rFXZ65!PFFnmdtG zqO7+d72~Y$Yldp{!SX5mvvW)OKF4PtZn|eT;h&xAUvJ{;-N5Cs)qLdSNi`q*$34`q zl%`L27)*VOOMiL!ZD^Ssv&n|mYBm>3HA9hzsZQEKI7LdafJOtMmYfA63wVLe)2K|b z&&1CQjRfAh@pYw}TB<(EI6xYtm{FV5{ZV4|fYAuDMJ?K^;DjXAUZiBwCl?Y)s4>89 zv4AiMlMPsOhcv@?m++3h3E~+;b?w#F)ojFOQBIO-m@LZlg@x+R0N9LhW;%6sI6HJg z{PW7~00ab3U?|Pa*)BtPNQ0zn6vaH21d*Zw=0vkgRS4hcElZWBd8HVuTGfCTnm;cz ze_m+*d`6l-wXtTf{wQl8(;WgX_6S`$LyKcE}}>6N&iS36QdUe#b!zv`6XcNy*&tNaGS%=F%y+=aa zOX0Khi0YM*AS;xOsV+nt`?b)keo)IMRg5;Oc8P(kXdFjsL<{9q#SPB^7{kX>GfIYb ziWRAKGAZjrp--)J1YffHBWvdI^^Yw2B8!ie_D8nbH@ljNN7O=l6S- z#(~x+!vqun_1aySc7LJ#s{!yqrYeXN51S=AFkql2tGGhTomfbCP@InV0SMfUkpivL zAad;cP{z@P1gC*HHFrVcP&M|EvsYJogl6hiWuv|l6E*RG#;$1{a(lS~*Lgq#VY?Bm zq5wr}>3>BP@l=k_e%$nvf_ZvrrkJJRMppaN1j-y%IlUYkm=5@HPw&Vd7&lbcpZbGIR*Hlnnj20S*#f(m|FY z;zJlG1~T+*5L2a0aP5Pnx0=h)e|19{`mcO8GW5>JmZ2YhOc^@FTJSpu8TveB-ne-P zQ%i9fScD9Hd*r(>E<@h|8Tt+porw$+@=1}$hdBcoy7TyL`VM-mgTVM>$i?Bq?xPe$>bQY_KFH$`C5<)TDFe6NVcxh?w3@DnF{}t zvUQRB1+sOt=YRphIb@w5CuQq%BIU+=#Dj-ySKc?1twY3(7AIxv$`@k}LQr%AqC6`m z`yEbG_6s+9%Kaep22jr-Y4RVZDCkqNUp!Qf@+AI9z3g|0LhHuG>=l`GSPZ1JdVW+f z(vwK!f2IYaX&rhI5(e0$F~CHS6*dkS9*xP!0pDu5p=!v#qp0_5kWV8Aq{0BSK7t7fyXhPbl>l~&5L}Yr zJyNM8GRYLKpJ=TP8^sJzoS0DWG$FHh|90!zpp-ijD_u$4A@%R0ggfT^5hCup=R>Lc 
z-_jN8ycBUuwZ#*S^1g?>?>fIM6+z67l$dr(-VqlE%?=+?^1hv#9DJ(FX0Mgx9Y_PH zJ!o1&`u=7(7ULp)k7W;=M$eoLvnM3*Ku>^j$lGOY=1yu+JjCsQfiYtd;fin*iQ>-~ zi)Vw`Qlj|&XC{h&w~ifND~tz)3%6HGaCgZ&ao0kryYOu19NzvI$l}FoV*zA9_-VnN z$UdKqKz^VVBnAs#@2{JYznUWXi|p}pA(HQ^O4URmkFjpx=9*$FM3L8bL6~`wEKl!9oVF!d?IWar z1;gbo@#64O;boZ@3zvprc%4>^L~Zhy;Z&L|(HIo<33(IQO|I3W(Q^ z^K5)0m4T+(0@!R+Jav~!0q(!zvN{mG+PY}R7h1ecloDlZ@Kas`*^o@5YtZ@MQLMcz|t1C8_z_HRbtAnNS5Sf|MQ z-{IZYc=@-yP?_4LWLkXhDHQ|JE|5NE@M5}C*hT_V`WCQslX;$A*WfR(w|IYqvWy)F z;{N!02C04N4Kxk9ppziF2BJxTyEgNP*_S#5cO2VsPmKi?@}1Y+&^h40ny0CK2mq9Z zH^Hq4O3y&R)IE@}@YFqkmIAdF&LLcUpnY(lIabwY@u$!~psfGzaLnk5xO<|p{z2MI zY~N4kXauJh{D->{90fx4gSexSUbH0Nx2J*n!K8h61mA?xfhQ^kp@zccq;}+tHJhh1 zOXh@9k`gkAwgGs{+93f9b@XaP8cFvk`Mp^5hh`Erxg&wT=m@~`rd^I0GKKt7C951} z!kWotUJCPJSu=RnK6B>e+0%1{qFp$ZKU+9GJZ29S?NhUble1&R;nQbF3uE@k=-lAg znbRlnZ)h|>GB+}Jdi3X9YV6eMA>7#`_Sk7WJ&Dw(#zye#>=;tx zS{ysgTu3)OI->s8{vJhol!6jMlMd=?%wQl_U}*#^tO^HJ-Cw6KQL;NFzW0Q^;=-Be zgbEt%MLn;H6LoUvY<3i`Th(w0i)p}m2R0)xvnnjuussrnZ6JBEW-cw{tFzoH9BX7~ z2r-i!TYOK7*V1spVy;Nny&^)vE#xbM!L$w(R1whx4TwvW3oBIw@`UgXldxnvMe%^@ zdh8=SdciXWc-3BI#YMNJGO&KNMob;Nag_8+43NOu_tPd z$|}v_G{!}6KuCPVt_3sn*v)*QyrGph0H838#T3=5Z@f@6Xs&4RtW*GGQEpQ1PjC+q zMWdyrRs!Zx;jt!i{%cABQlSJ2bz_UDQK{hqe*97sZH@UziEz}0WnL8@?Sh0FwliMh z=33dH#ITJr5j7%MC}+sy0qaZ9p9OV9zy9JV^W3wd43~>@gTtCKyf>P~?~ni;0OD>R>%mMe%_eO!h*836=7FGiyv9}%JP3_NVa3XA#Kh|jac)QIiP zeepORn{nb1D=bz;90;DeidbRwRnS8Ty@t$XpQaZvfIF0dGmQ|4vh|P^T`aXf%Mz&-L! 
zxQgfu#5F8z1kj7n!?G}H%2P4K34Z)EYkimrLjj+__dL}PosC(ea~Q_{3WBvu41sIJ zJOc22iH0+W{wQB9$Q6Ul#;!t`qsCz1icIA(FoTuRCsbxbRw~k?wb79Vy z(-z(6dkE8FV{BtVWFogUI#J}x1SD^cMdu__9~CC+lSik`CmzDs+H2m&=U@oYRrGOE z9rWqxCfLm3#Uwm(H|~HwHOm6m93(36Nc?A7$6Nx3zHwfyfQqV{0zq)@;HeYIu##19 zCHhu@(&|z?PI?~#P7h95KGR77oVTBCI@t6v+xJ?Ru;V9ey z&BCe#qJk<{wNyn+LzX#yOcBhn0_~i5#KOEk+6flIY=LR!jR%d$2Gn=I72BM~WA`{( z3K_=jNk8Ykj>iT@lek#b9*a~H=`kqQK*oqt4rIK_Tov&oWbL48U44XAbi2ege-It9 zPc*m#<=)1MY3&$d4s)oHMpid1OE2N;!RfsOCRLheF^#k!lwGKQM$I$0ijaVzci=(e zl4zbuELJEffsc(>@W4;+E#Ic*8KRx4IC#*ZfYX#$L(PW|Vwz{-0o@eE1}c?+4Gt&h zP+Nd5k}eR6Ay5OEYgLL})lRx)cBDtcphShpdw8vau7aWTPS;1hB0!Zf0s!zX)XSfOqLO%^0ZX_B3DgV#`Yi(W`6`o&&4b)o(ja!6tN z1&V?v4rHZQAIyW&c8a%2J&CocXdnOr0;N)>JS76M_1yuRn?8%d>f%V-uU5GH7fZ`rBg=UBHz4*dJuyK zG3Wda1fR+g*NbVj-?p+0SKqc+T>HIcs3U-wS*qM4O|y&u9+@2?3!UCd#1m{}B}e@X zHldOeGk^|9b9zsZ8dxX3sZH+*vei6y|Ab@l?|6yisUk|H6m@zV>W9PbJ3$+w-$7q# zldYl!yvMuOaM=x=5RdlRBK<_nGRhwr`Ev(kioL0$sqK(8((bwbUuWXYOR*H4=1$m9 zcT<{~?nV5w-qd~*|57{gYkm8qB7H(Vn`a=CqyX^Osm&lZ(RbPqfHf_>(6LPUH?z+@ ziFAIKI+4=1nTlFuxYXhh9|8zT1T!PQJ(nW{)q^70G3 zbnzm_`XbV&w9}>0wM*4z34jw}8y7CnJ+mvXgIque6jR>uJi6!6)BUM`t}Rr*y!N_#_6Q+A zkxVh6eHT9QEmSW286+TN-OUjdFUHW>Kf zU>EviEv|hM*iw4o7xtFN!Y7uX@(Ae~c+)Oi z(ZI{;yTT1$L08@LNxjI6AZvdOC%H!h+>gqkEh*P~3Oe)$!=~$xy-@fpq<$DpB-D~o zn?Cm($#|<;rBK#~UPz}NWqmj~wWBE5q7OhzpR++9udS&YH0XU6I<{0DYKJcMq(fn8 zIrOMx5i9^p_q7G|ky>hljc;t}3@zbrdks3a#Z!y3Y0`37(4e7(qnotZY_8I|()6lU z1aW94^7{CXY;KP|>2aYpgEf+(y4o@w+HnF~h)z!(zNe<}?vJ?l+-DP!J#Glg(GC@o z&&2N0N$5YDc!{A=A~D{>7xp*`y#0GCz17uK`hu=d*>6RG?RY~LxE^1IZa-kQym7rj zm3ORctg#RbbN#mdu5H$5`k^Org`lObAzGi|BdRMbzW}qrF;=dqhrM#JPSz!tVZl@{ zd8kCQ)oRg=j~n#X?VFq0uHA)1!cv<%=%ZNhKf2z1_{5J;NkT>jd8s1vB{*)5yv&gu zBWFNo1RX-GJdq|gM#`7fw1ML}A`5CfM@B`m14|UhnH{y5RU)!XMi9ALv#R7uq@N>; zAc5+mLu6D-)Nu$x2I68Zu14^x`qZ(BM&cN_+sCt(pi81dm)zaopwt)Qf--7cwk$vN zrDeq>Hj!X!5f_zxMSLW+MiO9g9%3o33_NM6XN*gs1x^AQrGyEw>4$yBtFU)mwSz#P z2v&p;^Fp_tk4@nPeO>_pCH0!{GtmCE-~LK~JGX~6b6GfOKVZ%T!dQy-BhE$;x}qIz 
z$sr4yQ^uc)2%M`8p(_|yFLhvB&5E`LDyzdYlDrA*q0UdGzNI)Edi_91Tn5EjO zblgMudmioe00@ao^?I6@XvY5%K+76H5c>6lG4eL($^QOn^lUV*Wl5K^C^0~WVi5=1 zp-w9aM13;E)G}ad2FrR1t7M4;R_Iq)@c#@}-$7>%tn%|p)G>sy!XYstaZdK* zQgT;*0*6mxg^=OLuv*0G6jXJSpT@Sr%lp_;LP!`Ljw%w70iVMsaFP^}0T+!+K!EI# zvIOAgK#rfs>H=2pVTG$?VqEQ^mdB{P311rM#eW{?WmwK0zM1yOG`SKg9ofr!R8q1p z>YrfPLDbDumh1G+P|8Sj+U@Cd+S1bSD?M#7K4{yKk337E+m`+IOm($qS@d6`N#yZy zhX%fo^H74b6KPA#?ck1>gUv22fGz&m4{T1Sz8nH%!pS(=+Pm?)BTch8OIH35$9 zTQdhOwX*S1g93VP06I2P>Zn)D=0AIhn4(mY2*s7zb31dn@&kC}U*Zaagpj8dB5q+q z^u2-U4(@#Yy!>YPLI=2-4*?xMMUYX4E+o;e7JE^dL3RXbTM z)(^#|!?+g{jzj7$adj`sIfc;}vKCCzER9|K(cnlgj?2wsTU@@Q^Pg4BSHw}Ms>V^H Pmd_h`qhc;zUcCG}zx|~c literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/shape.cpython-35.pyc b/tensorlayer/layers/__pycache__/shape.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..383504efe1e5ff466e4b1baf1c1f23ecad9e3fbc GIT binary patch literal 3743 zcmd6pTaVjB6oAKxb8WV*dfDD#O4)W@6T>r!S0I5&x&`7DiB}?CnGEuzTO^-) z6&!jEwnf59WCupeFj_?|94*a8D=>Np4RN$Q8wI@rd1BRe4cLKYB*xXWA?}f|L<; z=nLk`h&QWM3eVFa3+a^6s1G}*P8fN)osROLPVYyd>qyn1PUul4T?$jn zA)_*iVFH-CuOpzh)Km*>eC>+9?EC0(Pk-;4vtWy2`&7+-~MKZqQ8<9eN{ z+p)IMi9!g>_1pTrwrS1nLnd*CpsCyt&Cl=-5&`o0~eW-KA8+R*SoMNAbb`c=hhXD}DeW2^ksWmCDRlaB*|wRgUZ! zIRiQ)=n!J(i8Qe>Qg>N>HgG&gWI>JR$f!tmV2T1cv!f=nOGK8*2r^e&R+V0f^mAkx zG|+r>h>U8fISxU}Kwfl`Y7D2Ucb%9RB#w!@eLNcpx+2Cj7cf9;gWzwDP=-p`e8rfRaiTzIzga! 
z1T#X4vCy;fi78mn=M_*;T0atg2HL+~YYE|9Z+#W6emidy#9~Y5}#oj5rHRDGD9Nl}!+}6UBlH|K|^0mb>u&EwGiq!1)EvY%$mzI`Cfr zsdPNjJbZ@~P}!#<3lxwO$ZjMXoc0_CKuoIdhfZP0WX5Q(2f3Zzj$Tir55xFhvSd}W z1hUvVmIrpQL)S{7O}tfT?R04e4WJG5!0Ii+`o#90O6%(-XHxRXwiatH4f zKKOsmoo}JDfIIm`+!<-a0DAIBCghXSO950QBd8TLWXdxbRS-s5)Krl+$Q(plikd=a zFL#rIx-r|wvw_l(5mt?3TIw9aT;WjTkR&JjNh$S}pTOahD3BNY7>Z>Sr$AUHKaFig ziubXlln@p=9aSbG13rgW;73wK23#~Q0RggG$`XK|13i8o#RU}ap}<)(Ij+`F(_>cN zgm;$c#ebgYWth$xzFEP@S)C+QWwBSnsQO}G)<40tgRGmItXlNWP|8@eTdnDI+SJ>(wh+A0Od}m-K12qgX1}ITN(#-gw>lhpc z1_5aPz##neXMov0w?80M!9`uH6_J?OIDj*U?+L*;-mV9nNBm9HP;i-lfMPLkF5#zx zc=H@S>Y79G=2-4*?xMMUYX4T>o;e7JE-rzGRXbTMT89$TjW|mP$06lQQr*jPPGK~G vss-yUTVof0G&s_W<8+JImXz=4`ez;M6>$_q)i`R@@_8d~RLtee%a?x#v;&TM literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/spatial_transformer.cpython-34.pyc b/tensorlayer/layers/__pycache__/spatial_transformer.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5693288b2707c2f8f8191720717960184872cea5 GIT binary patch literal 9158 zcmdT}&2t;cb?+H`gW#t~eNjt}td&G)DUhP$Juq5nwYy$>6PuAG?XC@l84fW6V5q?i zwr4<*EHIUdHd*_$<%18orCjBz_?C03QaQNFm;48Pv`@O&t{nGb|K4j10BWg~T}}=G zFpZwque)D&zkcu6uYXgSEPt`_^ZMSZ5Wf~N(zMP>!6GWdpo(Vh| zJaL2sGlltq@INxeXQpW4H6t)j{|pLBb2JbtHNvlUyy7A`Pnitld`cM3>=lV`9lC3p*Tho!E()Y<{H(V6lRz;XqtHqmbcgB%h>m?4IoN&Fxid-wAr2#riwW zwZ3ZI+O}5qeaA|iuwB(6V6b-VA9bEGs95TII2Y znEy>HPULWg{-!&a#%3>UuyzoSUatpU(v#td#o~I8yfBVNO*}%Uqb=rI2^SC6{DIf> zq!%{46U|`dxuu&{szDr!yXDsv{5leO6ItH(tB-jrBKc>l^DE-x#VTTGC>@ zA4E^rz8kq%&{6f4pR{|swMNug)6zk~p00J6Yb}wUx7KlDC?@hl>%mR4lv`0UF20_F z2utFVC832FpZ6riC$4Vk@lZeA=Z&_it=1A!r zt~`ExtF~&b^RvCmthHK=cE*=KkBLpQZ*S{Gf1%m5hgvgm63oWRh>PX1-m;Qr?ITC} z&TinX3`S|Q0>713u4)ZwkO8I-S;9#?5AnqRf<&7%*hMmFp9+KCO{Qizg%8oMux*BD zG2b)z64*d?G%%hm7(Y`SYqmVuLKxB!rNHeJ$xOnQf=THXFeBPHiCk&^g{f zJ({jFc;f%W)Sgg2PfedL794sxq z!Azh)Su`b8I(X0lrI_;MQ%Z?^FN;#<97!<2T+l*RFW{7ChH+dORMpx%k{4=wo>(Bk zL#u zR`h2z(;PEl|Daw)^rsk6OsB+|F`{k`oTv;=24TmuDd*f65&670K0_(yb^dr>{7|oR 
zK>)izivGNq?_)A2#5_&Pg6QLiX3vQEbH``-4Q=q-Inf7U8pAm+`goz-1*Xj)cadq7 zTVz3aU(=)jUrvaAfvp6r2RgaL3|RY1>AGqP#JD8-=b7g<=FyZ3Oj#PETx81f7-f+u zR@yS=U2E0llroc2zLruj?V9IGN|{S3uQLT=e~mQYeE)S?v-9)X=pA2up%mJGWmI}C zEyd(s{!-oRFO;6FyTYYd(yOBX8W!f7=r7^#y67+CZ$sY^jP3 ztLVvY1c%|VozzA#haLW$Y^N$PVEg)p%DKs5*OL_xv2p10ySsyoc$@8MbyAXPk<*kA zh?et6RPM-=5zHkTilmxs1b)}vJ=9)D_h!` zpv;4HJhzm2i2uV|DhvC+!71{)PVChSl-h^uY35cv=eq~CqyKjGUqk=7^@88A$vM}} z4;(zZcp7-zdiG!)B4!V6QF_SvL*$kD)WHM3rw#nODz6b?)nIeZ?nGXs$y-En7f-y4 z1lHX=WAN9UgN3JW8Q%rtig6`_x6C5m@R$pr(KnBG%b3EK`0&kM9@Hal-2!}FkCqMYMsGIV9nQt@L~u`RJYi2^RdHl~CQLXuSh#WUGPR{jb_eL8C4E&%;GA2ROXovBI$ zc-zI%Er;W?T(bDM}mQGQFW`{ncT4Z+^mHfwx>h&`tMJ23m3J$sIw!e1f)T`iU+7?3Hw9)Ba#_# zY3jr#nB7w45pLBE=@Qm<r`olm@4A|2Z;cjkT)rL2T48i$ZpX#iD{d} zv`ymSHW}n*b)zGtcjaHAD$Xio+9mCkv90qiUO&eZQ{M#U$=H*rCgVMg+yYGX>oDBc z@r8jm7tHI%(ekKuIR^8G)GhQjbm7&!GCq{=qj?^s%2N11QpYS93scRENk<0^?TiY@ z(Vo^vjcb+C+}+CT!lfC=RbPIGB6ly3E)7o`)Ci#s+UN04kdP)p&tN}s@cak31Xy+` zqd`{t8Om^k#O{S-F~vXO@Bsz;Tj*F;AWrw0I1#x^u=UtZzlWvgV zRIy$NRqF7|7MryOMK0(=PzqsT?c>M-wqSFG*giw&VyGo_6;^ou$ENrMadmAc$k|p+ zW!Bb};UmUAb#jVF#^j6gZHmq!gx0EUSJv3Z2Ae4v*v}^4m(%klAJprefuX|Q6^gqEJ zgq~UAcvXo0|5c2>em-`_Lz?!6Ydd_e*C6ai9|9W$T;9bK{}k4R4rW3;Dbm51AOTDy z9C=2BLl^<5z$XBBbQEEuGa@hz!V@SE!Ujoaa5%_`J)Gol8sGx~&Wpgb&wx2_R^ZN> z8F3a^!T?a%Lkx<+#pnNH09&9X;1e`Cr~y2l6Hl^2{z4?c%W0{OEa_LuZRaiJPaF%eMaofLdXmq5Kx4e;Tf?vpI*{V z65uEB&v&-S>x1v!0-XiW!roaLAa#m^!>}U+%P>fe(~a8KU7r(s=b00MzI1Fjk^n~G z)Nl?BfofkA$s!kHiotY|juV9IF#9OwNgquyVt#rslN`InIb-iNA%AP=3<9^|H6FpN z2$n==nGPQY&K-N!pa+@TP-P-M z&+qQ)a~8o0W$d!!1zafc4V4{rV-7~(5JYz(`p89RtZG%|SR7Tf5k>S9+g1et972&Q z&rUkVoju)y>A|VO;3{8b{Se;kduT*Hpk#RDP&qhxT(C?J27n6)eT{RUWUCO6NUvz* zGLu+>_}wUM`YlyVnl?u%5&w%qT+aY~m|<7tcql55iy8`Hend6RFjQH(n3Ws^;bRWL z5H+58cXAwWQ46CRMqH@S1x<~n=P|13fDM5eHAP}Jq9DLU4(_ql z*+=i&-+FNG{E6g|4P`=|&wi?*N^; z)fttIy`U*Sq`n+Vu2Vw#qRf3YH-b8&%fej`W5H zP%nqW>vB~}XE`20Ne!<44z2wPPy821M4@6#o6ERHDj8LC&RjH>GMDgn!@O%;Hs8oB zWiO$mV$K+6%?svQc4_jAakTOZ?XIQTRLzlQkvD^8m2De$y1jt%72E!#=LCb2f^EA| 
z!?q=fHYP5|nivogT>Ec43+UwvlZI24kH7%blNpU3MPPt79b$FU2s(4!u;!Xw}F$ElEa9p3klvOe$QYkzA IuhWZv0CU~BdH?_b literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/spatial_transformer.cpython-35.pyc b/tensorlayer/layers/__pycache__/spatial_transformer.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37208323c8a8d23ebdf62e26a9537a94adfe7c32 GIT binary patch literal 9106 zcmdT}&2t;cb?+H`gW#t~eNiipRx63nazTo=_rTIhD|^>o#}2&~9u6@BaLB<7 zwr4<*EHIUdHd*_$m5UFqN>whda&pQ&mE@L7{)0XyC!ON6FZS=f#$Z4#wTjEmApoY) z^ZIr7>({Se|N8a$$;s-q=07#}R)qL(G4{zL|304h=SW=q3=s&?HAK@8hHDBpCjwJE za`VD1h#)7rdC@GO1e`??6h*VdwU951W<|JoEep3IIyup-3Kzc#B7TMjk*NuHk{IZ< zCcfiN5$k7Yt0KHf@f>wPDT=N7^b3e1>V_&FN+;}iFAV)^+Bn`$Bf zPuz}V*OO{?$4Oed_Bd&2tLG%X6Fkb;*4+o~wjX-mcaA*Sy!>j07Ac56MLZLDa(Ln% z5@-tgf$%>!#pkAI<25Isr@sXSr3D%Yl^Wu6AwD-m8_HzufZ-%B5+U{sG02JMhWKJp z7=yeR;9ZD;Ns2frh(SSYJ%(m}`0y`~Zq=*KUqhIosMgqEEFOgg1z6kbA6R;183V>1 zh1T8OBJMkG6{^tP%iWoqL}_qJ`jYT>hD#hzzvv;rrNH@07` zEf!?kV!lR|tA6ZAr|Tu2jQRDQ(T87uWNlc1A17ASw%S4DBuu3)AMJWp)K7YSE_19_ z6dpuDpT^<@R_G;Gyc_ic*V=UsJS*AtMm6JIr{`Hu!hY9gTUfDnJ-@S?tXR){H`!gW zsHC;)grOJ2Pa9kh^%G~~D@6c{6+|ru;%bdThG&u7PvzJ--RHYcSF8gk=zA6$@Hp4n zigo9ywS3?^R^o)abu9u0Ysdbvw_zpyUVy>3Ek8W14=N@)iIVAt-cnhXPYt!sV+*1G z4J%IMNJBs94b|B0hb^`a;?eW<&`bI?y8m!2KH>V%Ur98^t{!s6T>i(AGr^1nx))~l5z314kAp6 zPp*U}VqEV@#IL^Vg^8aWWvbx{L$%zychB11MzzG*-nNp6+a%(s+Iv>mYdBImN6Sy1 z+-a;>Yy3Q2Vb(^YK`Z0SpC`no*`Geulm1e(X%CHd;3Uw-a>m8-SnpX$yYY!5eP<`| zmWQLX*?`|mE7!G!w8#U~hg{(#o-I7_FOg`U2FFM~?K5G}yUElXr`SaFE1a7lI?VSR zn+cpC?=1UWND^PtzM=|PXkS}@@jHC#rlcY1Ldw-E%1qj7>R_lFJ8nfiPgU4xdQt4D z3GcAygs$!S-B{(L(2LPS;; zFu`1~C^i*rLTnP4DCHp~ z6`Oiub0=l#9;U@$n!v(^lbNx;zywtY(^HCaeY2tBJuB!-MIRI5S4n&4Ck@ z!O4i~WH#lT8zUlL6es5>gglP=tq8Q+Xa+jDkgWP4NQErh1;WbK10a%<6gA%WJcmi~Cg&8pSSJHXa z6o_#}3@$Ry8_c6AmzZ*OjB=SNOJkHprdVmq(7U#(Ybj+WrM#I^pmxo3J*CX0l((1y zvEL*MxHx!==Ir9Uc3>wrUMhw4U(ZT!rlnBswJ+7Z^-}5Sy31UODZL>EZ(w3>iosR< z-4cT({4I-tCFVOkZzpvzsGoLywM|vC;o&^li{Sk|_IcXD6|lr#kRMbfMqgiBR|PjY 
z>UpvTB32H4esh1A5%05atWQevEpnO?g2Zwji7Fg>GJ@+vLy=UIt-$ZuJ4bp8Q3VD; z@;q^qUs1)5gN;YZS+451t{v8IE7R}F3q&!FR5=AisxpFds;KdeDln8&%eugB zH%nY(w^fxI81h#ruj}MVPF3hShxU>FJ5=W38lF4KJi`Cc9hHYKU*{D09VhmhB}(n1 zwKQ|5S@7LM+tGhJ`md$`+-Aw|+O*R)&F?yRcJQ?DxXt|G8br(=-l6n}^GC=l^O=KZ z2Tu!+tBM-l)eZLNyfw(nq`XBW_wmF#NZ{Vha|VCSIkG{JM?y;&y##M!l_6vG^79p8Yz(9(5_aeqgrq;-1w*#NyD22 zWq?F@*P%urpD?H$4N~Bs;L0c1Z{&Flpr9hWDR{JO*l+Y|)7{~ei>(Oz6MIiMRJc_f zfS(H!I~-ixDI_wrsVa6CP@p4ySFayhfuqHyAxYjj^kY?I02ZfZ|31p-g=l6l@SSuhr+o*R>n&KP<-Dj`RUTAwy<)+%%NYqJYiW*}E{>6;XZdv&mA zbl{*y2yf6rkAH!LObLbt4~lc-Utv>#Z-+G+v&A zZl8-&k+=kFk3IDVXlm%ZVWheXtSUZJ=*3EQ3U`hbgF5IP2A(6EHTVo^;0#%EKE__h zqq2s>Iu*efMN|H7B5{9YKuyO$bXW;S9a~TS7tt|}JMdbFUm2-$5FF$n-N?E)J{EI8 z(sNq7h=EWTFYS+(&xkc%vEF^ky`*q&te^4t7lg3Rh<`oFK;8<)#5Bm$Es`0dweiST z^an-0SS|*}oTI>tjyI7OSr|Pg>o&&}#|L$$STBVhbvR{{{aT9x5%eK6g)pz)<;Vqg z;B|#~dXCP$W7v8FkD9NpYg=xki`6q z@wTx9MA4$R^h0s`_wmFHBp!N&+dztK6L%2U57RA_HePIu1@RefIC!_*suyJil^ic8 z#3PvB9~S?%{Md3V#N*G5cIuG*C@B9`p-x&yY=3|H;Qs4eiVoOvzzGW}9Y4#yJz5eU zOYgvs`mx1)WASwRH5EyxXGPR?6SUgiu^xMf+6RHvk3DEJmEZ$JoAuod#Zj%Uv#-Ok zY*2b-f}A>>mqYPX^wlwV40b#SZ{cQT6VP}xWRm<0XEY4WzakjF!Xe@|B9MLQLFQhp zhlYy-Liw!;^7 zEdqb^A+$lr?B|92SCIfNr=>cyq+e~on#rt4S9IqAXn<7!LLf<*t_|t5f&OqcilG9h1GD25 z(Ll%sZw9nJBlcz?VU7+5C_=>WoY=W?k>sRFUsdsOI&H-p)?>vo;I>n)3 z)Dc2u7$HaLvi5b?7sTF0=0un;9T<)yKu|a_TtGvh+LuML$i>hvm@d;%f*>7Kk5bln zre8$-^k61Aa*H#@-Wx)`FmwiC+wdljU{(ZIMR$n~9tO@Fd)BZA(gRi1nm#>k?g9#? 
zSl(;Q4l~fPP}a&NGN6*UgcAxMScc+}BHk3qb>SjP3bS|%3NlWn#qd&rhIB*h-5fTE z{;A&HEwQ&u5m0)ERc_VKZ@%D7t=fZ^)DBV7R(HJdYGBO%)w&UH066I@!5cAS8*&mG zH(~}16tUy09Nv{*&^D)x$7&+Gppx<`h=33D`OSTO#v(+aj2+%{ffOoy*lT3+S-5F6J+^d~7I zlpQxz0sA|yN~VVc!2N^1koh6WRwE3NUbe_3Cb0zZx1zA^cT_oP+Z>}r^e+l=(E{LM zhCNl_p{OFRTquC~m};6~sPc5jDmeo35y?RaM9n?J<)DqA^JZBs)zs9;7sWsIL zWLdP4!Lq8hjqBNdK>3<&f7*9~VM)oh-Kb^Tl0+L5mjg|V1qrGBCmzOvwS{6nXV4dC z`AX~`HoDP)7c&^etz0BMB_u5w*+QbK*_5-H!b1C9+@TXx(wTYB9SfuUGkEOt|%z+eRTR($`_Xqkm*Sf=>hbl$ZU0y=;e+bLoZxB`k^O@LWEgoBwK`Ywc{j9UO)L@8HY| zaDN{k^$=eP{s?3MPZJLkA2I?{4+cJvB!Hv_Spb6?By|`O7}VkWq+y9Ss8t8vfI}Y! zp_SX|ghvGJb%-H1kGyIZMp-@@rDAl1G(0k`?I%1dx7eJ^*m90n9XI#!QGI-IfB~_% z1_%h9`hduI0273P`v49Rn}cMrgzyFt)+-22X)To{x^GcjPsg10rd-Q3rcZdR(_zXJx|IsfB+V?$Zp#^3%5kY2xqh-7=_G0Y#Fe|?t~;_Qwk4mX zd8sOk?|%8!oy21%1kWNhW)hwD>0X{^oQV^)heP^Qmi$YqFn}4S`#c?uwMD&D*_%_A zWprssb)6=QQjuyjXFn;+($`#`n6&XY!>7Ysp3>-rY7}5uF=jVZ=d8Vm$&o^%Ns2+r z1(WmlY~pD-N4+8$_BQU^y4}6C@ztH%vyCn??(IfVRVLq=+Kgo-A7K@` z^=PnCNG$otl}K!>(=O~tSi@%%A9a9jv%p>hcoBlU1S!Bl00KY*Ob9GogD$WmJXqA= zkaz@%2@(L25Bj(@%ZCIAZn4Xf797+;Tm#*(8WSHTA$AZXD{u{^@Ju&h z(yC0u?|%TX^osRXHQ54t3stf6LeOoPoU>qmgW#Y6^G&CG9wrx-w$Y+hUxmf7H%oqU zdV2K`?Ka`!6^q`3XW}sg`crt(0=Y$SS@HkdTl-jhKMMlSpJ6B;Kev~?2$M_Bw})}V zh#>1SOs-gKIL<5!MRz)FoJ^ReS(POd*!p`-PX`9Qj+OL&zxJ;yX~&(ZyLM5ghD}*; zvvQ`rOfAv#;6;0f#uq? z{(LNP(;RVa2M0Rz^w6B2F`2Txj7KWYk=M4z$2`+^e$-|zhUv(JdKfua#uLH>wm3~V zEi;`K+1&VSPnqVouVP-<1~5LJm=;c%Dl`a<6$KYb#VhReL>_;ok=F@Lpm;Wvwz>!B z-;dXzUJa(fO7(RE#ocJo*src4Td4OIs zy=$aRJ|lIz^H<4D;*(8sj{I0(Bd^y^3#5CT!%oBI)7B5>vlT_tJSj0Pk!eTK%aUcw zlSULJc^pOZ3i>e@<6J1sgm#6noY@YHi^ZAO4HS2Oac$fv8?W}Idd(*ni0^fpCWv#% z4amTX+4*8xRZNCizF+x5&o-NpykKSQJUV0EMqXMUCcIlAzvm;RY(iUPm9+h4@GtL? 
BJy!q# literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/special_activation.cpython-35.pyc b/tensorlayer/layers/__pycache__/special_activation.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a03b32498ddb37aaddd15ff4b15c7b4b139aa5 GIT binary patch literal 2079 zcma)7TW{Mo6h4$>S$3Q@YkJF`I1I(=VzHY51qP$&I;6VRk|J#b}h~qhqAuk#6to@~nEuW?aOEb25C~+{Y;gxNv}w7`s9= z2$i~kmT?cpXa?>*I6~X3Nye4{-lBzmZ9+>Jq^d&oZHi;>kkdh&F)0W8XR7z9Wzkg9 z9VXa>E0M-@m&Ym{raYk!)0{IwGh=42ZGjeYTu4i88LJmp4rY{P z8C?L$p1sMUG*1=6nNP~H^fec!kj5Y9_;gr^Ga9u}g#s)q57|xGJ&zYLF_H+Hr0Ap~ zXJYmqCZ1Lko-6q=h&g&3p`<#q>l<3->5Gcj9->BK;rLxX zX6Bjn?x9lUpxk@fw`^DIhKP1^9CFd zhafRV0w8ihom2&qV;|-Y3>~<7M1bH{>_gIqH$LQRpql2!*oAR`Y!e(t{6oCekHz~bC-Cek`tkAZGxOTZFur1yJLouc z39_!j_?m&kW@cC@`=r~!zJx)V)-lqa$-dY0tXm`Cqd>)?hIrrBxV7K_vkEXk7V zrE|rl8;P2gC#mrF*Ii<-9RG`>dtEndlh8qA>otmWv#3;ADXIN5x8LTXcWN)qF91Wp(QWK8wQJBcR+c4U$>=jss7q_VqX+CU)E0%ZM^pfh%fN-&%M5s$!7m)A)d>U#k`-yBbN73k1T{O zrM_KdUzR+^E*CNEOH*64y>cd25ae^5lHvkEhxpDK>5xx}Pg>5JyFzXemu!(`@}s{> zUay|Tx_1(eZqtOuqymPf9YvEOsnElb?nKdxie-zFW)vkw97W<9>d}|uBA1Hg%Eolj ze62C|S?89UC~p6&I=GSM>76Tn!zCAq>vUV%iwnUu2+xR_MxopF+lN{4vet!~O<*E% g(a4y_at_`&Q5YRMxLaF(&qYd_*tE$C>9{TLKk0=giU0rr literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/stack.cpython-34.pyc b/tensorlayer/layers/__pycache__/stack.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4cbeec153aeda3e01ddfd5843c9ecc98838b620 GIT binary patch literal 3005 zcmbtWOK%fb6h1Sy$B#HX1!=0PTvSo)Hn9`3s1%VD)S#+T6#;=#g+`OXb8U~A%$PgZ zc_4cQo36U;qJMzDG~2G&^arr&ckb9ujD*yvBm3&U&U4QBzWJv*U-@PA-EME5=x;jn z6fnPsf6~F=<7ZJw^u)Kx&rxX6fla$Pviv;x1q$=DTOi*dzepV%IP{}m;`LhyIwXtq zCP%y2!6gWk$*+(2Vh#=Cj zh$ETp*B*bnS|6V+i9|}VuiFiyrB)YryIS{?^=9+v=%|q?XovApBUYWJ*AHAjZY9nB ziSEWxQ@5K&Y;)gr8x`aCLr-}JQcIPX@n9Tf zBH+F;)G#-^W5mDCQB4K5f&;`F#F6mA7^yhw2CXiOwc@DdAq@O8;RxA5Qxw$Eh#fCuY`k~j7-8l56s$q$Ad3x;*Qrq?Va^v|n zGQT9&R`%*s!ANQr4I5cP8b3w-fi^j)9WM!XVB>bcd$(s~R%e8^5V6dJpl?a6u1rLI 
zK3ScWy>lMfJF~KD=aF5Tm7NjYnKn@EIecD)!%`zt7}xfe1i#n~-ZTj?Z+xt*(P)U; z_sAlLSdSlrZzy-UmO~*G0 z2U~lKG|KZ<&!%3EhI#7cagnT7b$X@?)N{s%0HGned7_Aw_!E8h5~x@t44hpN`ee7pkOo#tiTo5&S#Z_3LQB{ zGe``EN;E3cut@569>H1*kX;y+D1u~;Fge~Kjp8#}w)P&-Z#Ip}G^&sW%bEHGOAbq< z=V(}FQwwj%9$``7_2O`jT*Ty1uS%Gk;{S`DVPmIW@*aT}4TAloX9;MWC8i(^+Uf*{ zG7|n2k-?1(F4DM!k zLfWB>I=agw%(*zt0ylO;+#l%pz|%ns{da;iw53wg7eU)75;Y?u+uk75^>dP)Va5N= zc#owXsAz`pOn8$oi%+D9KUXGKOizpWg@FZ7t7#zw*;es^Ns2+NF~g=gq|-po_Uo0i z-&6+pa0c8(}Iak-C$Kypj3x{4}vIrymnStRjE#1wK XbTfyQLE{SisOEB3Ie-1TYb*Z(rj#1q literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/stack.cpython-35.pyc b/tensorlayer/layers/__pycache__/stack.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0f0c8e50859a5e32f155a3c3e735c191182bdf6 GIT binary patch literal 2953 zcmbtWOK;mo5FSzz^{|vD64yo1Y+9s|T3hntP{4@Y1a(jZD2&uj;XUUAIy01HD?ou7CB((PzS)`K?Dy$*rBb@v{Il7s0Q?0P zo*ecc;;TE@`1rFR1bF7#;AbGTpl?Gn1D2l!KL=qJnmOMes}DFTh)zEP^aT6Nfq2ua)0fcv{U$tS{f$X|LdhaiL$LE!U+r4TA zpd|2l)|Fzj6?$549-SX_BzQ^$T7=|=Xva!u@(R5ao(KbNMBEn2i(anm2_qvND=mV^ z#3GJlI$z!UYOOX|TNYYMab((cwoNDQOk z)lg1yN;@XeJU;dMgHV1D3j?>ex5cU05}3L{=(S`w4t=SrIK*^$W@8;w+w}%=>-jEb zepzg+9@b{SNE(8M^)w;%C(&SNI0x0!WkC*X-4AH){v2j)4zz<2%M^t6mc`oY6zbE; z+B|msGT8Nb?8arV8}ryX=>F^iRiC5JYv`~vm?>;G4wnUe*e#mm1n?K17^~OoqWTSH zk;1H{rKae@COO{LqFYA~aR=R`Z}HW?VPgP#7L1rhMtBu%7-is%Wm`xtIP2PQdKYkE z=Wq$eS(@tE(96Im3%x9E1nX4|4onVu&SViuXoRalFAmMt+iyhXxBrC$?#;0wy5IPgpF;1`+Z_iRMaP7^Vm7S}w_I8MYIL9noq5 zH>A6)@DETQ;qd)hLu-G%aS*GQdVtm5@LL@Z34jTPag7w65<-1&rjXU)3w-qzY`|Hv zidMy1wC~y+{e;3|P%1z0rZdO!l7RFF*- zG|s^R*1`{aKN+kQmKOx!4*d8AizP?Z)6S+YKw9!7akzfg+`w20Wn zaRDMkW&k^<`(Ut;*vih~WBA#IaS_HPFsN8ui>P>`0+`6CBG(_%n7zZSm&SB^4JIoa)}n zOjvaNTv{eJFE1_=;N|26$^Ad63WpyJ`>B!$6jCB;o}CJCD5H+)lAuy9&a*(5v@RYE zP2BfP(89ZPhG}R^rKB%{HX9OE#>loe3{CBl!o>>rztw9`nxTs3#EZk5{!93mG|^v= z=@!1!B7V`6Eu&E;`3_-_Z58)9Da<|-wM;UYPNt9D*Q*!*A2EsH8giJPkr~pwJK-wg zw)a*;74aG}^VBNp8uhPZlUzCWRN%cpD$uQXAl0X|a)X+iw4A}Ol-%e@<9@J@iLNN| 
zT&Yn072WtGgTk-q-q*@Wj-yj|$kdGV6wwu*p5#%M+(@1zB|ey`DJiyxNS#d9R*=~4 zabiUu5KvN?Br0Xt9Wt!vh2_UE=^U9}wRy6xCY)*Y+jT({KV1W4@dazIRW3|4u95L;hph!2y(gOp0QGdN;8E}{$5bLuKwDlZ)T1lr?lg(~$Aj7+HwAb{9`}2N!UiZe`76|73))@2QDWgoG zj2#tdr%dWAG7cRVX*)+x!8l3pJw+dE zMY?zel3#`vc5;m}iJ>YKRRqPjVCynF4J?m1R0E@SIrnI8WO}Y8U7- zh#UibjR@ZwIEHxNinlNe&Fm|Ph>aM-pZ9|> z6ruEdVPfG2yE4+eYaJX(A(e_%J(I|d6XUizUKGhNTS!1#D{&-;7L1bJO{qlO7NGAY zM)X*8HVw;P6U%MO2w@Tv??Nt!h@*f=A{ts}aZwoT;&jp=8k|!!#7wsH17Y9Q!Bu$w zXL4fZ5ms>^hB;ok0k6d;gF4u=CL!8Mgiag{Y2hW~tP<0KXD0 z1&&nGp)>*5k#cONVPG{jOv?!L@UhMsuS?f_v zTN|1lr5tq0^ZpFju@>K|MMybhm5a^(Oe1c3V9rCVt)jy2f z_{X&caq#3+j;_M(ojIu69nOlwtdWh^>vb`_XWP92*|;=MW;xN~Yt;QwbMAykys^fa z`#UIoen3On`MinN=gv_3;H*Em-sh^ria*ujf2sNTTi;9d8T)t8{f#%!{VmKtc!?L; z&7YzD!FhjZ?a$xpss9t$0v9%7tM`Yp4gVCiOwO`9IejAiYT3Z?}V1AmR34ueAq^+yq0n9z^g$u_6;?UuHj6JEzdV zQwPCbJyu(duHSAnKc4%vK6k5m>ytXVj14mBkKEyVurLydMxx%E$F3FwMt=Cs*ph9j&F}IC`HkN18M3bjp`q({WAg>MlY- zTIzx5v=Bt?xSCI$wbW7R)R3ua0PnI3;KGpm8~WGd#^;z=Hvq{PnKkaktppgy=*9yT zZ_5@M-9yufqZ%KaU8rGzfnsIj^qFltsvDZoqN`5OV!eBq=G_OYzh(VaP?0m?%#|n?^o4{wA2epip5mtxSHVVJXaS`L60u;yKXvWL#UeMuTy)T3cSrw zy1Es2Wm*At%a+q=5nx$HezlzD;os9SEART`&Vr=!_&yO}|xOYCI(r-q~Fg z{WU5WJhHiG$d3wa^DuZHnFoG0aY266&SzN*--bM=Q%G-+dZ3Fu351-!6nmD=U8FAp z=?jt`5ZR29Z-nD;;!-3xkHn?Ki3^f1q%261(6DR{TC3Hv_Yd5Se&W)6W#ZBUgLk2H z&T<}EQ~gy41l9+lz+j#Ml2)HDv#4(Vp^pF#2BE|-$nMYCCk>(z+Ci_@1k4@; z+3+=s(|Uj*L&Q-ZJih>lS+@B(u3ifREDG4TllVa|(2Ic-f4P50vXt&+iTzFzO5KU~ z8Xiwkvwpt@zu(vlbd;3)s z{Uxf*9kHY7|DUr3M}gRyvqSJO_&Ky?T^76+{nJs-^?!jZz!6bG&xtCt0*I(uIUej zx})pEMlc%MdVx%_x6k4P2e6gu_-SZTS9t8lT)oeg4NMgqtZYzHKjp7KLscy)!R@PD zUE}H`@~98E`yp2&o4Sr~k5M#-LHcl__>8IcreJ54mArXI)bXr`k&l3Qz(=|B*$Bq4zS zv%Ao(@=NXBp4ylGi1xX^2Csec^dI!4{mz1vNcvVM%|kKV#V)XC_w2#h^L>X`XJ;$d zxBs@i{~gBu!N#rv+CN57F)9x~iv^55@-ocJvcO{f4BO5!%gZq@&w?D=&NHvTydvx3 z1sWKaXClw`ip(o9uT1qb3`09JdR}4P1?E-Qvn<;#Gf`sO6(-7T8xQkrvpVzK!rE0U zu|9sDNn0g5h`L=r>^?`IttWGv@$SGs62bi_JZO58{g#(pRKxeSivcaq&66Ci63QG( z7Dbh@4)*s)#(vLO$1=)i+3y&8l4qxk?V(X%r`S=B?U$JREz=e|EwH@|dkVo>_Q6y1 
zAy#0QuR-&xu)=+fsSKQ&P&pB*ZGe+j|pr)MuAixLUfpaY6B9B$< z1i>-i6=({t!f|yX$v5MHkhKk=q9E3O6mn+(IoF}KTI5OyzO%XU(|hZDrKj~kEjJon zU-#nOx*PQyC(4V$ZqJDtW=vys$F|M1;)vE4^@>^Op(CBX&_XKHt&LsgK`1m|=F4v2 zD7C!vGtzj+ydW(fN^#&vu^R5gJXfw02#ENhJ{$L_hpl1bct!h`w1q`j=1@XKY(#5% zeCP)O4}{}!9dXa^i%`+7m470*kTR0>R3kfzwe9wtFcd+$5QnwaqfksN7{>iwA$io{ z7(a+LABw1S8q&YU*E&WJ%EUVALocX^PXU#9II&K<%{6v$KB*s$&&eBnA>H{AH}~rJ zDzyJsN@C_IQGUcHC0@D#$E7cW+CMNZ!8>t?P83d9;U(|MB)SLJH2yaW z@9{YFzludVjE5L8^qCEzV;N33jFEbUm)v{92}m9BLF$$hv({>6-qqDr{t+Mgt>Qf) z1>cL6=7&zG$yv03=Nv~d??s2yr_X1;*7zq(H%fCI@iq?!b?Jm&)Hk<<)*^3yl9oHw zvqiTsP;=-aJ%9KfD45ba6PMPsM-{MP*WEOdn0e2x6YoS%iQ z;iy~2hi*-e91qAXZvo<1i%wM=T26RnyE&R^DoBsbd5pD{P^vEDkmjnV&V&Hc5yG=Q z-_`A3Z!PnqC+B($Bu?+bQQhotksqgx^nJZv=aYLj-P_QOO7m=%Gb_Hq+#ffW&Sdyo zTb!i7hS6JN7D~U*+gN?+6=olw^;@_5Qgzbs=T`hLE#JEPjZB}4e+}E;ehb^*#r)%k zxJ_aH71kf0_uIC9>+aC}pTZZYkP{obZ-@@!Q|OARNCsdv?`wQ#LpVY05de;F0{4KH zYkj9HIKmPCrSFO8Mdbg5B7k&4{-D$GU0)#V!Cz|+iMMegZbORT1bkh@(%kf6B=^s$ zfg_LngL)*pje*x`G(TDTtiE)&dH2&gy2K4q?N98XB z#=^VJYHnQ;AC|Q4^MB*27LzPOT~h7}ZJTJ9%-vH;NW#$v#5t0T?j&=b80em@BU=ps z1xay8p_7~+cA{!Fv9=ORCUX<6szAF-Hi!!m?!U5stu&PKT8+o}DrT03=XRa2p+R#Z zX`r}aYSmCg{A_tf{OW__Bx^t1{5hGbf{Ix))>7$8X4X3Sa4PcGSAqywp;qe<7~8HS z+J5q*$*;@#O$E>=Y+2?vy~(47Ct$Cxrh8Hufa=*sza&RgK)#FlBz-(t9#z$uq&P$u zNovXqRLxLzk*Z6mU=o|;Z9B;uG?cUSbnd{BzOx$$Te;CdBo)xGbUBq2zQV>^xPbc##Kb2o!v)Ke?|r0MOyX@iB68(7ryHui@?t$ zAxLyO*)#(o$Ien-gj5A-1iDCgFpyG_!q91{i&TUo6+r@nK_)dM=e8_LLJG{zA_*y) zBm`*`(hp=jXc##Qt<7r5`6v8*l!P>2n}iIB-F;Y{GL}ciCw~%vO^rw;_>`l;%B65M z2(nKp1Q@zjDU}=z1HsqFDbsq4h^ADGD3=C}o}-A>^MzeUyS+uweNW>ihjH_Nn22C% z-5arfbE=8*zOZhprtzk4p09lV%6tWm`TxSxU+*NZ_wQ08&Mj9}BgrN#tOb{md}@C)RYbZae9 z^+rfHh`w_#_WU7e^RaY&ZFEM`^z3Cx`hFY;)r$@q4oy)rfxm{p-`EYJ-A3P0NX#2b zx{ZG1i9nqLMEaKvfPtWUczRv>hlehLHwyQgDC$d8X*{AxGx}$yi$DRK#Q!CTMhG$Z zS*&Yf7Qz<(fI|BzqEtG-gs>N7A!RU?)=PnvR;^~!kY3hEu+$(@B6-Ig`7`{q5zK*^ zYbl1jL(&wU)5-*Ais64jQ52<_b|U}w&aq)3<~h%rhfm6ROd+#IIL6m6S;(N0R>iJ?ZUH8>_|YDthJ n=}*V8g^dbi(xkKkAA-BB($_L7RIv0+Hfxo0*Sz;XD_H*l^T(5h literal 0 
HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/time_distribution.cpython-34.pyc b/tensorlayer/layers/__pycache__/time_distribution.cpython-34.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd2edca0eee6bf43c56a615798ff0b28869d607a GIT binary patch literal 2978 zcmZ`*&2ke*5bjz1%Q7bXm^gnmMHMBB2+2TkQDG`1KthG09B@JqMXlG`k+fp9D`rO6 zCh{fbkV`7(JOZzfN0}UR;*tl*Dcv)+Bok*#+S;Az?(gewdV2Qn%3}H1)wgZE0Pq*g zU0Hno0{`kh-a`B_hygZ355f$@42B-GGr+^zsJ1@*oOu zkb!oQ>>k&O?-<6bF~csu^W0O>kJDZ+N_y|`9UhuRjkc)+{kq@$xW;X38wm5_T}BW0zRu|j z*bRoGSk!scS?lwko&=LJK4uxC|_1;LV<6t*V1AYBkovVI; zBhO}t^=T<$l~EB=)X5 ztYQ#c;#twI&x*RvX@%B*swO@Ps#UdhW3A4sM5)@UhE)93{Ub;c$#-K8x6x?uXPXb6 z&k^J+W6d>yhwanOan^1-)LrRX#Qzfhzrw%z6WjI#TPwheBFOU)0qkcW z0nm0_GdS=-T?cKO@BxFLnVrqRAPW;b2t1fzm(IW>3kS?&_%g`Rgr5gpfB_y4)^1RO zi3fu+fps&mjFA{Y01VJ@0R|Nd^oM<3wD(hovuKJ(44yc6`t&kn=E5UM=RR111Eh_x zXqJPOeHZcc;dE$^r~}6K3pVZtWIZ?o`$gFO9rRfWJ;_ttJI{ZC{gO30M+t>UxN=(UHg$FAz_{ip%BP>Q@d<=t6 zV1ioWJ4Sk3t2}(YT%~PV<-@Vkd|O~YrjFV$gW@OL$ab zz}6)t>CBf@S2BY)lMQwwWpYyBA-HLBs*{dH?Nd{ZRFo(kBpqS0gl$Tj*zoR3nM#wh zt;rO9Kf)g3`=)TXFHPQdDP;gSAmRz?HGF|k(6Z$nnUlNFsF{snewcInhc&J<`f0!q;V|l z!DN;<@A+Rpy1ntuw|AdD_J6qj=>F|5AKZOx%0V1EeU-`MSB6MJH|F-M$%|e5k_c;y zCaXl;l?5cxUYP5(C@2c;GG4H1xLn#r+T8SAV%TnbF;Y$ptGi*5@m?vD!H#80N2#c{ zAe$KeAKCm~;b%d!AEaTL^qP0ljypx08!{bWLaI61)BQBLvXjaeYJ`VW(|zt(UjO@S zXI1lf)4@-v#%NETM&LF4tIK$ULWPys5?k_CScP%#JiFp8d1dyYx5Sp&Wp;%vvWwnl zxwGu`rze$To1~!_p}DewcQA-x9Ft&)tZS{c5b> zgQB9UT+HV(EW>IglkKEZ7?4>qI*T_`b|T;by(5=t0E5%=`jT3y4Kx>=q?UwWiZ>m( QTeLweOqrc#l}su7FRiXPE&u=k literal 0 HcmV?d00001 diff --git a/tensorlayer/layers/__pycache__/time_distribution.cpython-35.pyc b/tensorlayer/layers/__pycache__/time_distribution.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7aac2a81a3117240bec5380e87f7a1043e47026 GIT binary patch literal 2917 zcmZuzO>+}R5barg%Q7Z>7@UOEgcK#q5t1>*MTIFyfP@M~5pabdidwI=BYEZ3u9z8t zP2@|=C5Ke*`3K1@za_W1=EPsfDcv)+Bol9Gx3n|e{a(MG>Dk5k`SR!OKihf^;2)T| 
zviSct-s%B9Li{m^0X9Mp!VJU=1|GCCz``tqIf%2+&Ow-m9)orsz6lHT@(#W7APR7h zfp(GXp4N)*8OEzI!&csV?qAW5(_Sx1dhhWa9+`6+(Lmgdl$O!fNQ>}cuqR}P=8mo$ z{<)2}`Vt=kpv{P38>u0D28g4j>Bzl9bY$rvdS2$yBgkWbGRJFUTkuvV4wPzbo`kx| z?IhRRf#$(*7)L_!fOkhpM@MPGb;`wSA@{gO%1VnNPrGO=c$5rBnro4$R5mQs{Zw+X z8w`fAsPm}HM`{Xv7wv>ibCUA>>?!mQBU_X}?j7^N z+0j5GTJbKHfQqwuOx#Tugu-DETy+!gJA9NxuSQ3(4$#itbA>#vnw`0I>lUAuh_CbO zYm@QGB3b8atCLZ?RH!Iud~J0)zKid=+Ze_{M{K8YDCBae_l9B}2fJ|^=<92BuC@al zc`-$-PnIICGFpTb`DL9G`Sr^p>7=3Xm3XBt*Cq@Sp%Lo=S=$mX?;hG$ZG`HFmM86$ zWekEPo-W$;>7uT4s?hpR)x<|ZwW_vmtk!vzC{!hK+U5=pC#3*MUL+M05mh%%H>qA9C}ThprHTd}7w6Yv{{*i?IR zu&(})_Lf?{AH+&bo$M$s{Z%B7r0X1=X)CyEntanr(0tZ%3xY&Aa=nJGDBz46Iseaf zYsNJh4QM+$N-e~^vFaMYqxNa%IBmBb>aKLnjVAu|`=`Sus!0NElY+T>tq z--SDU=p5RU>435Qf(`l+S@+Mtei3$m2Yr^}j`I}s?#o|bzhsThQ8pnmF46Rk2l5IV z7a>7~%9thl?K$jYCLZYXFfNeU5-EVw8CcSbG=QWppe2s|%3yyE#+V{Hc#ACSJd&%bOnz{4A{D& zB%Sq=)|1TO!(@ZqNST}zc=&CYoa&@QQM+Wyk%|(fgQO!&mat7}0~_5vDN|{3wk4UO z??>1>eBTrf_oK<%?xalN@bkfBqXb)gk&w`w7^AtTyC%P34`Nf&GDy@gRRY0v(AhDi zp3rVzo5F}V;Mf@&dmYFq*ov`N5=mL6pgHvJ33KZBoGDK`r^(>?VNOwiP8!F;9!O?! z`=3{JIcH=*HYWHF>d%Ul3t!-ei@C zyRv{J+H*5K76nD2UB+`(4VO#1NSm6zM-1C-&qvCMVRbhwGTv)tGT5n1=_nQT2goLd z|98EqRJhiBmdYJ9#FM8Pc6ve5bYC(Sx@n)PdAx7n*HB})Cr@LHRlL;-KA=!xdA7hR z-V&=Y?ww~V-hx+V7rh0x$X3`@HqXBBKFghDZ!ev+vKmJe0J z-+wg<;=?Kzd_PP(zHbTGOlBTL$bR@$@Q`__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + act : activation function + The activation function of this layer. + shape : tuple of int + The shape of the filters: (filter_length, in_channels, out_channels). + stride : int + The number of entries by which the filter is moved right at a step. + dilation_rate : int + Filter up-sampling/input down-sampling rate. + padding : str + The padding algorithm type: "SAME" or "VALID". + data_format : str + Default is 'NWC' as it is a 1D CNN. + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. 
+ W_init_args : dictionary + The arguments for the weight matrix initializer. + b_init_args : dictionary + The arguments for the bias vector initializer. + name : str + A unique layer name + + """ + + def __init__( + self, + prev_layer, + act=tf.identity, + shape=(5, 1, 5), + stride=1, + dilation_rate=1, + padding='SAME', + data_format='NWC', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='cnn1d', + ): + if act is None: + act = tf.identity + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("Conv1dLayer %s: shape:%s stride:%s pad:%s act:%s" % (self.name, str(shape), str(stride), padding, act.__name__)) + + with tf.variable_scope(name): + W = tf.get_variable(name='W_conv1d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + self.outputs = tf.nn.convolution( + self.inputs, W, strides=(stride, ), padding=padding, dilation_rate=(dilation_rate, ), data_format=data_format) # 1.2 + if b_init: + b = tf.get_variable(name='b_conv1d', shape=(shape[-1]), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = self.outputs + b + + self.outputs = act(self.outputs) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + if b_init: + self.all_params.extend([W, b]) + else: + self.all_params.append(W) + + +class Conv2dLayer(Layer): + """ + The :class:`Conv2dLayer` class is a 2D CNN layer, see `tf.nn.conv2d `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + act : activation function + The activation function of this layer. + shape : tuple of int + The shape of the filters: (filter_height, filter_width, in_channels, out_channels). 
+ strides : tuple of int + The sliding window strides of corresponding input dimensions. + It must be in the same order as the ``shape`` parameter. + padding : str + The padding algorithm type: "SAME" or "VALID". + W_init : initializer + The initializer for the the weight matrix. + b_init : initializer or None + The initializer for the the bias vector. If None, skip biases. + W_init_args : dictionary + The arguments for the weight matrix initializer. + b_init_args : dictionary + The arguments for the bias vector initializer. + use_cudnn_on_gpu : bool + Default is False. + data_format : str + "NHWC" or "NCHW", default is "NHWC". + name : str + A unique layer name. + + Notes + ----- + - shape = [h, w, the number of output channel of previous layer, the number of output channels] + - the number of output channel of a layer is its last dimension. + + Examples + -------- + With TensorLayer + + >>> x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1)) + >>> net = tl.layers.InputLayer(x, name='input_layer') + >>> net = tl.layers.Conv2dLayer(net, + ... act = tf.nn.relu, + ... shape = (5, 5, 1, 32), # 32 features for each 5x5 patch + ... strides = (1, 1, 1, 1), + ... padding='SAME', + ... W_init=tf.truncated_normal_initializer(stddev=5e-2), + ... b_init = tf.constant_initializer(value=0.0), + ... name ='cnn_layer1') # output: (?, 28, 28, 32) + >>> net = tl.layers.PoolLayer(net, + ... ksize=(1, 2, 2, 1), + ... strides=(1, 2, 2, 1), + ... padding='SAME', + ... pool = tf.nn.max_pool, + ... name ='pool_layer1',) # output: (?, 14, 14, 32) + + Without TensorLayer, you can implement 2D convolution as follow. + + >>> W = tf.Variable(W_init(shape=[5, 5, 1, 32], ), name='W_conv') + >>> b = tf.Variable(b_init(shape=[32], ), name='b_conv') + >>> outputs = tf.nn.relu( tf.nn.conv2d(inputs, W, + ... strides=[1, 1, 1, 1], + ... 
padding='SAME') + b ) + + """ + + def __init__( + self, + prev_layer, + act=tf.identity, + shape=(5, 5, 1, 100), + strides=(1, 1, 1, 1), + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + use_cudnn_on_gpu=None, + data_format=None, + name='cnn_layer', + ): + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if act is None: + act = tf.identity + logging.info("Conv2dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) + + with tf.variable_scope(name): + W = tf.get_variable(name='W_conv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + if b_init: + b = tf.get_variable(name='b_conv2d', shape=(shape[-1]), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = act( + tf.nn.conv2d(self.inputs, W, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format) + b) + else: + self.outputs = act(tf.nn.conv2d(self.inputs, W, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format)) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + if b_init: + self.all_params.extend([W, b]) + else: + self.all_params.append(W) + + +class DeConv2dLayer(Layer): + """A de-convolution 2D layer. + + See `tf.nn.conv2d_transpose `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + act : activation function + The activation function of this layer. + shape : tuple of int + Shape of the filters: (height, width, output_channels, in_channels). + The filter's ``in_channels`` dimension must match that of value. 
+ output_shape : tuple of int + Output shape of the deconvolution, + strides : tuple of int + The sliding window strides for corresponding input dimensions. + padding : str + The padding algorithm type: "SAME" or "VALID". + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + W_init_args : dictionary + The arguments for initializing the weight matrix. + b_init_args : dictionary + The arguments for initializing the bias vector. + name : str + A unique layer name. + + Notes + ----- + - We recommend to use `DeConv2d` with TensorFlow version higher than 1.3. + - shape = [h, w, the number of output channels of this layer, the number of output channel of the previous layer]. + - output_shape = [batch_size, any, any, the number of output channels of this layer]. + - the number of output channel of a layer is its last dimension. + + Examples + -------- + A part of the generator in DCGAN example + + >>> batch_size = 64 + >>> inputs = tf.placeholder(tf.float32, [batch_size, 100], name='z_noise') + >>> net_in = tl.layers.InputLayer(inputs, name='g/in') + >>> net_h0 = tl.layers.DenseLayer(net_in, n_units = 8192, + ... W_init = tf.random_normal_initializer(stddev=0.02), + ... act = tf.identity, name='g/h0/lin') + >>> print(net_h0.outputs._shape) + ... (64, 8192) + >>> net_h0 = tl.layers.ReshapeLayer(net_h0, shape=(-1, 4, 4, 512), name='g/h0/reshape') + >>> net_h0 = tl.layers.BatchNormLayer(net_h0, act=tf.nn.relu, is_train=is_train, name='g/h0/batch_norm') + >>> print(net_h0.outputs._shape) + ... (64, 4, 4, 512) + >>> net_h1 = tl.layers.DeConv2dLayer(net_h0, + ... shape=(5, 5, 256, 512), + ... output_shape=(batch_size, 8, 8, 256), + ... strides=(1, 2, 2, 1), + ... act=tf.identity, name='g/h1/decon2d') + >>> net_h1 = tl.layers.BatchNormLayer(net_h1, act=tf.nn.relu, is_train=is_train, name='g/h1/batch_norm') + >>> print(net_h1.outputs._shape) + ... 
(64, 8, 8, 256) + + U-Net + + >>> .... + >>> conv10 = tl.layers.Conv2dLayer(conv9, act=tf.nn.relu, + ... shape=(3,3,1024,1024), strides=(1,1,1,1), padding='SAME', + ... W_init=w_init, b_init=b_init, name='conv10') + >>> print(conv10.outputs) + ... (batch_size, 32, 32, 1024) + >>> deconv1 = tl.layers.DeConv2dLayer(conv10, act=tf.nn.relu, + ... shape=(3,3,512,1024), strides=(1,2,2,1), output_shape=(batch_size,64,64,512), + ... padding='SAME', W_init=w_init, b_init=b_init, name='devcon1_1') + + """ + + def __init__( + self, + prev_layer, + act=tf.identity, + shape=(3, 3, 128, 256), + output_shape=(1, 256, 256, 128), + strides=(1, 2, 2, 1), + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='decnn2d_layer', + ): + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if act is None: + act = tf.identity + logging.info("DeConv2dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, + act.__name__)) + # logging.info(" DeConv2dLayer: Untested") + with tf.variable_scope(name): + W = tf.get_variable(name='W_deconv2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + if b_init: + b = tf.get_variable(name='b_deconv2d', shape=(shape[-2]), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = act(tf.nn.conv2d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding) + b) + else: + self.outputs = act(tf.nn.conv2d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding)) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + if b_init: + 
self.all_params.extend([W, b]) + else: + self.all_params.append(W) + + +class Conv3dLayer(Layer): + """ + The :class:`Conv3dLayer` class is a 3D CNN layer, see `tf.nn.conv3d `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + act : activation function + The activation function of this layer. + shape : tuple of int + Shape of the filters: (filter_depth, filter_height, filter_width, in_channels, out_channels). + strides : tuple of int + The sliding window strides for corresponding input dimensions. + Must be in the same order as the shape dimension. + padding : str + The padding algorithm type: "SAME" or "VALID". + W_init : initializer + The initializer for the weight matrix. + b_init : initializer + The initializer for the bias vector. + W_init_args : dictionary + The arguments for the weight matrix initializer. + b_init_args : dictionary + The arguments for the bias vector initializer. + name : str + A unique layer name. + + Examples + --------- + >>> x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3)) + >>> n = tl.layers.InputLayer(x, name='in3') + >>> n = tl.layers.Conv3dLayer(n, shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1)) + ... 
[None, 50, 50, 50, 32] + """ + + def __init__( + self, + prev_layer, + act=tf.identity, + shape=(2, 2, 2, 3, 32), + strides=(1, 2, 2, 2, 1), + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='cnn3d_layer', + ): + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if act is None: + act = tf.identity + logging.info("Conv3dLayer %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__)) + + with tf.variable_scope(name): + # W = tf.Variable(W_init(shape=shape, **W_init_args), name='W_conv') + # b = tf.Variable(b_init(shape=[shape[-1]], **b_init_args), name='b_conv') + W = tf.get_variable(name='W_conv3d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + b = tf.get_variable(name='b_conv3d', shape=(shape[-1]), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = act(tf.nn.conv3d(self.inputs, W, strides=strides, padding=padding, name=None) + b) + + # self.outputs = act( tf.nn.conv3d(self.inputs, W, strides=strides, padding=padding, name=None) + b ) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend([W, b]) + + +class DeConv3dLayer(Layer): + """The :class:`DeConv3dLayer` class is deconvolutional 3D layer, see `tf.nn.conv3d_transpose `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + act : activation function + The activation function of this layer. + shape : tuple of int + The shape of the filters: (depth, height, width, output_channels, in_channels). + The filter's in_channels dimension must match that of value. 
+ output_shape : tuple of int + The output shape of the deconvolution. + strides : tuple of int + The sliding window strides for corresponding input dimensions. + padding : str + The padding algorithm type: "SAME" or "VALID". + W_init : initializer + The initializer for the weight matrix. + b_init : initializer + The initializer for the bias vector. + W_init_args : dictionary + The arguments for the weight matrix initializer. + b_init_args : dictionary + The arguments for the bias vector initializer. + name : str + A unique layer name. + + """ + + def __init__( + self, + prev_layer, + act=tf.identity, + shape=(2, 2, 2, 128, 256), + output_shape=(1, 12, 32, 32, 128), + strides=(1, 2, 2, 2, 1), + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='decnn3d_layer', + ): + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if act is None: + act = tf.identity + logging.info("DeConv3dLayer %s: shape:%s out_shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(output_shape), str(strides), padding, + act.__name__)) + + with tf.variable_scope(name): + W = tf.get_variable(name='W_deconv3d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + b = tf.get_variable(name='b_deconv3d', shape=(shape[-2]), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + + self.outputs = act(tf.nn.conv3d_transpose(self.inputs, W, output_shape=output_shape, strides=strides, padding=padding) + b) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend([W, b]) + + +class UpSampling2dLayer(Layer): + """The :class:`UpSampling2dLayer` class is a up-sampling 2D layer, see 
`tf.image.resize_images `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer with 4-D Tensor of the shape (batch, height, width, channels) or 3-D Tensor of the shape (height, width, channels). + size : tuple of int/float + (height, width) scale factor or new size of height and width. + is_scale : boolean + If True (default), the `size` is a scale factor; otherwise, the `size` is the numbers of pixels of height and width. + method : int + The resize method selected through the index. Defaults index is 0 which is ResizeMethod.BILINEAR. + - Index 0 is ResizeMethod.BILINEAR, Bilinear interpolation. + - Index 1 is ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation. + - Index 2 is ResizeMethod.BICUBIC, Bicubic interpolation. + - Index 3 ResizeMethod.AREA, Area interpolation. + align_corners : boolean + If True, align the corners of the input and output. Default is False. + name : str + A unique layer name. + + """ + + def __init__( + self, + prev_layer, + size, + is_scale=True, + method=0, + align_corners=False, + name='upsample2d_layer', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if len(self.inputs.get_shape()) == 3: + if is_scale: + size_h = size[0] * int(self.inputs.get_shape()[0]) + size_w = size[1] * int(self.inputs.get_shape()[1]) + size = [int(size_h), int(size_w)] + elif len(self.inputs.get_shape()) == 4: + if is_scale: + size_h = size[0] * int(self.inputs.get_shape()[1]) + size_w = size[1] * int(self.inputs.get_shape()[2]) + size = [int(size_h), int(size_w)] + else: + raise Exception("Donot support shape %s" % self.inputs.get_shape()) + logging.info("UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" % (name, is_scale, size, method, align_corners)) + with tf.variable_scope(name): + try: + self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) + except Exception: # for TF 0.10 + self.outputs = 
tf.image.resize_images(self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + + +class DownSampling2dLayer(Layer): + """The :class:`DownSampling2dLayer` class is down-sampling 2D layer, see `tf.image.resize_images `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer with 4-D Tensor in the shape of (batch, height, width, channels) or 3-D Tensor in the shape of (height, width, channels). + size : tuple of int/float + (height, width) scale factor or new size of height and width. + is_scale : boolean + If True (default), the `size` is the scale factor; otherwise, the `size` are numbers of pixels of height and width. + method : int + The resize method selected through the index. Defaults index is 0 which is ResizeMethod.BILINEAR. + - Index 0 is ResizeMethod.BILINEAR, Bilinear interpolation. + - Index 1 is ResizeMethod.NEAREST_NEIGHBOR, Nearest neighbor interpolation. + - Index 2 is ResizeMethod.BICUBIC, Bicubic interpolation. + - Index 3 ResizeMethod.AREA, Area interpolation. + align_corners : boolean + If True, exactly align all 4 corners of the input and output. Default is False. + name : str + A unique layer name. 
+ + """ + + def __init__( + self, + prev_layer, + size, + is_scale=True, + method=0, + align_corners=False, + name='downsample2d_layer', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if len(self.inputs.get_shape()) == 3: + if is_scale: + size_h = size[0] * int(self.inputs.get_shape()[0]) + size_w = size[1] * int(self.inputs.get_shape()[1]) + size = [int(size_h), int(size_w)] + elif len(self.inputs.get_shape()) == 4: + if is_scale: + size_h = size[0] * int(self.inputs.get_shape()[1]) + size_w = size[1] * int(self.inputs.get_shape()[2]) + size = [int(size_h), int(size_w)] + else: + raise Exception("Donot support shape %s" % self.inputs.get_shape()) + logging.info("DownSampling2dLayer %s: is_scale:%s size:%s method:%d, align_corners:%s" % (name, is_scale, size, method, align_corners)) + with tf.variable_scope(name): + try: + self.outputs = tf.image.resize_images(self.inputs, size=size, method=method, align_corners=align_corners) + except Exception: # for TF 0.10 + self.outputs = tf.image.resize_images(self.inputs, new_height=size[0], new_width=size[1], method=method, align_corners=align_corners) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + + +class DeformableConv2d(Layer): + """The :class:`DeformableConv2d` class is a 2D + `Deformable Convolutional Networks `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + offset_layer : :class:`Layer` + To predict the offset of convolution operations. + The output shape is (batchsize, input height, input width, 2*(number of element in the convolution kernel)) + e.g. if apply a 3*3 kernel, the number of the last dimension should be 18 (2*3*3) + n_filter : int + The number of filters. + filter_size : tuple of int + The filter size (height, width). + act : activation function + The activation function of this layer. 
+ W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + W_init_args : dictionary + The arguments for the weight matrix initializer. + b_init_args : dictionary + The arguments for the bias vector initializer. + name : str + A unique layer name. + + Examples + -------- + >>> net = tl.layers.InputLayer(x, name='input_layer') + >>> offset1 = tl.layers.Conv2d(net, 18, (3, 3), (1, 1), act=act, padding='SAME', name='offset1') + >>> net = tl.layers.DeformableConv2d(net, offset1, 32, (3, 3), act=act, name='deformable1') + >>> offset2 = tl.layers.Conv2d(net, 18, (3, 3), (1, 1), act=act, padding='SAME', name='offset2') + >>> net = tl.layers.DeformableConv2d(net, offset2, 64, (3, 3), act=act, name='deformable2') + + References + ---------- + - The deformation operation was adapted from the implementation in `here `__ + + Notes + ----- + - The padding is fixed to 'SAME'. + - The current implementation is not optimized for memory usgae. Please use it carefully. 
+ + """ + + def __init__( + self, + prev_layer, + offset_layer=None, + # shape=(3, 3, 1, 100), + n_filter=32, + filter_size=(3, 3), + act=tf.identity, + name='deformable_conv_2d', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None): + if tf.__version__ < "1.4": + raise Exception("Deformable CNN layer requires tensrflow 1.4 or higher version | current version %s" % tf.__version__) + + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + def _to_bc_h_w(x, x_shape): + """(b, h, w, c) -> (b*c, h, w)""" + x = tf.transpose(x, [0, 3, 1, 2]) + x = tf.reshape(x, (-1, x_shape[1], x_shape[2])) + return x + + def _to_b_h_w_n_c(x, x_shape): + """(b*c, h, w, n) -> (b, h, w, n, c)""" + x = tf.reshape(x, (-1, x_shape[4], x_shape[1], x_shape[2], x_shape[3])) + x = tf.transpose(x, [0, 2, 3, 4, 1]) + return x + + def tf_flatten(a): + """Flatten tensor""" + return tf.reshape(a, [-1]) + + def _get_vals_by_coords(inputs, coords, idx, out_shape): + indices = tf.stack([idx, tf_flatten(coords[:, :, :, :, 0]), tf_flatten(coords[:, :, :, :, 1])], axis=-1) + vals = tf.gather_nd(inputs, indices) + vals = tf.reshape(vals, out_shape) + return vals + + def _tf_repeat(a, repeats): + """Tensorflow version of np.repeat for 1D""" + # https://github.com/tensorflow/tensorflow/issues/8521 + assert len(a.get_shape()) == 1 + + a = tf.expand_dims(a, -1) + a = tf.tile(a, [1, repeats]) + a = tf_flatten(a) + return a + + def _tf_batch_map_coordinates(inputs, coords): + """Batch version of tf_map_coordinates + + Only supports 2D feature maps + + Parameters + ---------- + inputs : ``tf.Tensor`` + shape = (b*c, h, w) + coords : ``tf.Tensor`` + shape = (b*c, h, w, n, 2) + + Returns + ------- + ``tf.Tensor`` + A Tensor with the shape as (b*c, h, w, n) + + """ + input_shape = inputs.get_shape() + coords_shape = coords.get_shape() + batch_channel = tf.shape(inputs)[0] + input_h = 
int(input_shape[1]) + input_w = int(input_shape[2]) + kernel_n = int(coords_shape[3]) + n_coords = input_h * input_w * kernel_n + + coords_lt = tf.cast(tf.floor(coords), 'int32') + coords_rb = tf.cast(tf.ceil(coords), 'int32') + coords_lb = tf.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1) + coords_rt = tf.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1) + + idx = _tf_repeat(tf.range(batch_channel), n_coords) + + vals_lt = _get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n)) + vals_rb = _get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n)) + vals_lb = _get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n)) + vals_rt = _get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n)) + + coords_offset_lt = coords - tf.cast(coords_lt, 'float32') + + vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, 0] + vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, 0] + mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, 1] + + return mapped_vals + + def _tf_batch_map_offsets(inputs, offsets, grid_offset): + """Batch map offsets into input + + Parameters + ------------ + inputs : ``tf.Tensor`` + shape = (b, h, w, c) + offsets: ``tf.Tensor`` + shape = (b, h, w, 2*n) + grid_offset: `tf.Tensor`` + Offset grids shape = (h, w, n, 2) + + Returns + ------- + ``tf.Tensor`` + A Tensor with the shape as (b, h, w, c) + + """ + input_shape = inputs.get_shape() + batch_size = tf.shape(inputs)[0] + kernel_n = int(int(offsets.get_shape()[3]) / 2) + input_h = input_shape[1] + input_w = input_shape[2] + channel = input_shape[3] + + # inputs (b, h, w, c) --> (b*c, h, w) + inputs = _to_bc_h_w(inputs, input_shape) + + # offsets (b, h, w, 2*n) --> (b, h, w, n, 2) + offsets = tf.reshape(offsets, (batch_size, input_h, input_w, kernel_n, 2)) + # offsets (b, h, w, n, 2) --> (b*c, 
h, w, n, 2) + # offsets = tf.tile(offsets, [channel, 1, 1, 1, 1]) + + coords = tf.expand_dims(grid_offset, 0) # grid_offset --> (1, h, w, n, 2) + coords = tf.tile(coords, [batch_size, 1, 1, 1, 1]) + offsets # grid_offset --> (b, h, w, n, 2) + + # clip out of bound + coords = tf.stack( + [ + tf.clip_by_value(coords[:, :, :, :, 0], 0.0, tf.cast(input_h - 1, 'float32')), + tf.clip_by_value(coords[:, :, :, :, 1], 0.0, tf.cast(input_w - 1, 'float32')) + ], + axis=-1) + coords = tf.tile(coords, [channel, 1, 1, 1, 1]) + + mapped_vals = _tf_batch_map_coordinates(inputs, coords) + # (b*c, h, w, n) --> (b, h, w, n, c) + mapped_vals = _to_b_h_w_n_c(mapped_vals, [batch_size, input_h, input_w, kernel_n, channel]) + + return mapped_vals + + Layer.__init__(self, prev_layer=[prev_layer, offset_layer], name=name) + self.inputs = prev_layer.outputs + self.offset_layer = offset_layer + if act is None: + act = tf.identity + logging.info("DeformableConv2d %s: n_filter: %d, filter_size: %s act:%s" % (self.name, n_filter, str(filter_size), act.__name__)) + + try: + pre_channel = int(prev_layer.outputs.get_shape()[-1]) + except Exception: # if pre_channel is ?, it happens when using Spatial Transformer Net + pre_channel = 1 + logging.info("[warnings] unknow input channels, set to 1") + shape = (filter_size[0], filter_size[1], pre_channel, n_filter) + + with tf.variable_scope(name): + offset = self.offset_layer.outputs + assert offset.get_shape()[-1] == 2 * shape[0] * shape[1] + + # Grid initialisation + input_h = int(self.inputs.get_shape()[1]) + input_w = int(self.inputs.get_shape()[2]) + kernel_n = shape[0] * shape[1] + initial_offsets = tf.stack(tf.meshgrid(tf.range(shape[0]), tf.range(shape[1]), indexing='ij')) # initial_offsets --> (kh, kw, 2) + initial_offsets = tf.reshape(initial_offsets, (-1, 2)) # initial_offsets --> (n, 2) + initial_offsets = tf.expand_dims(initial_offsets, 0) # initial_offsets --> (1, n, 2) + initial_offsets = tf.expand_dims(initial_offsets, 0) # 
initial_offsets --> (1, 1, n, 2) + initial_offsets = tf.tile(initial_offsets, [input_h, input_w, 1, 1]) # initial_offsets --> (h, w, n, 2) + initial_offsets = tf.cast(initial_offsets, 'float32') + grid = tf.meshgrid( + tf.range(-int((shape[0] - 1) / 2.0), int(input_h - int((shape[0] - 1) / 2.0)), 1), + tf.range(-int((shape[1] - 1) / 2.0), int(input_w - int((shape[1] - 1) / 2.0)), 1), + indexing='ij') + + grid = tf.stack(grid, axis=-1) + grid = tf.cast(grid, 'float32') # grid --> (h, w, 2) + grid = tf.expand_dims(grid, 2) # grid --> (h, w, 1, 2) + grid = tf.tile(grid, [1, 1, kernel_n, 1]) # grid --> (h, w, n, 2) + grid_offset = grid + initial_offsets # grid_offset --> (h, w, n, 2) + + input_deform = _tf_batch_map_offsets(self.inputs, offset, grid_offset) + + W = tf.get_variable( + name='W_deformableconv2d', + shape=[1, 1, shape[0] * shape[1], shape[-2], shape[-1]], + initializer=W_init, + dtype=LayersConfig.tf_dtype, + **W_init_args) + + if b_init: + b = tf.get_variable(name='b_deformableconv2d', shape=(shape[-1]), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = tf.reshape( + act(tf.nn.conv3d(input_deform, W, strides=[1, 1, 1, 1, 1], padding='VALID', name=None) + b), + (tf.shape(self.inputs)[0], input_h, input_w, shape[-1])) + else: + self.outputs = tf.reshape( + act(tf.nn.conv3d(input_deform, W, strides=[1, 1, 1, 1, 1], padding='VALID', name=None)), + (tf.shape(self.inputs)[0], input_h, input_w, shape[-1])) + + # fixed + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + + # add offset_layer properties + # offset_params = [osparam for osparam in offset_layer.all_params if osparam not in layer.all_params] + # offset_layers = [oslayer for oslayer in offset_layer.all_layers if oslayer not in layer.all_layers] + # + # self.all_params.extend(list(offset_params)) + # self.all_layers.extend(list(offset_layers)) + # 
def atrous_conv1d(
        layer,
        n_filter=32,
        filter_size=2,
        stride=1,
        dilation=1,
        act=tf.identity,
        padding='SAME',
        data_format='NWC',
        W_init=tf.truncated_normal_initializer(stddev=0.02),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        name='conv1d',
):
    """Simplified version of :class:`AtrousConv1dLayer`.

    Parameters
    ----------
    layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : int
        The filter size.
    stride : int
        The stride step of the 1D convolution.
        (FIX: previously documented as a "(height, width)" tuple,
        copied from a 2D layer; this is a single int.)
    dilation : int
        The filter dilation size.
    act : activation function
        The activation function of this layer.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    data_format : str
        Default is 'NWC' as it is a 1D CNN.
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip biases.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A :class:`AtrousConv1dLayer` object

    """
    # Avoid mutable default arguments: build fresh dicts per call.
    if W_init_args is None:
        W_init_args = {}
    if b_init_args is None:
        b_init_args = {}

    return Conv1dLayer(
        prev_layer=layer,
        act=act,
        shape=(filter_size, int(layer.outputs.get_shape()[-1]), n_filter),
        stride=stride,
        padding=padding,
        dilation_rate=dilation,
        data_format=data_format,
        W_init=W_init,
        b_init=b_init,
        W_init_args=W_init_args,
        b_init_args=b_init_args,
        name=name,
    )
convolution with holes or dilated + convolution) 2D layer, see `tf.nn.atrous_conv2d `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer with a 4D output tensor in the shape of (batch, height, width, channels). + n_filter : int + The number of filters. + filter_size : tuple of int + The filter size: (height, width). + rate : int + The stride that we sample input values in the height and width dimensions. + This equals the rate that we up-sample the filters by inserting zeros across the height and width dimensions. + In the literature, this parameter is sometimes mentioned as input stride or dilation. + act : activation function + The activation function of this layer. + padding : str + The padding algorithm type: "SAME" or "VALID". + W_init : initializer + The initializer for the weight matrix. + b_init : initializer or None + The initializer for the bias vector. If None, skip biases. + W_init_args : dictionary + The arguments for the weight matrix initializer. + b_init_args : dictionary + The arguments for the bias vector initializer. + name : str + A unique layer name. 
+ + """ + + def __init__(self, + prev_layer, + n_filter=32, + filter_size=(3, 3), + rate=2, + act=tf.identity, + padding='SAME', + W_init=tf.truncated_normal_initializer(stddev=0.02), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='atrou2d'): + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if act is None: + act = tf.identity + logging.info("AtrousConv2dLayer %s: n_filter:%d filter_size:%s rate:%d pad:%s act:%s" % (self.name, n_filter, filter_size, rate, padding, act.__name__)) + with tf.variable_scope(name): + shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter] + filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + if b_init: + b = tf.get_variable(name='b', shape=(n_filter), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = act(tf.nn.atrous_conv2d(self.inputs, filters, rate, padding) + b) + else: + self.outputs = act(tf.nn.atrous_conv2d(self.inputs, filters, rate, padding)) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + if b_init: + self.all_params.extend([filters, b]) + else: + self.all_params.append(filters) + + +class _SeparableConv2dLayer(Layer): # TODO + """The :class:`SeparableConv2dLayer` class is 2D convolution with separable filters, see `tf.layers.separable_conv2d `__. + + This layer has not been fully tested yet. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer with a 4D output tensor in the shape of [batch, height, width, channels]. + n_filter : int + The number of filters. + filter_size : tuple of int + The filter size (height, width). + strides : tuple of int + The strides (height, width). 
class _SeparableConv2dLayer(Layer):  # TODO
    """The :class:`SeparableConv2dLayer` class is 2D convolution with separable
    filters, see `tf.layers.separable_conv2d`.

    This layer has not been fully tested yet.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer with a 4D output tensor in the shape of [batch, height, width, channels].
    n_filter : int
        The number of filters.
    filter_size : tuple of int
        The filter size (height, width).
    strides : tuple of int
        The strides (height, width). This can be a single integer to specify the
        same value for all spatial dimensions. Specifying any stride value != 1
        is incompatible with specifying any dilation_rate value != 1.
    padding : str
        The type of padding algorithm: "SAME" or "VALID".
    data_format : str
        One of channels_last (default) or channels_first; must match the input
        dimension ordering.
    dilation_rate : int or tuple of ints
        The dilation rate of the convolution. Specifying any dilation_rate
        value != 1 is incompatible with specifying any stride value != 1.
    depth_multiplier : int
        The number of depthwise convolution output channels for each input
        channel. The total number of depthwise convolution output channels will
        be equal to num_filters_in * depth_multiplier.
    act : activation function
        The activation function of this layer.
    use_bias : boolean
        Whether the layer uses a bias.
    depthwise_initializer : initializer
        The initializer for the depthwise convolution kernel.
    pointwise_initializer : initializer
        The initializer for the pointwise convolution kernel.
    bias_initializer : initializer
        The initializer for the bias vector. If None, skip bias.
    depthwise_regularizer : regularizer
        Optional regularizer for the depthwise convolution kernel.
    pointwise_regularizer : regularizer
        Optional regularizer for the pointwise convolution kernel.
    bias_regularizer : regularizer
        Optional regularizer for the bias vector.
    activity_regularizer : regularizer
        Regularizer function for the output.
    name : str
        A unique layer name.

    """

    def __init__(self,
                 prev_layer,
                 n_filter,
                 filter_size=5,
                 strides=(1, 1),
                 padding='valid',
                 data_format='channels_last',
                 dilation_rate=(1, 1),
                 depth_multiplier=1,
                 act=tf.identity,
                 use_bias=True,
                 depthwise_initializer=None,
                 pointwise_initializer=None,
                 bias_initializer=tf.zeros_initializer,
                 depthwise_regularizer=None,
                 pointwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 name='atrou2d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        # FIX: the original guard was inverted -- `if tf.__version__ > "0.12.1":
        # raise` rejected every TF release NEWER than 0.12.1, i.e. exactly the
        # versions the message says are required.  Also compare numerically
        # instead of lexicographically.
        tf_version = tuple(int(p) for p in tf.__version__.split('.')[:2] if p.isdigit())
        if tf_version < (1, 0):
            raise Exception("This layer only supports for TF 1.0+")

        # The default is the initializer *class* (tf.zeros_initializer), so
        # instantiate only in that case; an already-constructed initializer
        # instance is passed through unchanged.
        if isinstance(bias_initializer, type):
            bias_initializer = bias_initializer()

        logging.info("SeparableConv2dLayer %s: n_filter:%d filter_size:%s strides:%s padding:%s dilation_rate:%s depth_multiplier:%s act:%s" %
                     (self.name, n_filter, filter_size, str(strides), padding, str(dilation_rate), str(depth_multiplier), act.__name__))

        with tf.variable_scope(name) as vs:
            self.outputs = tf.layers.separable_conv2d(
                self.inputs,
                filters=n_filter,
                kernel_size=filter_size,
                strides=strides,
                padding=padding,
                data_format=data_format,
                dilation_rate=dilation_rate,
                depth_multiplier=depth_multiplier,
                activation=act,
                use_bias=use_bias,
                depthwise_initializer=depthwise_initializer,
                pointwise_initializer=pointwise_initializer,
                bias_initializer=bias_initializer,
                depthwise_regularizer=depthwise_regularizer,
                pointwise_regularizer=pointwise_regularizer,
                bias_regularizer=bias_regularizer,
                activity_regularizer=activity_regularizer,
            )
            # trainable=True, name=None, reuse=None)

            variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
def deconv2d_bilinear_upsampling_initializer(shape):
    """Returns the initializer that can be passed to DeConv2dLayer for
    initializing the weights in correspondence to channel-wise bilinear
    up-sampling.
    Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)

    Parameters
    ----------
    shape : tuple of int
        The shape of the filters, [height, width, output_channels, in_channels].
        It must match the shape passed to DeConv2dLayer.

    Returns
    -------
    ``tf.constant_initializer``
        A constant initializer with weights set to correspond to per channel
        bilinear upsampling when passed as W_init in DeConv2dLayer.

    Examples
    --------
    - Upsampling by a factor of 2, e.g. 100 -> 200
    >>> rescale_factor = 2
    >>> filter_size = (2 * rescale_factor - rescale_factor % 2)  # corresponding bilinear filter size
    >>> num_in_channels = 3
    >>> num_out_channels = 3
    >>> filter_shape = (filter_size, filter_size, num_out_channels, num_in_channels)
    >>> x = tf.placeholder(tf.float32, (1, imsize, imsize, num_in_channels))
    >>> net = tl.layers.InputLayer(x, name='input_layer')
    >>> bilinear_init = deconv2d_bilinear_upsampling_initializer(shape=filter_shape)
    >>> net = tl.layers.DeConv2dLayer(net,
    ...                    shape=filter_shape,
    ...                    output_shape=(1, imsize*rescale_factor, imsize*rescale_factor, num_out_channels),
    ...                    strides=(1, rescale_factor, rescale_factor, 1),
    ...                    W_init=bilinear_init,
    ...                    padding='SAME',
    ...                    act=tf.identity, name='g/h1/decon2d')

    """
    # NOTE: docstring example previously mixed `deconv_filter_shape`,
    # `filter_shape` and an undefined `num_channels`; names are now consistent.
    if shape[0] != shape[1]:
        raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
    if shape[3] < shape[2]:
        raise Exception('deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels ')

    filter_size = shape[0]
    num_out_channels = shape[2]
    num_in_channels = shape[3]

    # Create a single 2D bilinear kernel as a numpy array.
    bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
    scale_factor = (filter_size + 1) // 2
    if filter_size % 2 == 1:
        center = scale_factor - 1
    else:
        center = scale_factor - 0.5
    for x in range(filter_size):
        for y in range(filter_size):
            bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * \
                                    (1 - abs(y - center) / scale_factor)
    # Place the kernel on the channel diagonal (out i <- in i) so every channel
    # is upsampled independently; cross-channel weights stay zero.
    weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels))
    for i in range(num_out_channels):
        weights[:, :, i, i] = bilinear_kernel

    # Assign numpy array to constant_initializer and pass to get_variable.
    bilinear_weights_init = tf.constant_initializer(value=weights, dtype=LayersConfig.tf_dtype)
    return bilinear_weights_init
def conv1d(
        layer,
        n_filter=32,
        filter_size=5,
        stride=1,
        dilation_rate=1,
        act=tf.identity,
        padding='SAME',
        data_format="NWC",
        W_init=tf.truncated_normal_initializer(stddev=0.02),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        name='conv1d',
):
    """Simplified version of :class:`Conv1dLayer`.

    Parameters
    ----------
    layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : int
        The filter size.
    stride : int
        The stride step.
    dilation_rate : int
        Specifying the dilation rate to use for dilated convolution.
    act : activation function
        The function that is applied to the layer activations.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    data_format : str
        Default is 'NWC' as it is a 1D CNN.
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip biases.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A :class:`Conv1dLayer` object.

    Examples
    ---------
    >>> x = tf.placeholder(tf.float32, (batch_size, width))
    >>> y_ = tf.placeholder(tf.int64, shape=(batch_size,))
    >>> n = InputLayer(x, name='in')
    >>> n = ReshapeLayer(n, (-1, width, 1), name='rs')
    >>> n = Conv1d(n, 64, 3, 1, act=tf.nn.relu, name='c1')
    >>> n = MaxPool1d(n, 2, 2, padding='valid', name='m1')
    >>> n = FlattenLayer(n, name='f')
    >>> n = DenseLayer(n, 2, tf.identity, name='o')

    """
    # The kernel shape for Conv1dLayer is (width, in_channels, out_channels).
    in_channels = int(layer.outputs.get_shape()[-1])
    kernel_shape = (filter_size, in_channels, n_filter)

    return Conv1dLayer(
        prev_layer=layer,
        act=act,
        shape=kernel_shape,
        stride=stride,
        dilation_rate=dilation_rate,
        padding=padding,
        data_format=data_format,
        W_init=W_init,
        b_init=b_init,
        W_init_args=W_init_args if W_init_args is not None else {},
        b_init_args=b_init_args if b_init_args is not None else {},
        name=name,
    )
def conv2d(
        layer,
        n_filter=32,
        filter_size=(3, 3),
        strides=(1, 1),
        act=tf.identity,
        padding='SAME',
        W_init=tf.truncated_normal_initializer(stddev=0.02),
        b_init=tf.constant_initializer(value=0.0),
        W_init_args=None,
        b_init_args=None,
        use_cudnn_on_gpu=None,
        data_format=None,
        name='conv2d',
):
    """Simplified version of :class:`Conv2dLayer`.

    Parameters
    ----------
    layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : tuple of int
        The filter size (height, width).
    strides : tuple of int
        The sliding window strides of corresponding input dimensions.
        It must be in the same order as the ``shape`` parameter.
    act : activation function
        The activation function of this layer.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip biases.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    use_cudnn_on_gpu : bool
        Default is False.
    data_format : str
        "NHWC" or "NCHW", default is "NHWC".
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A :class:`Conv2dLayer` object.

    Examples
    --------
    >>> net = InputLayer(x, name='inputs')
    >>> net = Conv2d(net, 64, (3, 3), act=tf.nn.relu, name='conv1_1')
    >>> net = Conv2d(net, 64, (3, 3), act=tf.nn.relu, name='conv1_2')
    >>> net = MaxPool2d(net, (2, 2), name='pool1')
    >>> net = Conv2d(net, 128, (3, 3), act=tf.nn.relu, name='conv2_1')
    >>> net = Conv2d(net, 128, (3, 3), act=tf.nn.relu, name='conv2_2')
    >>> net = MaxPool2d(net, (2, 2), name='pool2')

    """
    # Avoid mutable default arguments: build fresh dicts per call.
    if W_init_args is None:
        W_init_args = {}
    if b_init_args is None:
        b_init_args = {}

    if len(strides) != 2:
        raise ValueError("len(strides) should be 2, Conv2d and Conv2dLayer are different.")

    try:
        pre_channel = int(layer.outputs.get_shape()[-1])
    except (TypeError, ValueError):
        # FIX: narrowed from a bare `except Exception`.  int() on an unknown
        # dimension raises TypeError/ValueError; this happens e.g. when the
        # channel dim is ? after a Spatial Transformer Net.
        pre_channel = 1
        logging.info("[warnings] unknow input channels, set to 1")
    return Conv2dLayer(
        layer,
        act=act,
        shape=(filter_size[0], filter_size[1], pre_channel, n_filter),  # n_filter features per patch
        strides=(1, strides[0], strides[1], 1),
        padding=padding,
        W_init=W_init,
        W_init_args=W_init_args,
        b_init=b_init,
        b_init_args=b_init_args,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format,
        name=name)
def deconv2d(layer,
             n_filter=32,
             filter_size=(3, 3),
             out_size=(30, 30),
             strides=(2, 2),
             padding='SAME',
             batch_size=None,
             act=tf.identity,
             W_init=tf.truncated_normal_initializer(stddev=0.02),
             b_init=tf.constant_initializer(value=0.0),
             W_init_args=None,
             b_init_args=None,
             name='decnn2d'):
    """Simplified version of :class:`DeConv2dLayer`.

    Parameters
    ----------
    layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : tuple of int
        The filter size (height, width).
    out_size : tuple of int
        Required if TF version < 1.3, (height, width) of output.
    strides : tuple of int
        The stride step (height, width).
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    batch_size : int
        Required if TF version < 1.3, int or None.
        If None, try to find the `batch_size` from the first dim of
        layer.outputs (you should define the `batch_size` in the input
        placeholder).
    act : activation function
        The activation function of this layer.
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip biases.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A :class:`DeConv2dLayer` object.

    """
    if W_init_args is None:
        W_init_args = {}
    if b_init_args is None:
        b_init_args = {}
    if act is None:
        act = tf.identity
    if len(strides) != 2:
        raise ValueError("len(strides) should be 2, DeConv2d and DeConv2dLayer are different.")

    # FIX: `tf.__version__ > '1.3'` compared version strings lexicographically,
    # which misclassifies e.g. TF 1.10 ('1.10' < '1.3' as strings) into the
    # legacy branch.  Compare the numeric (major, minor) pair instead.  Any
    # 1.3.x release already took the contrib path under the old test
    # ('1.3.0' > '1.3'), hence `>=`.
    tf_version = tuple(int(p) for p in tf.__version__.split('.')[:2] if p.isdigit())
    if tf_version >= (1, 3):
        logging.info("DeConv2d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))
        inputs = layer.outputs
        net_new = Layer(name=name)
        with tf.variable_scope(name) as vs:
            net_new.outputs = tf.contrib.layers.conv2d_transpose(
                inputs=inputs,
                num_outputs=n_filter,
                kernel_size=filter_size,
                stride=strides,
                padding=padding,
                activation_fn=act,
                weights_initializer=W_init,
                biases_initializer=b_init,
                scope=name)
            new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
        # Propagate the accumulated graph bookkeeping from the previous layer.
        net_new.all_layers = list(layer.all_layers)
        net_new.all_params = list(layer.all_params)
        net_new.all_drop = dict(layer.all_drop)
        net_new.all_layers.extend([net_new.outputs])
        net_new.all_params.extend(new_variables)
        return net_new
    else:
        if batch_size is None:
            # Prefer the statically known batch size; fall back to a dynamic
            # shape op when the placeholder batch dimension is None.
            fixed_batch_size = layer.outputs.get_shape().with_rank_at_least(1)[0]
            if fixed_batch_size.value:
                batch_size = fixed_batch_size.value
            else:
                from tensorflow.python.ops import array_ops
                batch_size = array_ops.shape(layer.outputs)[0]
        return DeConv2dLayer(
            prev_layer=layer,
            act=act,
            shape=(filter_size[0], filter_size[1], n_filter, int(layer.outputs.get_shape()[-1])),
            output_shape=(batch_size, int(out_size[0]), int(out_size[1]), n_filter),
            strides=(1, strides[0], strides[1], 1),
            padding=padding,
            W_init=W_init,
            b_init=b_init,
            W_init_args=W_init_args,
            b_init_args=b_init_args,
            name=name)
class DeConv3d(Layer):
    """Simplified version of :class:`DeConv3dLayer`, see
    `tf.contrib.layers.conv3d_transpose`.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : tuple of int
        The filter size (depth, height, width).
    strides : tuple of int
        The stride step (depth, height, width).
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    act : activation function
        The activation function of this layer.
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip bias.
    name : str
        A unique layer name.

    """

    def __init__(self,
                 prev_layer,
                 n_filter=32,
                 filter_size=(3, 3, 3),
                 strides=(2, 2, 2),
                 padding='SAME',
                 act=tf.identity,
                 W_init=tf.truncated_normal_initializer(stddev=0.02),
                 b_init=tf.constant_initializer(value=0.0),
                 name='decnn3d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        if act is None:  # consistent with the other conv layers
            act = tf.identity
        logging.info("DeConv3d %s: n_filters:%s strides:%s pad:%s act:%s" % (name, str(n_filter), str(strides), padding, act.__name__))

        with tf.variable_scope(name) as vs:
            self.outputs = tf.contrib.layers.conv3d_transpose(
                inputs=self.inputs,  # FIX: the input tensor was never passed to conv3d_transpose
                num_outputs=n_filter,
                kernel_size=filter_size,
                stride=strides,
                padding=padding,
                activation_fn=act,
                weights_initializer=W_init,
                biases_initializer=b_init,
                scope=name,
            )
            new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self.all_layers.append(self.outputs)
        self.all_params.extend(new_variables)
class DepthwiseConv2d(Layer):
    """Separable/Depthwise Convolutional 2D layer, see `tf.nn.depthwise_conv2d`.

    Input:
        4-D Tensor (batch, height, width, in_channels).
    Output:
        4-D Tensor (batch, new height, new width, in_channels * depth_multiplier).

    Parameters
    ------------
    prev_layer : :class:`Layer`
        Previous layer.
    shape : tuple of int
        The filter size (height, width).
        (FIX: previously documented under the wrong name ``filter_size``.)
    strides : tuple of int
        The stride step (height, width).
    act : activation function
        The activation function of this layer.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    dilation_rate : tuple of 2 int
        The dilation rate in which we sample input values across the height and
        width dimensions in atrous convolution. If it is greater than 1, then
        all values of strides must be 1.
    depth_multiplier : int
        The number of channels to expand to.
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip bias.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name='x')
    >>> net = InputLayer(x, name='in')
    >>> net = Conv2d(net, 32, (3, 3), (1, 1), name='conv1')
    >>> net = MaxPool2d(net, (2, 2), name='pool1')
    >>> net = DepthwiseConv2d(net, (3, 3), (1, 1), act=tf.nn.relu, name='dethwise1')
    >>> net = Conv2d(net, 64, (1, 1), (1, 1), act=tf.nn.relu, name='conv2')

    References
    -----------
    - tflearn's `grouped_conv_2d`
    - keras's `separableconv2d`

    """  # https://zhuanlan.zhihu.com/p/31551004  https://github.com/xiaohu2015/DeepLearning_tutorials/blob/master/CNNs/MobileNet.py

    def __init__(
            self,
            prev_layer,
            shape=(3, 3),
            strides=(1, 1),
            act=tf.identity,
            padding='SAME',
            dilation_rate=(1, 1),
            depth_multiplier=1,
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='depthwise_conv2d',
    ):
        # Avoid mutable default arguments: build fresh dicts per call.
        if W_init_args is None:
            W_init_args = {}
        if b_init_args is None:
            b_init_args = {}

        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        if act is None:
            act = tf.identity

        logging.info("DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" % (self.name, str(shape), str(strides), padding, act.__name__))
        try:
            pre_channel = int(prev_layer.outputs.get_shape()[-1])
        except (TypeError, ValueError):  # if pre_channel is ?, it happens when using Spatial Transformer Net
            pre_channel = 1
            logging.info("[warnings] unknow input channels, set to 1")

        # [filter_height, filter_width, in_channels, depth_multiplier]
        shape = [shape[0], shape[1], pre_channel, depth_multiplier]

        if len(strides) == 2:
            strides = [1, strides[0], strides[1], 1]

        # FIX: validate with an explicit exception instead of `assert`,
        # which is stripped when Python runs with -O.
        if len(strides) != 4:
            raise ValueError("len(strides) should be 4.")

        with tf.variable_scope(name):
            W = tf.get_variable(
                name='W_depthwise2d', shape=shape, initializer=W_init, dtype=LayersConfig.tf_dtype,
                **W_init_args)
            if b_init:
                # FIX: shape must be a sequence; it was a bare int expression.
                b = tf.get_variable(
                    name='b_depthwise2d', shape=(pre_channel * depth_multiplier, ), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
                self.outputs = act(tf.nn.depthwise_conv2d(self.inputs, W, strides=strides, padding=padding, rate=dilation_rate) + b)
            else:
                self.outputs = act(tf.nn.depthwise_conv2d(self.inputs, W, strides=strides, padding=padding, rate=dilation_rate))

        self.all_layers.append(self.outputs)
        if b_init:
            self.all_params.extend([W, b])
        else:
            self.all_params.append(W)
class SeparableConv2d(Layer):
    """The :class:`SeparableConv2d` class is a 2D depthwise separable
    convolutional layer, see `tf.layers.separable_conv2d`.

    This layer performs a depthwise convolution that acts separately on
    channels, followed by a pointwise convolution that mixes channels.
    While :class:`DepthwiseConv2d` performs depthwise convolution only, which
    allows us to add batch normalization between depthwise and pointwise
    convolution.

    Parameters
    ------------
    prev_layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The dimensionality of the output space (i.e. the number of filters in
        the convolution).
    filter_size : tuple/list of 2 int
        Specifying the spatial dimensions of the filters. Can be a single
        integer to specify the same value for all spatial dimensions.
    strides : tuple/list of 2 int
        Specifying the strides of the convolution. Specifying any stride
        value != 1 is incompatible with specifying any dilation_rate value != 1.
    act : activation function
        The activation function of this layer.
    padding : str
        One of "valid" or "same" (case-insensitive).
    data_format : str
        One of channels_last (default) or channels_first.
    dilation_rate : integer or tuple/list of 2 int
        Specifying the dilation rate to use for dilated convolution.
    depth_multiplier : int
        The number of depthwise convolution output channels for each input
        channel. The total number of depthwise convolution output channels will
        be equal to num_filters_in * depth_multiplier.
    depthwise_init : initializer
        For the depthwise convolution kernel.
    pointwise_init : initializer
        For the pointwise convolution kernel.
    b_init : initializer
        For the bias vector. If None, ignore bias in the pointwise part only.
    name : str
        A unique layer name.

    """

    def __init__(
            self,
            prev_layer,
            n_filter=100,
            filter_size=(3, 3),
            strides=(1, 1),
            act=tf.identity,
            padding='valid',
            data_format='channels_last',
            dilation_rate=(1, 1),
            depth_multiplier=1,
            depthwise_init=None,
            pointwise_init=None,
            b_init=tf.zeros_initializer(),
            name='seperable',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        # FIX: the second field of this message read "filter_size" twice;
        # it actually reports the strides.
        logging.info("SeparableConv2d %s: n_filter:%d filter_size:%s strides:%s depth_multiplier:%d act:%s" %
                     (self.name, n_filter, str(filter_size), str(strides), depth_multiplier, act.__name__))

        with tf.variable_scope(name) as vs:
            self.outputs = tf.layers.separable_conv2d(
                inputs=self.inputs,
                filters=n_filter,
                kernel_size=filter_size,
                strides=strides,
                padding=padding,
                data_format=data_format,
                dilation_rate=dilation_rate,
                depth_multiplier=depth_multiplier,
                activation=act,
                use_bias=(b_init is not None),
                depthwise_initializer=depthwise_init,
                pointwise_initializer=pointwise_init,
                bias_initializer=b_init,
                trainable=True,
                name=None)
            new_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self.all_layers.append(self.outputs)
        self.all_params.extend(new_variables)
class GroupConv2d(Layer):
    """The :class:`GroupConv2d` class is 2D grouped convolution.

    Parameters
    --------------
    prev_layer : :class:`Layer`
        Previous layer.
    n_filter : int
        The number of filters.
    filter_size : tuple of int
        The filter size.
    strides : tuple of int
        The stride step.
    n_group : int
        The number of groups. Must evenly divide the number of input channels.
    act : activation function
        The activation function of this layer.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    W_init : initializer
        The initializer for the weight matrix.
    b_init : initializer or None
        The initializer for the bias vector. If None, skip biases.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    """

    def __init__(
            self,
            prev_layer=None,
            n_filter=32,
            filter_size=(3, 3),
            strides=(2, 2),
            n_group=2,
            act=tf.identity,
            padding='SAME',
            W_init=tf.truncated_normal_initializer(stddev=0.02),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='groupconv',
    ):  # Windaway
        # Avoid mutable default arguments: build fresh dicts per call.
        if W_init_args is None:
            W_init_args = {}
        if b_init_args is None:
            b_init_args = {}

        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        def group_conv(i, k):
            # One plain 2D convolution over a single channel group.
            return tf.nn.conv2d(i, k, strides=[1, strides[0], strides[1], 1], padding=padding)

        channels = int(self.inputs.get_shape()[-1])
        if channels % n_group != 0:
            raise ValueError("The number of input channels (%d) must be divisible by n_group (%d)." % (channels, n_group))

        logging.info("GroupConv2d %s: n_filter:%d size:%s strides:%s n_group:%d pad:%s act:%s" %
                     (self.name, n_filter, str(filter_size), str(strides), n_group, padding, act.__name__))
        with tf.variable_scope(name):
            We = tf.get_variable(
                name='W',
                # FIX: use integer division -- `channels / n_group` is a float
                # under Python 3 and is not a valid variable dimension.
                shape=[filter_size[0], filter_size[1], channels // n_group, n_filter],
                initializer=W_init,
                dtype=LayersConfig.tf_dtype,
                trainable=True,
                **W_init_args)
            if b_init:
                bi = tf.get_variable(name='b', shape=n_filter, initializer=b_init, dtype=LayersConfig.tf_dtype, trainable=True, **b_init_args)
            if n_group == 1:
                conv = group_conv(self.inputs, We)
            else:
                # Split both inputs and filters channel-wise, convolve each
                # group independently, then concatenate the results.
                input_groups = tf.split(axis=3, num_or_size_splits=n_group, value=self.inputs)
                weight_groups = tf.split(axis=3, num_or_size_splits=n_group, value=We)
                conv_groups = [group_conv(i, k) for i, k in zip(input_groups, weight_groups)]
                conv = tf.concat(axis=3, values=conv_groups)
            if b_init:
                conv = tf.add(conv, bi, name='add')

            self.outputs = act(conv)

        self.all_layers.append(self.outputs)
        if b_init:
            self.all_params.extend([We, bi])
        else:
            self.all_params.append(We)
__all__ = [
    'LayersConfig',
    'TF_GRAPHKEYS_VARIABLES',
    'flatten_reshape',
    'clear_layers_name',
    'set_name_reuse',
    'initialize_rnn_state',
    'print_all_variables',
    'get_variables_with_name',
    'get_layers_with_name',
    'list_remove_repeat',
    'merge_networks',
    'initialize_global_variables',
    'Layer',
    'InputLayer',
    'OneHotInputLayer',
    'Word2vecEmbeddingInputlayer',
    'EmbeddingInputlayer',
    'AverageEmbeddingInputlayer',
    'DenseLayer',
    'ReconLayer',
    'DropoutLayer',
    'GaussianNoiseLayer',
    'DropconnectDenseLayer',
]


class LayersConfig:
    """Global configuration shared by all layers."""
    tf_dtype = tf.float32  # TensorFlow DType
    set_keep = {}  # A dictionary for holding tf.placeholders


try:  # For TF12 and later
    TF_GRAPHKEYS_VARIABLES = tf.GraphKeys.GLOBAL_VARIABLES
except Exception:  # For TF11 and before
    TF_GRAPHKEYS_VARIABLES = tf.GraphKeys.VARIABLES


def flatten_reshape(variable, name='flatten'):
    """Reshapes a high-dimension vector input.

    [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row x mask_col x n_mask]

    Parameters
    ----------
    variable : TensorFlow variable or tensor
        The variable or tensor to be flatten.
    name : str
        A unique layer name.

    Returns
    -------
    Tensor
        Flatten Tensor

    Examples
    --------
    >>> h_pool2_flat = tl.layers.flatten_reshape(h_pool2)
    ... [batch_size, mask_row * mask_col * n_mask]

    """
    dim = 1
    for d in variable.get_shape()[1:].as_list():
        dim *= d
    return tf.reshape(variable, shape=[-1, dim], name=name)


@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check naming.")
def clear_layers_name():
    logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.')


@deprecated("2018-06-30", "TensorLayer relies on TensorFlow to check name reusing.")
def set_name_reuse(enable=True):
    logging.warning('this method is DEPRECATED and has no effect, please remove it from your code.')


def initialize_rnn_state(state, feed_dict=None):
    """Returns the initialized RNN state.

    The inputs are `LSTMStateTuple` or `State` of `RNNCells`, and an optional `feed_dict`.

    Parameters
    ----------
    state : RNN state.
        The TensorFlow's RNN state.
    feed_dict : dictionary
        Initial RNN state; if None, returns zero state.

    Returns
    -------
    RNN state
        The TensorFlow's RNN state.

    """
    try:  # TF1.0
        LSTMStateTuple = tf.contrib.rnn.LSTMStateTuple
    except Exception:
        LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple

    if isinstance(state, LSTMStateTuple):
        c = state.c.eval(feed_dict=feed_dict)
        h = state.h.eval(feed_dict=feed_dict)
        return (c, h)
    else:
        new_state = state.eval(feed_dict=feed_dict)
        return new_state


def print_all_variables(train_only=False):
    """Print information of trainable or all variables,
    without ``tl.layers.initialize_global_variables(sess)``.

    Parameters
    ----------
    train_only : boolean
        Whether print trainable variables only.
            - If True, print the trainable variables.
            - If False, print all variables.

    """
    if train_only:
        t_vars = tf.trainable_variables()
        logging.info("  [*] printing trainable variables")
    else:
        try:  # TF1.0+
            t_vars = tf.global_variables()
        except Exception:  # TF0.12
            t_vars = tf.all_variables()
        logging.info("  [*] printing global variables")
    for idx, v in enumerate(t_vars):
        logging.info("  var {:3}: {:15}   {}".format(idx, str(v.get_shape()), v.name))


def get_variables_with_name(name=None, train_only=True, printable=False):
    """Get a list of TensorFlow variables by a given name scope.

    Parameters
    ----------
    name : str
        Get the variables that contain this name.
    train_only : boolean
        If True, only get the trainable variables.
    printable : boolean
        If True, print the information of all variables.

    Returns
    -------
    list of Tensor
        A list of TensorFlow variables

    Examples
    --------
    >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)

    """
    if name is None:
        raise Exception("please input a name")
    logging.info("  [*] geting variables with %s" % name)
    if train_only:
        t_vars = tf.trainable_variables()
    else:
        try:  # TF1.0+
            t_vars = tf.global_variables()
        except Exception:  # TF0.12
            t_vars = tf.all_variables()

    d_vars = [var for var in t_vars if name in var.name]
    if printable:
        for idx, v in enumerate(d_vars):
            logging.info("  got {:3}: {:15}   {}".format(idx, v.name, str(v.get_shape())))
    return d_vars


def get_layers_with_name(net, name="", printable=False):
    """Get a list of layers' output in a network by a given name scope.

    Parameters
    -----------
    net : :class:`Layer`
        The last layer of the network.
    name : str
        Get the layers' output that contain this name.
    printable : boolean
        If True, print information of all the layers' output

    Returns
    --------
    list of Tensor
        A list of layers' output (TensorFlow tensor)

    Examples
    ---------
    >>> layers = tl.layers.get_layers_with_name(net, "CNN", True)

    """
    logging.info("  [*] geting layers with %s" % name)

    layers = []
    i = 0
    for layer in net.all_layers:
        if name in layer.name:
            layers.append(layer)
            if printable:
                logging.info("  got {:3}: {:15}   {}".format(i, layer.name, str(layer.get_shape())))
                i = i + 1
    return layers
def list_remove_repeat(x):
    """Remove the repeated items in a list, and return the processed list.

    You may need it to create merged layer like Concat, Elementwise and etc.

    Parameters
    ----------
    x : list
        Input

    Returns
    -------
    list
        A list that after removing it's repeated items

    Examples
    -------
    >>> l = [2, 3, 4, 2, 3]
    >>> l = list_remove_repeat(l)
    ... [2, 3, 4]

    """
    seen = []
    for item in x:
        if item not in seen:
            seen.append(item)
    return seen


def merge_networks(layers=None):
    """Merge all parameters, layers and dropout probabilities to a :class:`Layer`.

    The output of return network is the first network in the list.

    Parameters
    ----------
    layers : list of :class:`Layer`
        Merge all parameters, layers and dropout probabilities to the first layer in the list.

    Returns
    --------
    :class:`Layer`
        The network after merging all parameters, layers and dropout probabilities to the first network in the list.

    Examples
    ---------
    >>> n1 = ...
    >>> n2 = ...
    >>> n1 = tl.layers.merge_networks([n1, n2])

    """
    if layers is None:
        raise Exception("layers should be a list of TensorLayer's Layers.")
    base = layers[0]

    merged_params = []
    merged_outputs = []
    merged_drop = {}
    for net in layers:
        merged_params.extend(net.all_params)
        merged_outputs.extend(net.all_layers)
        merged_drop.update(net.all_drop)

    # Deduplicate while keeping first-seen order.
    base.all_params = list_remove_repeat(merged_params)
    base.all_layers = list_remove_repeat(merged_outputs)
    base.all_drop = dict(merged_drop)

    return base


def initialize_global_variables(sess):
    """Initialize the global variables of TensorFlow.

    Run ``sess.run(tf.global_variables_initializer())`` for TF 0.12+ or
    ``sess.run(tf.initialize_all_variables())`` for TF 0.11.

    Parameters
    ----------
    sess : Session
        TensorFlow session.

    """
    assert sess is not None
    sess.run(tf.global_variables_initializer())
class Layer(object):
    """
    The basic :class:`Layer` class represents a single layer of a neural network.

    It should be subclassed when implementing new types of layers.
    Because each layer can keep track of the layer(s) feeding into it, a
    network's output :class:`Layer` instance can double as a handle to the full
    network.

    Parameters
    ----------
    prev_layer : :class:`Layer` or list of :class:`Layer` or None
        Previous layer(s); their layers/params/dropout tables are inherited.
    name : str or None
        A unique layer name (required).

    Methods
    ---------
    print_params(details=True, session=None)
        Print all parameters of this network.
    print_layers()
        Print all outputs of all layers of this network.
    count_params()
        Return the number of parameters of this network.

    Examples
    ---------
    >>> x = tf.placeholder("float32", [None, 100])
    >>> n = tl.layers.InputLayer(x, name='in')
    >>> n = tl.layers.DenseLayer(n, 80, name='d1')
    >>> n = tl.layers.DenseLayer(n, 80, name='d2')
    >>> n.count_params()
    ... 14560

    """

    def __init__(self, prev_layer=None, name=None):
        if name is None:
            raise ValueError('Layer must have a name.')

        # Prefix the name with the current variable scope so that names
        # remain unique when layers are built inside nested scopes.
        scope_name = tf.get_variable_scope().name
        if scope_name:
            name = scope_name + '/' + name
        self.name = name

        # Inherit all properties of the previous layer(s).
        if isinstance(prev_layer, Layer):  # 1. normal layer with a single input, e.g. DenseLayer
            # list()/dict() make shallow copies; without them, the
            # attributes would be shared by reference with prev_layer.
            self.all_layers = list(prev_layer.all_layers)
            self.all_params = list(prev_layer.all_params)
            self.all_drop = dict(prev_layer.all_drop)
        elif isinstance(prev_layer, list):  # 2. layer with multiple inputs, e.g. ConcatLayer
            self.all_layers = list_remove_repeat(sum([l.all_layers for l in prev_layer], []))
            self.all_params = list_remove_repeat(sum([l.all_params for l in prev_layer], []))
            self.all_drop = dict(sum([list(l.all_drop.items()) for l in prev_layer], []))
        elif isinstance(prev_layer, tf.Tensor):
            raise Exception("Please use InputLayer to convert Tensor/Placeholder to TL layer")
        elif prev_layer is not None:
            raise Exception("Unknown layer type %s" % type(prev_layer))

    def print_params(self, details=True, session=None):
        """Print all info of parameters in the network"""
        for i, p in enumerate(self.all_params):
            if details:
                try:
                    val = p.eval(session=session)
                    logging.info("  param {:3}: {:20} {:15}    {} (mean: {:<18}, median: {:<18}, std: {:<18})   ".format(
                        i, p.name, str(val.shape), p.dtype.name, val.mean(), np.median(val), val.std()))
                except Exception as e:
                    logging.info(str(e))
                    raise Exception("Hint: print params details after tl.layers.initialize_global_variables(sess) or use network.print_params(False).")
            else:
                logging.info("  param {:3}: {:20} {:15}    {}".format(i, p.name, str(p.get_shape()), p.dtype.name))
        logging.info("  num of params: %d" % self.count_params())

    def print_layers(self):
        """Print all info of layers in the network"""
        for i, layer in enumerate(self.all_layers):
            logging.info("  layer {:3}: {:20} {:15}    {}".format(i, layer.name, str(layer.get_shape()), layer.dtype.name))

    def count_params(self):
        """Return the number of parameters in the network"""
        n_params = 0
        for p in self.all_params:  # idiom: the index was unused, drop enumerate
            n = 1
            for s in p.get_shape():
                # Unknown dimensions (e.g. batch size `?`) cannot be cast
                # to int; count them as 1.
                try:
                    s = int(s)
                except Exception:
                    s = 1
                if s:
                    n = n * s
            n_params = n_params + n
        return n_params

    def __str__(self):
        return "  Last layer is: %s (%s) %s" % (self.__class__.__name__, self.name, self.outputs.get_shape().as_list())

    def __getitem__(self, key):
        # Slicing the network returns a new handle whose last output is
        # the sliced tensor; earlier layers/params are kept.
        net_new = Layer(name=self.name)
        net_new.inputs = self.inputs
        net_new.outputs = self.outputs[key]

        net_new.all_layers = list(self.all_layers[:-1])
        net_new.all_layers.append(net_new.outputs)
        net_new.all_params = list(self.all_params)
        net_new.all_drop = dict(self.all_drop)
        return net_new

    def __setitem__(self, key, item):
        raise NotImplementedError("%s: __setitem__" % self.name)

    def __delitem__(self, key):
        raise NotImplementedError("%s: __delitem__" % self.name)

    def __iter__(self):
        for x in self.all_layers:
            yield x

    def __len__(self):
        return len(self.all_layers)


class InputLayer(Layer):
    """
    The :class:`InputLayer` class is the starting layer of a neural network.

    Parameters
    ----------
    inputs : placeholder or tensor
        The input of a network.
    name : str
        A unique layer name.

    """

    def __init__(self, inputs=None, name='input'):
        Layer.__init__(self, name=name)
        logging.info("InputLayer  %s: %s" % (self.name, inputs.get_shape()))
        self.outputs = inputs
        # A fresh network starts here.
        self.all_layers = []
        self.all_params = []
        self.all_drop = {}
class OneHotInputLayer(Layer):
    """
    The :class:`OneHotInputLayer` class is the starting layer of a neural network, see ``tf.one_hot``.

    Parameters
    ----------
    inputs : placeholder or tensor
        The input of a network.
    depth : None or int
        If the input indices is rank N, the output will have rank N+1. The new
        axis is created at dimension `axis` (default: appended at the end).
    on_value : None or number
        The value to represent `ON`. If None, it will default to the value 1.
    off_value : None or number
        The value to represent `OFF`. If None, it will default to the value 0.
    axis : None or int
        The axis.
    dtype : None or TensorFlow dtype
        The data type, None means tf.float32.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder(tf.int32, shape=[None])
    >>> net = tl.layers.OneHotInputLayer(x, depth=8, name='onehot')
    ... (?, 8)

    """

    def __init__(self, inputs=None, depth=None, on_value=None, off_value=None, axis=None, dtype=None, name='input'):
        Layer.__init__(self, name=name)
        logging.info("OneHotInputLayer  %s: %s" % (self.name, inputs.get_shape()))
        if depth is None:
            logging.info("  [*] depth == None the number of output units is undefined")
        encoded = tf.one_hot(inputs, depth, on_value=on_value, off_value=off_value, axis=axis, dtype=dtype)
        self.outputs = encoded
        # A fresh network starts here.
        self.all_layers = []
        self.all_params = []
        self.all_drop = {}


class Word2vecEmbeddingInputlayer(Layer):
    """A fully connected word-embedding input layer trained with NCE loss.

    Words are fed in as integer indices; the output is the embedded word
    vector, and ``nce_cost`` holds the sampled-softmax training loss.

    Parameters
    ----------
    inputs : placeholder or tensor
        Integer-index word inputs, 2D tensor: [batch_size, num_steps(num_words)].
    train_labels : placeholder
        Word labels in integer index format.
    vocabulary_size : int
        The size of vocabulary, number of words.
    embedding_size : int
        The number of embedding dimensions.
    num_sampled : int
        The number of negative examples for NCE loss.
    nce_loss_args : dictionary
        The arguments for tf.nn.nce_loss().
    E_init : initializer
        The initializer for the embedding matrix.
    E_init_args : dictionary
        The arguments for the embedding initializer.
    nce_W_init : initializer
        The initializer for the NCE decoder weight matrix.
    nce_W_init_args : dictionary
        The arguments for the NCE decoder weight initializer.
    nce_b_init : initializer
        The initializer for the NCE decoder bias vector.
    nce_b_init_args : dictionary
        The arguments for the NCE decoder bias initializer.
    name : str
        A unique layer name.

    Attributes
    ----------
    nce_cost : Tensor
        The NCE loss.
    outputs : Tensor
        The embedding layer outputs.
    normalized_embeddings : Tensor
        Normalized embedding matrix.

    Examples
    --------
    >>> batch_size = 8
    >>> train_inputs = tf.placeholder(tf.int32, shape=(batch_size))
    >>> train_labels = tf.placeholder(tf.int32, shape=(batch_size, 1))
    >>> net = tl.layers.Word2vecEmbeddingInputlayer(inputs=train_inputs,
    ...     train_labels=train_labels, vocabulary_size=1000, embedding_size=200,
    ...     num_sampled=64, name='word2vec')
    ... (8, 200)

    References
    ----------
    `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `__

    """

    def __init__(
            self,
            inputs=None,
            train_labels=None,
            vocabulary_size=80000,
            embedding_size=200,
            num_sampled=64,
            nce_loss_args=None,
            E_init=tf.random_uniform_initializer(minval=-1.0, maxval=1.0),
            E_init_args=None,
            nce_W_init=tf.truncated_normal_initializer(stddev=0.03),
            nce_W_init_args=None,
            nce_b_init=tf.constant_initializer(value=0.0),
            nce_b_init_args=None,
            name='word2vec',
    ):
        nce_loss_args = nce_loss_args or {}
        E_init_args = E_init_args or {}
        nce_W_init_args = nce_W_init_args or {}
        nce_b_init_args = nce_b_init_args or {}

        Layer.__init__(self, name=name)
        self.inputs = inputs
        logging.info("Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

        # Slicing the embedding matrix is much faster than multiplying a
        # one-hot vector by it, so look the embeddings up directly.
        with tf.variable_scope(name):
            emb_matrix = tf.get_variable(
                name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
            looked_up = tf.nn.embedding_lookup(emb_matrix, self.inputs)
            # Variables for the NCE loss (negative sampling).
            dec_weights = tf.get_variable(
                name='nce_weights', shape=(vocabulary_size, embedding_size), initializer=nce_W_init, dtype=LayersConfig.tf_dtype, **nce_W_init_args)
            dec_biases = tf.get_variable(name='nce_biases', shape=(vocabulary_size), initializer=nce_b_init, dtype=LayersConfig.tf_dtype, **nce_b_init_args)

            # tf.nn.nce_loss draws a new sample of negative labels each
            # time the loss is evaluated.
            self.nce_cost = tf.reduce_mean(
                tf.nn.nce_loss(
                    weights=dec_weights,
                    biases=dec_biases,
                    inputs=looked_up,
                    labels=train_labels,
                    num_sampled=num_sampled,
                    num_classes=vocabulary_size,
                    **nce_loss_args))

        self.outputs = looked_up
        self.normalized_embeddings = tf.nn.l2_normalize(emb_matrix, 1)

        self.all_layers = [self.outputs]
        self.all_params = [emb_matrix, dec_weights, dec_biases]
        self.all_drop = {}
class EmbeddingInputlayer(Layer):
    """A look-up table input layer for word embedding.

    Word content is accessed by integer index; the output is the embedded
    word vector. To train an embedding matrix, use
    :class:`Word2vecEmbeddingInputlayer`; a pre-trained matrix can be
    assigned into this layer's parameters.

    Parameters
    ----------
    inputs : placeholder
        The input of a network. For word inputs,
        please use integer index format, 2D tensor : (batch_size, num_steps(num_words)).
    vocabulary_size : int
        The size of vocabulary, number of words.
    embedding_size : int
        The number of embedding dimensions.
    E_init : initializer
        The initializer for the embedding matrix.
    E_init_args : dictionary
        The arguments for embedding matrix initializer.
    name : str
        A unique layer name.

    Attributes
    ----------
    outputs : tensor
        The embedding layer output is a 3D tensor in the shape: (batch_size, num_steps(num_words), embedding_size).

    Examples
    --------
    >>> batch_size = 8
    >>> x = tf.placeholder(tf.int32, shape=(batch_size, ))
    >>> net = tl.layers.EmbeddingInputlayer(inputs=x, vocabulary_size=1000, embedding_size=50, name='embed')
    ... (8, 50)

    """

    def __init__(
            self,
            inputs=None,
            vocabulary_size=80000,
            embedding_size=200,
            E_init=tf.random_uniform_initializer(-0.1, 0.1),
            E_init_args=None,
            name='embedding',
    ):
        E_init_args = E_init_args or {}

        Layer.__init__(self, name=name)
        self.inputs = inputs
        logging.info("EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))

        with tf.variable_scope(name):
            lut = tf.get_variable(
                name='embeddings', shape=(vocabulary_size, embedding_size), initializer=E_init, dtype=LayersConfig.tf_dtype, **E_init_args)
            looked_up = tf.nn.embedding_lookup(lut, self.inputs)

        self.outputs = looked_up

        self.all_layers = [self.outputs]
        self.all_params = [lut]
        self.all_drop = {}


class AverageEmbeddingInputlayer(Layer):
    """The :class:`AverageEmbeddingInputlayer` averages over embeddings of inputs.

    This is often used as the input layer for models like DAN[1] and FastText[2].

    Parameters
    ----------
    inputs : placeholder or tensor
        The network input.
        For word inputs, please use integer index format, 2D tensor: (batch_size, num_steps(num_words)).
    vocabulary_size : int
        The size of vocabulary.
    embedding_size : int
        The dimension of the embedding vectors.
    pad_value : int
        The scalar padding value used in inputs, 0 as default.
    embeddings_initializer : initializer
        The initializer of the embedding matrix.
    embeddings_kwargs : None or dictionary
        The arguments to get embedding matrix variable.
    name : str
        A unique layer name.

    References
    ----------
    - [1] Iyyer, M., Manjunatha, V., Boyd-Graber, J., & Daum'e III, H. (2015). Deep Unordered Composition Rivals Syntactic Methods for Text Classification.
    - [2] Joulin, A., Grave, E., Bojanowski, P., & Mikolov, T. (2016). Bag of Tricks for Efficient Text Classification.

    Examples
    ---------
    >>> batch_size = 8
    >>> length = 5
    >>> x = tf.placeholder(tf.int32, shape=(batch_size, length))
    >>> net = tl.layers.AverageEmbeddingInputlayer(x, vocabulary_size=1000, embedding_size=50, name='avg')
    ... (8, 50)

    """

    def __init__(
            self,
            inputs,
            vocabulary_size,
            embedding_size,
            pad_value=0,
            embeddings_initializer=tf.random_uniform_initializer(-0.1, 0.1),
            embeddings_kwargs=None,
            name='average_embedding',
    ):
        Layer.__init__(self, name=name)

        if inputs.get_shape().ndims != 2:
            raise ValueError('inputs must be of size batch_size * batch_sentence_length')

        self.inputs = inputs

        logging.info("AverageEmbeddingInputlayer %s: (%d, %d)" % (name, vocabulary_size, embedding_size))
        with tf.variable_scope(name):
            self.embeddings = tf.get_variable(
                name='embeddings',
                shape=(vocabulary_size, embedding_size),
                initializer=embeddings_initializer,
                dtype=LayersConfig.tf_dtype,
                **(embeddings_kwargs or {}))

            word_vecs = tf.nn.embedding_lookup(
                self.embeddings,
                self.inputs,
                name='word_embeddings',
            )
            # Zero out the embeddings at padding positions so they do not
            # contribute to the sentence average.
            non_pad = tf.not_equal(self.inputs, pad_value, name='masks')
            word_vecs *= tf.cast(
                tf.expand_dims(non_pad, axis=-1),
                dtype=LayersConfig.tf_dtype,
            )
            summed = tf.reduce_sum(word_vecs, axis=1)

            # Number of real (non-padding) words in each sentence.
            lengths = tf.count_nonzero(
                non_pad,
                axis=1,
                keep_dims=True,
                dtype=LayersConfig.tf_dtype,
                name='sentence_lengths',
            )

            sentence_embeddings = tf.divide(
                summed,
                lengths + 1e-8,  # Epsilon avoids dividing by 0 for all-pad rows
                name='sentence_embeddings')

        self.outputs = sentence_embeddings
        self.all_layers = [self.outputs]
        self.all_params = [self.embeddings]
        self.all_drop = {}
+ + """ + + def __init__( + self, + prev_layer, + n_units=100, + act=tf.identity, + W_init=tf.truncated_normal_initializer(stddev=0.1), + b_init=tf.constant_initializer(value=0.0), + W_init_args=None, + b_init_args=None, + name='dense', + ): + if W_init_args is None: + W_init_args = {} + if b_init_args is None: + b_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + if self.inputs.get_shape().ndims != 2: + raise Exception("The input dimension must be rank 2, please reshape or flatten it") + + n_in = int(self.inputs.get_shape()[-1]) + self.n_units = n_units + logging.info("DenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__)) + with tf.variable_scope(name): + W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args) + if b_init is not None: + try: + b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + except Exception: # If initializer is a constant, do not specify shape. + b = tf.get_variable(name='b', initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args) + self.outputs = act(tf.matmul(self.inputs, W) + b) + else: + self.outputs = act(tf.matmul(self.inputs, W)) + + # Hint : list(), dict() is pass by value (shallow), without them, it is + # pass by reference. + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + if b_init is not None: + self.all_params.extend([W, b]) + else: + self.all_params.append(W) + + +class ReconLayer(DenseLayer): + """A reconstruction layer for :class:`DenseLayer` to implement AutoEncoder. + + It is often used to pre-train the previous :class:`DenseLayer` + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + x_recon : placeholder or tensor + The target for reconstruction. 
+ n_units : int + The number of units of the layer. It should equal ``x_recon``. + act : activation function + The activation function of this layer. + Normally, for sigmoid layer, the reconstruction activation is ``sigmoid``; + for rectifying layer, the reconstruction activation is ``softplus``. + name : str + A unique layer name. + + Examples + -------- + >>> x = tf.placeholder(tf.float32, shape=(None, 784)) + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = tl.layers.DenseLayer(net, n_units=196, act=tf.nn.sigmoid, name='dense') + >>> recon = tl.layers.ReconLayer(net, x_recon=x, n_units=784, act=tf.nn.sigmoid, name='recon') + >>> sess = tf.InteractiveSession() + >>> tl.layers.initialize_global_variables(sess) + >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784)) + >>> recon.pretrain(sess, x=x, X_train=X_train, X_val=X_val, denoise_name=None, n_epoch=500, batch_size=128, print_freq=1, save=True, save_name='w1pre_') + + Methods + ------- + pretrain(sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre') + Start to pre-train the parameters of the previous DenseLayer. + + Notes + ----- + The input layer should be `DenseLayer` or a layer that has only one axes. + You may need to modify this part to define your own cost function. + By default, the cost is implemented as follow: + - For sigmoid layer, the implementation can be `UFLDL `__ + - For rectifying layer, the implementation can be `Glorot (2011). 
Deep Sparse Rectifier Neural Networks `__ + + """ + + def __init__( + self, + prev_layer, + x_recon=None, + n_units=784, + act=tf.nn.softplus, + name='recon', + ): + DenseLayer.__init__(self, prev_layer=prev_layer, n_units=n_units, act=act, name=name) + logging.info("%s is a ReconLayer" % self.name) + + # y : reconstruction outputs; train_params : parameters to train + # Note that: train_params = [W_encoder, b_encoder, W_decoder, b_encoder] + y = self.outputs + self.train_params = self.all_params[-4:] + + # ===================================================================== + # + # You need to modify the below cost function and optimizer so as to + # implement your own pre-train method. + # + # ===================================================================== + lambda_l2_w = 0.004 + learning_rate = 0.0001 + logging.info(" lambda_l2_w: %f" % lambda_l2_w) + logging.info(" learning_rate: %f" % learning_rate) + + # Mean-square-error i.e. quadratic-cost + mse = tf.reduce_sum(tf.squared_difference(y, x_recon), 1) + mse = tf.reduce_mean(mse) # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean() + # mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), 1)) + # mse = tf.reduce_mean(tf.squared_difference(y, x_recon)) # : Error + # mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon))) # : Error + # Cross-entropy + # ce = cost.cross_entropy(y, x_recon) # : list , list , Error (only be used for softmax output) + # ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, x_recon)) # : list , list , Error (only be used for softmax output) + # ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # : list , index , Error (only be used for softmax output) + L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \ + + tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below + # L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * 
tf.reduce_mean( tf.square(self.train_params[2])) + + # DropNeuro + # P_o = cost.lo_regularizer(0.03)( + # self.train_params[0]) # + cost.lo_regularizer(0.5)(self.train_params[2]) # : if add lo on decoder, no neuron will be broken + # P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2]) + + # L1 of activation outputs + activation_out = self.all_layers[-2] + L1_a = 0.001 * tf.reduce_mean(activation_out) # : theano: T.mean( self.a[i] ) # some neuron are broken, white and black + # L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, 0) ) # : some neuron are broken, white and black + # L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, 1) ) # : some neuron are broken, white and black + # KL Divergence + beta = 4 + rho = 0.15 + p_hat = tf.reduce_mean(activation_out, 0) # theano: p_hat = T.mean( self.a[i], axis=0 ) + try: # TF1.0 + KLD = beta * tf.reduce_sum(rho * tf.log(tf.divide(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.subtract(float(1), p_hat)))) + except Exception: # TF0.12 + KLD = beta * tf.reduce_sum(rho * tf.log(tf.div(rho, p_hat)) + (1 - rho) * tf.log((1 - rho) / (tf.sub(float(1), p_hat)))) + # KLD = beta * tf.reduce_sum( rho * tf.log(rho/ p_hat) + (1- rho) * tf.log((1- rho)/(1- p_hat)) ) + # theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) ) + # Total cost + if act == tf.nn.softplus: + logging.info(' use: mse, L2_w, L1_a') + self.cost = mse + L1_a + L2_w + elif act == tf.nn.sigmoid: + # ---------------------------------------------------- + # Cross-entropy was used in Denoising AE + # logging.info(' use: ce, L2_w, KLD') + # self.cost = ce + L2_w + KLD + # ---------------------------------------------------- + # Mean-squared-error was used in Vanilla AE + logging.info(' use: mse, L2_w, KLD') + self.cost = mse + L2_w + KLD + # ---------------------------------------------------- + # Add DropNeuro penalty (P_o) can remove 
neurons of AE + # logging.info(' use: mse, L2_w, KLD, P_o') + # self.cost = mse + L2_w + KLD + P_o + # ---------------------------------------------------- + # Add DropNeuro penalty (P_i) can remove neurons of previous layer + # If previous layer is InputLayer, it means remove useless features + # logging.info(' use: mse, L2_w, KLD, P_i') + # self.cost = mse + L2_w + KLD + P_i + else: + raise Exception("Don't support the given reconstruct activation function") + + self.train_op = tf.train.AdamOptimizer( + learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize( + self.cost, var_list=self.train_params) + # self.train_op = tf.train.GradientDescentOptimizer(1.0).minimize(self.cost, var_list=self.train_params) + + def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre_'): + # ==================================================== + # + # You need to modify the cost function in __init__() so as to + # get your own pre-train method. 
+ # + # ==================================================== + logging.info(" [*] %s start pretrain" % self.name) + logging.info(" batch_size: %d" % batch_size) + if denoise_name: + logging.info(" denoising layer keep: %f" % self.all_drop[LayersConfig.set_keep[denoise_name]]) + dp_denoise = self.all_drop[LayersConfig.set_keep[denoise_name]] + else: + logging.info(" no denoising layer") + + for epoch in range(n_epoch): + start_time = time.time() + for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True): + dp_dict = utils.dict_to_one(self.all_drop) + if denoise_name: + dp_dict[LayersConfig.set_keep[denoise_name]] = dp_denoise + feed_dict = {x: X_train_a} + feed_dict.update(dp_dict) + sess.run(self.train_op, feed_dict=feed_dict) + + if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: + logging.info("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) + train_loss, n_batch = 0, 0 + for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True): + dp_dict = utils.dict_to_one(self.all_drop) + feed_dict = {x: X_train_a} + feed_dict.update(dp_dict) + err = sess.run(self.cost, feed_dict=feed_dict) + train_loss += err + n_batch += 1 + logging.info(" train loss: %f" % (train_loss / n_batch)) + val_loss, n_batch = 0, 0 + for X_val_a, _ in iterate.minibatches(X_val, X_val, batch_size, shuffle=True): + dp_dict = utils.dict_to_one(self.all_drop) + feed_dict = {x: X_val_a} + feed_dict.update(dp_dict) + err = sess.run(self.cost, feed_dict=feed_dict) + val_loss += err + n_batch += 1 + logging.info(" val loss: %f" % (val_loss / n_batch)) + if save: + try: + visualize.draw_weights( + self.train_params[0].eval(), second=10, saveable=True, shape=[28, 28], name=save_name + str(epoch + 1), fig_idx=2012) + files.save_npz([self.all_params[0]], name=save_name + str(epoch + 1) + '.npz') + except Exception: + raise Exception( + "You should change the visualize.W() in ReconLayer.pretrain(), if you want to save the feature 
class DropoutLayer(Layer):
    """Noise layer that randomly zeroes activations of the previous layer.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer.
    keep : float
        Probability of *keeping* each activation; the rest are set to zero.
    is_fix : boolean
        If True the keep probability is baked into the graph and cannot be
        changed via ``feed_dict``; if False a placeholder is created and
        registered in ``all_drop`` so it can be toggled at run time.
    is_train : boolean
        If False the layer is skipped entirely (identity pass-through).
    seed : int or None
        Random seed for the dropout mask.
    name : str
        A unique layer name.
    """

    def __init__(
            self,
            prev_layer,
            keep=0.5,
            is_fix=False,
            is_train=True,
            seed=None,
            name='dropout_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)

        # Inference-only graphs skip dropout altogether.
        if is_train is False:
            logging.info(" skip DropoutLayer")
            self.outputs = prev_layer.outputs
            return

        self.inputs = prev_layer.outputs
        logging.info("DropoutLayer %s: keep:%f is_fix:%s" % (self.name, keep, is_fix))

        if is_fix:
            # Constant keep probability: nothing to feed at session time.
            self.outputs = tf.nn.dropout(self.inputs, keep, seed=seed, name=name)
        else:
            # The keep-prob placeholder is keyed by the layer name so callers
            # can enable/disable dropout through feed_dict (see all_drop).
            keep_plh = tf.placeholder(tf.float32)
            LayersConfig.set_keep[name] = keep_plh
            self.outputs = tf.nn.dropout(self.inputs, keep_plh, seed=seed, name=name)

        if is_fix is False:
            self.all_drop.update({LayersConfig.set_keep[name]: keep})
        self.all_layers.append(self.outputs)
Additionally, if the key is a placeholder, the shape of the + # value will be checked for compatibility with the placeholder. + # If the key is a SparseTensor, the value should be a SparseTensorValue. + + +class GaussianNoiseLayer(Layer): + """ + The :class:`GaussianNoiseLayer` class is noise layer that adding noise with + gaussian distribution to the activation. + + Parameters + ------------ + layer : :class:`Layer` + Previous layer. + mean : float + The mean. Default is 0. + stddev : float + The standard deviation. Default is 1. + is_train : boolean + Is trainable layer. If False, skip this layer. default is True. + seed : int or None + The seed for random noise. + name : str + A unique layer name. + + Examples + ---------- + >>> x = tf.placeholder(tf.float32, shape=(100, 784)) + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = tl.layers.DenseLayer(net, n_units=100, act=tf.nn.relu, name='dense3') + >>> net = tl.layers.GaussianNoiseLayer(net, name='gaussian') + ... (64, 100) + + """ + + def __init__( + self, + prev_layer, + mean=0.0, + stddev=1.0, + is_train=True, + seed=None, + name='gaussian_noise_layer', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + if is_train is False: + logging.info(" skip GaussianNoiseLayer") + self.outputs = prev_layer.outputs + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + else: + self.inputs = prev_layer.outputs + logging.info("GaussianNoiseLayer %s: mean:%f stddev:%f" % (self.name, mean, stddev)) + with tf.variable_scope(name): + # noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape()) + noise = tf.random_normal(shape=self.inputs.get_shape(), mean=mean, stddev=stddev, seed=seed) + self.outputs = self.inputs + noise + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + + +class 
class DropconnectDenseLayer(Layer):
    """Dense layer with DropConnect: connections (weights) between this layer
    and the previous one are randomly dropped according to a keeping probability.

    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer; its output must be rank 2 (batch, features).
    keep : float
        Probability of keeping each connection.
    n_units : int
        The number of units of this layer.
    act : activation function
        The activation function of this layer.
    W_init : weights initializer
        The initializer for the weight matrix.
    b_init : biases initializer
        The initializer for the bias vector.
    W_init_args : dictionary
        The arguments for the weight matrix initializer.
    b_init_args : dictionary
        The arguments for the bias vector initializer.
    name : str
        A unique layer name.

    Raises
    ------
    Exception
        If the input is not rank 2.
    """

    def __init__(
            self,
            prev_layer,
            keep=0.5,
            n_units=100,
            act=tf.identity,
            W_init=tf.truncated_normal_initializer(stddev=0.1),
            b_init=tf.constant_initializer(value=0.0),
            W_init_args=None,
            b_init_args=None,
            name='dropconnect_layer',
    ):
        if W_init_args is None:
            W_init_args = {}
        if b_init_args is None:
            b_init_args = {}

        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        if self.inputs.get_shape().ndims != 2:
            raise Exception("The input dimension must be rank 2")
        n_in = int(self.inputs.get_shape()[-1])
        self.n_units = n_units
        logging.info("DropconnectDenseLayer %s: %d %s" % (self.name, self.n_units, act.__name__))

        with tf.variable_scope(name):
            W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, dtype=LayersConfig.tf_dtype, **W_init_args)
            b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, dtype=LayersConfig.tf_dtype, **b_init_args)
            # Dropout is applied to the *weights*, not the activations.
            LayersConfig.set_keep[name] = tf.placeholder(tf.float32)
            W_dropcon = tf.nn.dropout(W, LayersConfig.set_keep[name])
            self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b)

        # FIX: the old code created the keep-prob placeholder but never
        # registered it in all_drop (so any sess.run fed via net.all_drop
        # failed with an unfed placeholder) and never added W/b to
        # all_params (so print_params/training utilities missed them).
        self.all_drop.update({LayersConfig.set_keep[name]: keep})
        self.all_layers.append(self.outputs)
        self.all_params.extend([W, b])
import _logging as logging +import tensorflow as tf + +__all__ = [ + 'ExpandDimsLayer', + 'TileLayer', +] + + +class ExpandDimsLayer(Layer): + """ + The :class:`ExpandDimsLayer` class inserts a dimension of 1 into a tensor's shape, + see `tf.expand_dims() `__ . + + Parameters + ---------- + layer : :class:`Layer` + The previous layer. + axis : int + The dimension index at which to expand the shape of input. + name : str + A unique layer name. + + Examples + -------- + >>> x = tf.placeholder(tf.float32, (None, 100)) + >>> n = tl.layers.InputLayer(x, name='in') + >>> n = tl.layers.ExpandDimsLayer(n, 2) + ... [None, 100, 1] + """ + + def __init__( + self, + prev_layer, + axis, + name='expand_dims', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + + logging.info("ExpandDimsLayer %s: axis:%d" % (self.name, axis)) + with tf.variable_scope(name): + try: # TF12 TF1.0 + self.outputs = tf.expand_dims(self.inputs, axis=axis) + except Exception: # TF11 + self.outputs = tf.expand_dims(self.inputs, dim=axis) + # self.all_layers = list(layer.all_layers) + self.all_params = list(prev_layer.all_params) + self.all_drop = dict(prev_layer.all_drop) + self.all_layers.append(self.outputs) + # self.all_params.extend( variables ) + + +class TileLayer(Layer): + """ + The :class:`TileLayer` class constructs a tensor by tiling a given tensor, + see `tf.tile() `__ . + + Parameters + ---------- + layer : :class:`Layer` + The previous layer. + multiples: tensor + Must be one of the following types: int32, int64. + 1-D Length must be the same as the number of dimensions in input. + name : str + A unique layer name. + + + Examples + -------- + >>> x = tf.placeholder(tf.float32, (None, 100)) + >>> n = tl.layers.InputLayer(x, name='in') + >>> n = tl.layers.ExpandDimsLayer(n, 2) + >>> n = tl.layers.TileLayer(n, [-1, 1, 3]) + ... 
[None, 100, 3] + """ + + def __init__( + self, + prev_layer=None, + multiples=None, + name='tile', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + + logging.info("TileLayer %s: multiples:%s" % (self.name, multiples)) + with tf.variable_scope(name): + self.outputs = tf.tile(self.inputs, multiples=multiples) + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + # self.all_params.extend( variables ) diff --git a/tensorlayer/layers/flow_control.py b/tensorlayer/layers/flow_control.py new file mode 100644 index 0000000..2ab0d12 --- /dev/null +++ b/tensorlayer/layers/flow_control.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'MultiplexerLayer', +] + + +class MultiplexerLayer(Layer): + """ + The :class:`MultiplexerLayer` selects inputs to be forwarded to output. + see `tutorial_mnist_multiplexer.py`. + + Parameters + ---------- + layers : a list of :class:`Layer` + The input layers. + name : str + A unique layer name. + + Attributes + ---------- + sel : placeholder + The placeholder takes an integer for selecting which layer to output. 
class MultiplexerLayer(Layer):
    """Forward exactly one of several input layers to the output, chosen at
    run time through the integer placeholder ``sel``.

    Parameters
    ----------
    layers : a list of :class:`Layer`
        The candidate input layers; all must produce same-shaped outputs.
    name : str
        A unique layer name.

    Attributes
    ----------
    sel : placeholder
        Feed an integer index here to select which input is forwarded.
    """

    def __init__(self, layers, name='mux_layer'):
        Layer.__init__(self, prev_layer=layers, name=name)
        self.n_inputs = len(layers)

        # Stack every candidate output along a new leading axis so a single
        # gather can pick one of them.
        self.inputs = [candidate.outputs for candidate in layers]
        try:  # TF1.0+
            stacked = tf.stack(self.inputs, name=name)
        except Exception:  # older TF used `pack`
            stacked = tf.pack(self.inputs, name=name)

        logging.info("MultiplexerLayer %s: n_inputs:%d" % (self.name, self.n_inputs))

        # Integer selector fed at session time; outputs = stacked[sel, ...].
        self.sel = tf.placeholder(tf.int32)
        self.outputs = tf.gather(stacked, self.sel, name=name)

        self.all_layers.append(self.outputs)
+ + Examples + --------- + Non-parametric case + + >>> x = tf.placeholder(tf.float32, shape=[None, 1], name='x') + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = LambdaLayer(net, lambda x: 2*x, name='lambda') + + Parametric case, merge other wrappers into TensorLayer + + >>> from keras.layers import * + >>> from tensorlayer.layers import * + >>> def keras_block(x): + >>> x = Dropout(0.8)(x) + >>> x = Dense(800, activation='relu')(x) + >>> x = Dropout(0.5)(x) + >>> x = Dense(800, activation='relu')(x) + >>> x = Dropout(0.5)(x) + >>> logits = Dense(10, activation='linear')(x) + >>> return logits + >>> net = InputLayer(x, name='input') + >>> net = LambdaLayer(net, fn=keras_block, name='keras') + + """ + + def __init__( + self, + prev_layer, + fn, + fn_args=None, + name='lambda_layer', + ): + if fn_args is None: + fn_args = {} + Layer.__init__(self, prev_layer=prev_layer, name=name) + assert prev_layer is not None + assert fn is not None + self.inputs = prev_layer.outputs + logging.info("LambdaLayer %s" % self.name) + with tf.variable_scope(name) as vs: + self.outputs = fn(self.inputs, **fn_args) + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend(variables) + + +class SlimNetsLayer(Layer): + """A layer that merges TF-Slim models into TensorLayer. + + Models can be found in `slim-model `__, + see Inception V3 example on `Github `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + slim_layer : a slim network function + The network you want to stack onto, end with ``return net, end_points``. + slim_args : dictionary + The arguments for the slim model. + name : str + A unique layer name. + + Notes + ----- + - As TF-Slim stores the layers as dictionary, the ``all_layers`` in this network is not in order ! 
Fortunately, the ``all_params`` are in order. + + """ + + def __init__( + self, + prev_layer, + slim_layer, + slim_args=None, + name='tfslim_layer', + ): + if slim_layer is None: + raise ValueError("slim layer is None") + if slim_args is None: + slim_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("SlimNetsLayer %s: %s" % (self.name, slim_layer.__name__)) + + # with tf.variable_scope(name) as vs: + # net, end_points = slim_layer(self.inputs, **slim_args) + # slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + net, end_points = slim_layer(self.inputs, **slim_args) + + slim_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=name) + if slim_variables == []: + logging.info( + "No variables found under %s : the name of SlimNetsLayer should be matched with the begining of the ckpt file, see tutorial_inceptionV3_tfslim.py for more details" + % name) + + self.outputs = net + + slim_layers = [] + for v in end_points.values(): + # tf.contrib.layers.summaries.summarize_activation(v) + slim_layers.append(v) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + + self.all_layers.extend(slim_layers) + self.all_params.extend(slim_variables) + + +@deprecated("2018-06-30", "This layer will be deprecated soon as :class:`LambdaLayer` can do the same thing.") +class KerasLayer(Layer): + """A layer to import Keras layers into TensorLayer. + + Example can be found here `tutorial_keras.py `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer + keras_layer : function + A tensor in tensor out function for building model. + keras_args : dictionary + The arguments for the `keras_layer`. + name : str + A unique layer name. 
+ + """ + + def __init__( + self, + prev_layer, + keras_layer, + keras_args=None, + name='keras_layer', + ): + if prev_layer is None: + raise ValueError("layer is None") + if keras_args is None: + keras_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("KerasLayer %s: %s" % (self.name, keras_layer)) + logging.info("This API will be removed, please use LambdaLayer instead.") + with tf.variable_scope(name) as vs: + self.outputs = keras_layer(self.inputs, **keras_args) + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend(variables) + + +@deprecated("2018-06-30", "This layer will be deprecated soon as :class:`LambdaLayer` can do the same thing.") +class EstimatorLayer(Layer): + """A layer that accepts a user-defined model. + + It is similar with :class:`KerasLayer`, see `tutorial_keras.py `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer + model_fn : function + A tensor in tensor out function for building model. + args : dictionary + The arguments for the `model_fn`. + name : str + A unique layer name. 
+ + """ + + def __init__( + self, + prev_layer, + model_fn, + args=None, + name='estimator_layer', + ): + if model_fn is None: + raise ValueError('model fn is None') + if args is None: + args = {} + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("EstimatorLayer %s: %s" % (self.name, model_fn)) + logging.info("This API will be removed, please use LambdaLayer instead.") + with tf.variable_scope(name) as vs: + self.outputs = model_fn(self.inputs, **args) + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend(variables) diff --git a/tensorlayer/layers/merge.py b/tensorlayer/layers/merge.py new file mode 100644 index 0000000..b2509bf --- /dev/null +++ b/tensorlayer/layers/merge.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'ConcatLayer', + 'ElementwiseLayer', +] + + +class ConcatLayer(Layer): + """A layer that concats multiple tensors according to given axis.. + + Parameters + ---------- + layers : list of :class:`Layer` + List of layers to concatenate. + concat_dim : int + The dimension to concatenate. + name : str + A unique layer name. + + Examples + ---------- + >>> sess = tf.InteractiveSession() + >>> x = tf.placeholder(tf.float32, shape=[None, 784]) + >>> inputs = tl.layers.InputLayer(x, name='input_layer') + >>> net1 = tl.layers.DenseLayer(inputs, 800, act=tf.nn.relu, name='relu1_1') + >>> net2 = tl.layers.DenseLayer(inputs, 300, act=tf.nn.relu, name='relu2_1') + >>> net = tl.layers.ConcatLayer([net1, net2], 1, name ='concat_layer') + ... InputLayer input_layer (?, 784) + ... DenseLayer relu1_1: 800, relu + ... DenseLayer relu2_1: 300, relu + ... 
ConcatLayer concat_layer, 1100 + >>> tl.layers.initialize_global_variables(sess) + >>> net.print_params() + ... [TL] param 0: relu1_1/W:0 (784, 800) float32_ref + ... [TL] param 1: relu1_1/b:0 (800,) float32_ref + ... [TL] param 2: relu2_1/W:0 (784, 300) float32_ref + ... [TL] param 3: relu2_1/b:0 (300,) float32_ref + ... num of params: 863500 + >>> net.print_layers() + ... [TL] layer 0: relu1_1/Relu:0 (?, 800) float32 + ... [TL] layer 1: relu2_1/Relu:0 (?, 300) float32 + ... [TL] layer 2: concat_layer:0 (?, 1100) float32 + + """ + + def __init__( + self, + layers, + concat_dim=-1, + name='concat_layer', + ): + Layer.__init__(self, prev_layer=layers, name=name) + self.inputs = [] + for l in layers: + self.inputs.append(l.outputs) + try: # TF1.0 + self.outputs = tf.concat(self.inputs, concat_dim, name=name) + except Exception: # TF0.12 + self.outputs = tf.concat(concat_dim, self.inputs, name=name) + + logging.info("ConcatLayer %s: axis: %d" % (self.name, concat_dim)) + + # self.all_layers = list(layers[0].all_layers) + # self.all_params = list(layers[0].all_params) + # self.all_drop = dict(layers[0].all_drop) + # + # for i in range(1, len(layers)): + # self.all_layers.extend(list(layers[i].all_layers)) + # self.all_params.extend(list(layers[i].all_params)) + # self.all_drop.update(dict(layers[i].all_drop)) + # + # self.all_layers = list_remove_repeat(self.all_layers) + # self.all_params = list_remove_repeat(self.all_params) + + self.all_layers.append(self.outputs) + + +class ElementwiseLayer(Layer): + """A layer that combines multiple :class:`Layer` that have the same output shapes + according to an element-wise operation. + + Parameters + ---------- + layers : list of :class:`Layer` + The list of layers to combine. + combine_fn : a TensorFlow element-wise combine function + e.g. AND is ``tf.minimum`` ; OR is ``tf.maximum`` ; ADD is ``tf.add`` ; MUL is ``tf.multiply`` and so on. + See `TensorFlow Math API `__ . 
+ act : activation function + The activation function of this layer. + name : str + A unique layer name. + + Examples + -------- + >>> net_0 = tl.layers.DenseLayer(inputs, n_units=500, act=tf.nn.relu, name='net_0') + >>> net_1 = tl.layers.DenseLayer(inputs, n_units=500, act=tf.nn.relu, name='net_1') + >>> net = tl.layers.ElementwiseLayer([net_0, net_1], combine_fn=tf.minimum, name='minimum') + >>> net.print_params(False) + ... [TL] param 0: net_0/W:0 (784, 500) float32_ref + ... [TL] param 1: net_0/b:0 (500,) float32_ref + ... [TL] param 2: net_1/W:0 (784, 500) float32_ref + ... [TL] param 3: net_1/b:0 (500,) float32_ref + >>> net.print_layers() + ... [TL] layer 0: net_0/Relu:0 (?, 500) float32 + ... [TL] layer 1: net_1/Relu:0 (?, 500) float32 + ... [TL] layer 2: minimum:0 (?, 500) float32 + """ + + def __init__( + self, + layers, + combine_fn=tf.minimum, + act=None, + name='elementwise_layer', + ): + Layer.__init__(self, prev_layer=layers, name=name) + + logging.info("ElementwiseLayer %s: size:%s fn:%s" % (self.name, layers[0].outputs.get_shape(), combine_fn.__name__)) + + self.outputs = layers[0].outputs + + for l in layers[1:]: + self.outputs = combine_fn(self.outputs, l.outputs, name=name) + + if act: + self.outputs = act(self.outputs) + + # self.all_layers = list(layers[0].all_layers) + # self.all_params = list(layers[0].all_params) + # self.all_drop = dict(layers[0].all_drop) + # + # for i in range(1, len(layers)): + # self.all_layers.extend(list(layers[i].all_layers)) + # self.all_params.extend(list(layers[i].all_params)) + # self.all_drop.update(dict(layers[i].all_drop)) + # + # self.all_layers = list_remove_repeat(self.all_layers) + # self.all_params = list_remove_repeat(self.all_params) + # # self.all_drop = list_remove_repeat(self.all_drop) + + self.all_layers.append(self.outputs) diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py new file mode 100644 index 0000000..6d5f028 --- /dev/null +++ 
b/tensorlayer/layers/normalization.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'LocalResponseNormLayer', + 'BatchNormLayer', + 'InstanceNormLayer', + 'LayerNormLayer', +] + + +class LocalResponseNormLayer(Layer): + """The :class:`LocalResponseNormLayer` layer is for Local Response Normalization. + See ``tf.nn.local_response_normalization`` or ``tf.nn.lrn`` for new TF version. + The 4-D input tensor is a 3-D array of 1-D vectors (along the last dimension), and each vector is normalized independently. + Within a given vector, each component is divided by the weighted square-sum of inputs within depth_radius. + + Parameters + ----------- + layer : :class:`Layer` + The previous layer with a 4D output shape. + depth_radius : int + Depth radius. 0-D. Half-width of the 1-D normalization window. + bias : float + An offset which is usually positive and shall avoid dividing by 0. + alpha : float + A scale factor which is usually positive. + beta : float + An exponent. + name : str + A unique layer name. + + """ + + def __init__( + self, + prev_layer, + depth_radius=None, + bias=None, + alpha=None, + beta=None, + name='lrn_layer', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("LocalResponseNormLayer %s: depth_radius: %s, bias: %s, alpha: %s, beta: %s" % (self.name, str(depth_radius), str(bias), str(alpha), + str(beta))) + with tf.variable_scope(name): + self.outputs = tf.nn.lrn(self.inputs, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + + +class BatchNormLayer(Layer): + """ + The :class:`BatchNormLayer` is a batch normalization layer for both fully-connected and convolution outputs. 
class BatchNormLayer(Layer):
    """
    The :class:`BatchNormLayer` is a batch normalization layer for both fully-connected and convolution outputs.
    See ``tf.nn.batch_normalization`` and ``tf.nn.moments``.

    Parameters
    ----------
    layer : :class:`Layer`
        The previous layer.
    decay : float
        A decay factor for `ExponentialMovingAverage`.
        Suggest to use a large value for large dataset.
    epsilon : float
        Epsilon.
    act : activation function
        The activation function of this layer.
    is_train : boolean
        Is being used for training or inference.
    beta_init : initializer
        The initializer for initializing beta.
    gamma_init : initializer
        The initializer for initializing gamma.
    dtype : TensorFlow dtype
        tf.float32 (default) or tf.float16.
    name : str
        A unique layer name.

    References
    ----------
    - `Source `__
    - `stackoverflow `__

    """

    def __init__(
            self,
            prev_layer,
            decay=0.9,
            epsilon=0.00001,
            act=tf.identity,
            is_train=False,
            beta_init=tf.zeros_initializer,
            gamma_init=tf.random_normal_initializer(mean=1.0, stddev=0.002),
            name='batchnorm_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        logging.info("BatchNormLayer %s: decay:%f epsilon:%f act:%s is_train:%s" % (self.name, decay, epsilon, act.__name__, is_train))
        x_shape = self.inputs.get_shape()
        # Per-channel parameters: one beta/gamma per unit of the last dimension.
        params_shape = x_shape[-1:]

        from tensorflow.python.training import moving_averages

        with tf.variable_scope(name):
            # Normalize over every axis except the last (batch + spatial dims).
            axis = list(range(len(x_shape) - 1))

            # 1. beta, gamma
            # NOTE(review): `tf.__version__ > '0.12.1'` is a lexicographic string
            # comparison — fragile for multi-digit components; kept as-is here.
            # From TF 0.12.1+ `tf.zeros_initializer` is a class, so instantiate it.
            if tf.__version__ > '0.12.1' and beta_init == tf.zeros_initializer:
                beta_init = beta_init()
            beta = tf.get_variable('beta', shape=params_shape, initializer=beta_init, dtype=LayersConfig.tf_dtype, trainable=is_train)

            gamma = tf.get_variable(
                'gamma',
                shape=params_shape,
                initializer=gamma_init,
                dtype=LayersConfig.tf_dtype,
                trainable=is_train,
            )

            # 2. moving statistics, never trained directly — updated via the
            # moving-average assign ops below.
            if tf.__version__ > '0.12.1':
                moving_mean_init = tf.zeros_initializer()
            else:
                moving_mean_init = tf.zeros_initializer
            moving_mean = tf.get_variable('moving_mean', params_shape, initializer=moving_mean_init, dtype=LayersConfig.tf_dtype, trainable=False)
            moving_variance = tf.get_variable(
                'moving_variance',
                params_shape,
                initializer=tf.constant_initializer(1.),
                dtype=LayersConfig.tf_dtype,
                trainable=False,
            )

            # 3. Batch statistics; the update ops are only performed when training.
            mean, variance = tf.nn.moments(self.inputs, axis)
            try:  # TF12
                update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=False)  # if zero_debias=True, has bias
                update_moving_variance = moving_averages.assign_moving_average(
                    moving_variance, variance, decay, zero_debias=False)  # if zero_debias=True, has bias
            except Exception:  # TF11 has no zero_debias argument
                update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)

            def mean_var_with_update():
                # Force the moving-average updates to run before returning the
                # batch statistics.
                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    return tf.identity(mean), tf.identity(variance)

            if is_train:
                # Training: normalize with batch statistics and update the moving ones.
                mean, var = mean_var_with_update()
                self.outputs = act(tf.nn.batch_normalization(self.inputs, mean, var, beta, gamma, epsilon))
            else:
                # Inference: normalize with the accumulated moving statistics.
                self.outputs = act(tf.nn.batch_normalization(self.inputs, moving_mean, moving_variance, beta, gamma, epsilon))

            variables = [beta, gamma, moving_mean, moving_variance]

        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
class InstanceNormLayer(Layer):
    """The :class:`InstanceNormLayer` class is for instance normalization.

    Parameters
    -----------
    layer : :class:`Layer`
        The previous layer.
    act : activation function.
        The activation function of this layer.
    epsilon : float
        Epsilon.
    name : str
        A unique layer name

    """

    def __init__(
            self,
            prev_layer,
            act=tf.identity,
            epsilon=1e-5,
            name='instan_norm',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        logging.info("InstanceNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__))

        with tf.variable_scope(name) as vs:
            # Per-sample statistics over axes 1 and 2 (spatial dims), kept
            # broadcastable against the input.
            mean, var = tf.nn.moments(self.inputs, [1, 2], keep_dims=True)
            n_channels = self.inputs.get_shape()[-1]
            scale = tf.get_variable(
                'scale', [n_channels], initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02), dtype=LayersConfig.tf_dtype)
            offset = tf.get_variable('offset', [n_channels], initializer=tf.constant_initializer(0.0), dtype=LayersConfig.tf_dtype)
            normalized = tf.div(self.inputs - mean, tf.sqrt(var + epsilon))
            self.outputs = act(scale * normalized + offset)
            variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
+ + """ + + def __init__(self, + prev_layer, + center=True, + scale=True, + act=tf.identity, + reuse=None, + variables_collections=None, + outputs_collections=None, + trainable=True, + begin_norm_axis=1, + begin_params_axis=-1, + name='layernorm'): + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("LayerNormLayer %s: act:%s" % (self.name, act.__name__)) + + if tf.__version__ < "1.3": + # raise Exception("Please use TF 1.3+") + with tf.variable_scope(name) as vs: + self.outputs = tf.contrib.layers.layer_norm( + self.inputs, + center=center, + scale=scale, + activation_fn=act, + reuse=reuse, + variables_collections=variables_collections, + outputs_collections=outputs_collections, + trainable=trainable, + # begin_norm_axis=begin_norm_axis, + # begin_params_axis=begin_params_axis, + scope='var', + ) + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + else: + with tf.variable_scope(name) as vs: + self.outputs = tf.contrib.layers.layer_norm( + self.inputs, + center=center, + scale=scale, + activation_fn=act, + reuse=reuse, + variables_collections=variables_collections, + outputs_collections=outputs_collections, + trainable=trainable, + begin_norm_axis=begin_norm_axis, + begin_params_axis=begin_params_axis, + scope='var', + ) + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend(variables) diff --git a/tensorlayer/layers/object_detection.py b/tensorlayer/layers/object_detection.py new file mode 100644 index 0000000..fde01d0 --- /dev/null +++ b/tensorlayer/layers/object_detection.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. 
class ROIPoolingLayer(Layer):
    """
    The region of interest pooling layer.

    Parameters
    -----------
    layer : :class:`Layer`
        The previous layer.
    rois : tuple of int
        Regions of interest in the format of (feature map index, upper left, bottom right).
    pool_height : int
        The size of the pooling sections.
    pool_width : int
        The size of the pooling sections.
    name : str
        A unique layer name.

    Notes
    -----------
    - This implementation is imported from `Deepsense-AI `__ .
    - Please install it by the instruction `HERE `__.

    """

    def __init__(
            self,
            prev_layer,
            rois,
            pool_height=2,
            pool_width=2,
            name='roipooling_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        logging.info("ROIPoolingLayer %s: (%d, %d)" % (self.name, pool_height, pool_width))
        # The op lives in a vendored third-party package that must be built
        # separately; log install hints if it is missing.
        try:
            from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import roi_pooling
        except Exception as e:
            logging.info(e)
            logging.info("HINT: 1. https://github.com/deepsense-ai/roi-pooling  2. tensorlayer/third_party/roi_pooling")
        # NOTE(review): if the import above failed, `roi_pooling` is unbound and
        # the next line raises NameError right after the hints are logged.
        self.outputs = roi_pooling(self.inputs, rois, pool_height, pool_width)

        self.all_layers.append(self.outputs)
class PadLayer(Layer):
    """
    The :class:`PadLayer` class is a padding layer for any mode and dimension.
    Please see `tf.pad `__ for usage.

    Parameters
    ----------
    layer : :class:`Layer`
        The previous layer.
    paddings : Tensor
        The int32 values to pad.
    mode : str
        "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
    name : str
        A unique layer name.

    """

    def __init__(
            self,
            prev_layer,
            paddings,
            mode='CONSTANT',
            name='pad_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        assert paddings is not None, "paddings should be a Tensor of type int32. see https://www.tensorflow.org/api_docs/python/tf/pad"
        self.inputs = prev_layer.outputs
        # str() instead of list(): `paddings` is documented as a Tensor, and
        # calling list() on a graph-mode Tensor raises TypeError (not iterable),
        # crashing the constructor just to format a log line.
        logging.info("PadLayer   %s: paddings:%s mode:%s" % (self.name, str(paddings), mode))

        self.outputs = tf.pad(self.inputs, paddings=paddings, mode=mode, name=name)

        self.all_layers.append(self.outputs)
class PoolLayer(Layer):
    """Generic pooling layer.

    Choose ``tf.nn.max_pool`` / ``tf.nn.avg_pool`` for 2D input, or
    ``tf.nn.max_pool3d`` / ``tf.nn.avg_pool3d`` for 3D input.

    Parameters
    ----------
    layer : :class:`Layer`
        The previous layer.
    ksize : tuple of int
        The size of the window for each dimension of the input tensor
        (len(ksize) >= 4).
    strides : tuple of int
        The stride of the sliding window for each dimension of the input
        tensor (len(strides) >= 4).
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    pool : pooling function
        One of ``tf.nn.max_pool``, ``tf.nn.avg_pool``, ``tf.nn.max_pool3d``
        and ``tf.nn.avg_pool3d``.
    name : str
        A unique layer name.

    Examples
    --------
    - see :class:`Conv2dLayer`.

    """

    def __init__(
            self,
            prev_layer=None,
            ksize=(1, 2, 2, 1),
            strides=(1, 2, 2, 1),
            padding='SAME',
            pool=tf.nn.max_pool,
            name='pool_layer',
    ):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        logging.info("PoolLayer %s: ksize:%s strides:%s padding:%s pool:%s" % (self.name, str(ksize), str(strides), padding, pool.__name__))

        pooled = pool(self.inputs, ksize=ksize, strides=strides, padding=padding, name=name)
        self.outputs = pooled
        self.all_layers.append(pooled)
+ + """ + logging.info("MaxPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + outputs = tf.layers.max_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + + net_new = copy.copy(net) + net_new.outputs = outputs + net_new.all_layers.extend([outputs]) + return net_new + + +def meanpool1d(net, filter_size=3, strides=2, padding='valid', data_format='channels_last', name=None): + """Wrapper for `tf.layers.average_pooling1d `__ . + + Parameters + ------------ + net : :class:`Layer` + The previous layer with a output rank as 3. + filter_size : tuple of int + Pooling window size. + strides : tuple of int + Strides of the pooling operation. + padding : str + The padding method: 'valid' or 'same'. + data_format : str + One of `channels_last` (default) or `channels_first`. + The ordering of the dimensions must match the inputs. + channels_last corresponds to inputs with the shape (batch, length, channels); + while channels_first corresponds to inputs with shape (batch, channels, length). + name : str + A unique layer name. + + Returns + ------- + :class:`Layer` + A mean pooling 1-D layer with a output rank as 3. + + """ + logging.info("MeanPool1d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding))) + outputs = tf.layers.average_pooling1d(net.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name) + + net_new = copy.copy(net) + net_new.outputs = outputs + net_new.all_layers.extend([outputs]) + return net_new + + +def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool'): + """Wrapper for :class:`PoolLayer`. + + Parameters + ----------- + net : :class:`Layer` + The previous layer with a output rank as 4. + filter_size : tuple of int + (height, width) for filter size. + strides : tuple of int + (height, width) for strides. + padding : str + The padding method: 'valid' or 'same'. 
def maxpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='maxpool'):
    """Wrapper for :class:`PoolLayer` with ``tf.nn.max_pool``.

    Parameters
    -----------
    net : :class:`Layer`
        The previous layer with a output rank as 4.
    filter_size : tuple of int
        (height, width) for filter size.
    strides : tuple of int
        (height, width) for strides; defaults to ``filter_size`` when None.
    padding : str
        The padding algorithm type: 'SAME' or 'VALID'.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A max pooling 2-D layer with a output rank as 4.

    """
    if strides is None:
        strides = filter_size
    assert len(strides) == 2, "len(strides) should be 2, MaxPool2d and PoolLayer are different."
    # Expand the 2-element tuples into the NHWC 4-tuples PoolLayer expects.
    return PoolLayer(
        net,
        ksize=[1, filter_size[0], filter_size[1], 1],
        strides=[1, strides[0], strides[1], 1],
        padding=padding,
        pool=tf.nn.max_pool,
        name=name)


def meanpool2d(net, filter_size=(3, 3), strides=(2, 2), padding='SAME', name='meanpool'):
    """Wrapper for :class:`PoolLayer` with ``tf.nn.avg_pool``.

    Parameters
    -----------
    layer : :class:`Layer`
        The previous layer with a output rank as 4.
    filter_size : tuple of int
        (height, width) for filter size.
    strides : tuple of int
        (height, width) for strides; defaults to ``filter_size`` when None.
    padding : str
        The padding algorithm type: 'SAME' or 'VALID'.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A mean pooling 2-D layer with a output rank as 4.

    """
    if strides is None:
        strides = filter_size
    assert len(strides) == 2, "len(strides) should be 2, MeanPool2d and PoolLayer are different."
    # Expand the 2-element tuples into the NHWC 4-tuples PoolLayer expects.
    return PoolLayer(
        net,
        ksize=[1, filter_size[0], filter_size[1], 1],
        strides=[1, strides[0], strides[1], 1],
        padding=padding,
        pool=tf.nn.avg_pool,
        name=name)
class MaxPool3d(Layer):
    """3-D max pooling, wrapping `tf.layers.max_pooling3d `__ .

    Parameters
    ------------
    layer : :class:`Layer`
        The previous layer with a output rank as 5.
    filter_size : tuple of int
        Pooling window size.
    strides : tuple of int
        Strides of the pooling operation.
    padding : str
        The padding method: 'valid' or 'same'.
    data_format : str
        'channels_last' (default) or 'channels_first'; must match the
        input layout.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A max pooling 3-D layer with a output rank as 5.

    """

    def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='maxpool3d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("MaxPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding)))

        self.outputs = tf.layers.max_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name)
        self.all_layers.append(self.outputs)
class MeanPool3d(Layer):
    """3-D mean pooling, wrapping `tf.layers.average_pooling3d `__

    Parameters
    ------------
    layer : :class:`Layer`
        The previous layer with a output rank as 5.
    filter_size : tuple of int
        Pooling window size.
    strides : tuple of int
        Strides of the pooling operation.
    padding : str
        The padding method: 'valid' or 'same'.
    data_format : str
        'channels_last' (default) or 'channels_first'; must match the
        input layout.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A mean pooling 3-D layer with a output rank as 5.

    """

    def __init__(self, prev_layer, filter_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_last', name='meanpool3d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("MeanPool3d %s: filter_size:%s strides:%s padding:%s" % (name, str(filter_size), str(strides), str(padding)))

        self.outputs = tf.layers.average_pooling3d(prev_layer.outputs, filter_size, strides, padding=padding, data_format=data_format, name=name)
        self.all_layers.append(self.outputs)
class GlobalMaxPool1d(Layer):
    """The :class:`GlobalMaxPool1d` class is a 1D Global Max Pooling layer.

    Parameters
    ------------
    layer : :class:`Layer`
        The previous layer with a output rank as 3.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder("float32", [None, 100, 30])
    >>> n = InputLayer(x, name='in')
    >>> n = GlobalMaxPool1d(n)
    ... [None, 30]
    """

    def __init__(self, prev_layer=None, name='globalmaxpool1d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("GlobalMaxPool1d %s" % name)

        # Collapse the time axis (axis 1), keeping batch and channel axes.
        self.outputs = tf.reduce_max(prev_layer.outputs, axis=1, name=name)
        self.all_layers.append(self.outputs)


class GlobalMeanPool1d(Layer):
    """The :class:`GlobalMeanPool1d` class is a 1D Global Mean Pooling layer.

    Parameters
    ------------
    layer : :class:`Layer`
        The previous layer with a output rank as 3.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder("float32", [None, 100, 30])
    >>> n = InputLayer(x, name='in')
    >>> n = GlobalMeanPool1d(n)
    ... [None, 30]
    """

    def __init__(self, prev_layer=None, name='globalmeanpool1d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("GlobalMeanPool1d %s" % name)

        # Collapse the time axis (axis 1), keeping batch and channel axes.
        self.outputs = tf.reduce_mean(prev_layer.outputs, axis=1, name=name)
        self.all_layers.append(self.outputs)
class GlobalMaxPool2d(Layer):
    """The :class:`GlobalMaxPool2d` class is a 2D Global Max Pooling layer.

    Parameters
    ------------
    layer : :class:`Layer`
        The previous layer with a output rank as 4.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder("float32", [None, 100, 100, 30])
    >>> n = InputLayer(x, name='in2')
    >>> n = GlobalMaxPool2d(n)
    ... [None, 30]
    """

    def __init__(self, prev_layer=None, name='globalmaxpool2d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("GlobalMaxPool2d %s" % name)

        # Collapse the spatial axes (height, width), keeping batch and channels.
        self.outputs = tf.reduce_max(prev_layer.outputs, axis=[1, 2], name=name)
        self.all_layers.append(self.outputs)
class GlobalMeanPool2d(Layer):
    """The :class:`GlobalMeanPool2d` class is a 2D Global Mean Pooling layer.

    Parameters
    ------------
    layer : :class:`Layer`
        The previous layer with a output rank as 4.
    name : str
        A unique layer name.

    Examples
    ---------
    >>> x = tf.placeholder("float32", [None, 100, 100, 30])
    >>> n = InputLayer(x, name='in2')
    >>> n = GlobalMeanPool2d(n)
    ... [None, 30]
    """

    def __init__(self, prev_layer=None, name='globalmeanpool2d'):
        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs

        logging.info("GlobalMeanPool2d %s" % name)

        # Collapse the spatial axes (height, width), keeping batch and channels.
        self.outputs = tf.reduce_mean(prev_layer.outputs, axis=[1, 2], name=name)
        self.all_layers.append(self.outputs)


# Aliases: the CamelCase names exported in __all__ map onto the functional
# 1-D/2-D wrappers above (3-D pooling is already class-based).
MaxPool1d = maxpool1d
MaxPool2d = maxpool2d
MeanPool1d = meanpool1d
MeanPool2d = meanpool2d
+ initializer : initializer + The initializer for initializing the model parameters. + n_steps : int + The fixed sequence length. + initial_state : None or RNN State + If None, `initial_state` is zero state. + return_last : boolean + Whether return last output or all outputs in each step. + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False. + return_seq_2d : boolean + Only consider this argument when `return_last` is `False` + - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. + name : str + A unique layer name. + + Attributes + ---------- + outputs : Tensor + The output of this layer. + + final_state : Tensor or StateTuple + The finial state of this layer. + - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`. + - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`. + - In practice, you can get the final state after each iteration during training, then feed it to the initial state of next iteration. + + initial_state : Tensor or StateTuple + The initial state of this layer. + - In practice, you can set your state at the begining of each epoch or iteration according to your training procedure. + + batch_size : int or Tensor + It is an integer, if it is able to compute the `batch_size`; otherwise, tensor for dynamic batch size. + + Examples + -------- + - For synced sequence input and output, see `PTB example `__ + + - For encoding see below. 
+ + >>> batch_size = 32 + >>> num_steps = 5 + >>> vocab_size = 3000 + >>> hidden_size = 256 + >>> keep_prob = 0.8 + >>> is_train = True + >>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps]) + >>> net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size, + ... embedding_size=hidden_size, name='embed') + >>> net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop1') + >>> net = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, + ... n_hidden=hidden_size, n_steps=num_steps, return_last=False, name='lstm1') + >>> net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop2') + >>> net = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, + ... n_hidden=hidden_size, n_steps=num_steps, return_last=True, name='lstm2') + >>> net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop3') + >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name='output') + + - For CNN+LSTM + + >>> image_size = 100 + >>> batch_size = 10 + >>> num_steps = 5 + >>> x = tf.placeholder(tf.float32, shape=[batch_size, image_size, image_size, 1]) + >>> net = tl.layers.InputLayer(x, name='in') + >>> net = tl.layers.Conv2d(net, 32, (5, 5), (2, 2), tf.nn.relu, name='cnn1') + >>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool1') + >>> net = tl.layers.Conv2d(net, 10, (5, 5), (2, 2), tf.nn.relu, name='cnn2') + >>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool2') + >>> net = tl.layers.FlattenLayer(net, name='flatten') + >>> net = tl.layers.ReshapeLayer(net, shape=[-1, num_steps, int(net.outputs._shape[-1])]) + >>> rnn = tl.layers.RNNLayer(net, cell_fn=tf.contrib.rnn.BasicLSTMCell, n_hidden=200, n_steps=num_steps, return_last=False, return_seq_2d=True, name='rnn') + >>> net = tl.layers.DenseLayer(rnn, 3, name='out') + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features], if no, please 
    def __init__(
            self,
            prev_layer,
            cell_fn,
            cell_init_args=None,
            n_hidden=100,
            initializer=tf.random_uniform_initializer(-0.1, 0.1),
            n_steps=5,
            initial_state=None,
            return_last=False,
            return_seq_2d=False,
            name='rnn',
    ):
        # Avoid a shared mutable default for the cell kwargs.
        if cell_init_args is None:
            cell_init_args = {}

        Layer.__init__(self, prev_layer=prev_layer, name=name)
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        # GRU cells do not accept `state_is_tuple`; drop it if present.
        if 'GRU' in cell_fn.__name__:
            try:
                cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning('pop state_is_tuple fails.')

        self.inputs = prev_layer.outputs

        logging.info("RNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s " % (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims,
                                                                                                self.inputs.get_shape(), cell_fn.__name__))

        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
        try:
            self.inputs.get_shape().with_rank(3)
        except Exception:
            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")

        # Use the static batch size when the graph knows it; otherwise fall
        # back to a dynamic shape tensor.
        fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0]

        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
            logging.info("       RNN batch_size (concurrent processes): %d" % batch_size)
        else:
            from tensorflow.python.ops import array_ops
            batch_size = array_ops.shape(self.inputs)[0]
            logging.info("       non specified batch_size, uses a tensor instead.")
        self.batch_size = batch_size

        # Simplified unrolled RNN (tutorial-style); in general, use rnn() or
        # state_saving_rnn() from tensorflow's rnn.py instead.
        outputs = []
        # NOTE(review): inspect.getargspec is deprecated on Python 3 in favor
        # of getfullargspec; kept for the Python 2 support of this era.
        # Only pass `reuse` when the cell constructor accepts it (TF1.0+ cells).
        if 'reuse' in inspect.getargspec(cell_fn.__init__).args:
            self.cell = cell = cell_fn(num_units=n_hidden, reuse=tf.get_variable_scope().reuse, **cell_init_args)
        else:
            self.cell = cell = cell_fn(num_units=n_hidden, **cell_init_args)
        if initial_state is None:
            self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype)
        # NOTE(review): when `initial_state` is provided, `self.initial_state`
        # is never assigned here before being read — presumably relies on it
        # being set elsewhere; verify against the Layer base class.
        state = self.initial_state
        with tf.variable_scope(name, initializer=initializer) as vs:
            # Unroll the cell for n_steps, sharing weights after the first step.
            for time_step in range(n_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(self.inputs[:, time_step, :], state)
                outputs.append(cell_output)

            # Retrieve just the RNN variables created inside this scope.
            rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        logging.info("     n_params : %d" % (len(rnn_variables)))

        if return_last:
            # 2D Tensor [batch_size, n_hidden]
            self.outputs = outputs[-1]
        else:
            if return_seq_2d:
                # 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
                try:  # TF1.0
                    self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden])
                except Exception:  # TF0.12 argument order
                    self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden])

            else:
                # 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking more RNNs.
                try:  # TF1.0
                    self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_steps, n_hidden])
                except Exception:  # TF0.12 argument order
                    self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_steps, n_hidden])

        self.final_state = state

        self.all_layers.append(self.outputs)
        self.all_params.extend(rnn_variables)
+ dropout : tuple of float or int + The input and output keep probability (input_keep_prob, output_keep_prob). + If one int, input and output keep probability are the same. + n_layer : int + The number of RNN layers, default is 1. + return_last : boolean + Whether return last output or all outputs in each step. + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False. + return_seq_2d : boolean + Only consider this argument when `return_last` is `False` + - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. + name : str + A unique layer name. + + Attributes + ---------- + outputs : tensor + The output of this layer. + fw(bw)_final_state : tensor or StateTuple + The finial state of this layer. + - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`. + - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`. + - In practice, you can get the final state after each iteration during training, then feed it to the initial state of next iteration. + fw(bw)_initial_state : tensor or StateTuple + The initial state of this layer. + - In practice, you can set your state at the begining of each epoch or iteration according to your training procedure. + batch_size : int or tensor + It is an integer, if it is able to compute the `batch_size`; otherwise, tensor for dynamic batch size. + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see :class:`ReshapeLayer`. + For predicting, the sequence length has to be the same with the sequence length of training, while, for normal + RNN, we can use sequence length of 1 for predicting. 
+ + References + ---------- + `Source `__ + + """ + + def __init__( + self, + prev_layer, + cell_fn, + cell_init_args=None, + n_hidden=100, + initializer=tf.random_uniform_initializer(-0.1, 0.1), + n_steps=5, + fw_initial_state=None, + bw_initial_state=None, + dropout=None, + n_layer=1, + return_last=False, + return_seq_2d=False, + name='birnn', + ): + if cell_init_args is None: + cell_init_args = {'state_is_tuple': True} # 'use_peepholes': True, + + Layer.__init__(self, prev_layer=prev_layer, name=name) + if cell_fn is None: + raise Exception("Please put in cell_fn") + if 'GRU' in cell_fn.__name__: + try: + cell_init_args.pop('state_is_tuple') + except Exception: + logging.warning("pop state_is_tuple fails.") + + self.inputs = prev_layer.outputs + + logging.info("BiRNNLayer %s: n_hidden:%d n_steps:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d " % (self.name, n_hidden, n_steps, + self.inputs.get_shape().ndims, + self.inputs.get_shape(), + cell_fn.__name__, dropout, n_layer)) + + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + + if fixed_batch_size.value: + self.batch_size = fixed_batch_size.value + logging.info(" RNN batch_size (concurrent processes): %d" % self.batch_size) + else: + from tensorflow.python.ops import array_ops + self.batch_size = array_ops.shape(self.inputs)[0] + logging.info(" non specified batch_size, uses a tensor instead.") + + # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] + try: + self.inputs.get_shape().with_rank(3) + except Exception: + raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]") + + with tf.variable_scope(name, initializer=initializer) as vs: + rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) + # Apply dropout + if dropout: + if isinstance(dropout, (tuple, list)): # type(dropout) in [tuple, list]: + in_keep_prob = dropout[0] + out_keep_prob = dropout[1] + elif isinstance(dropout, float): + in_keep_prob, 
out_keep_prob = dropout, dropout + else: + raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)") + try: # TF 1.0 + DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper + except Exception: + DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper + cell_creator = lambda is_last=True: \ + DropoutWrapper_fn(rnn_creator(), + input_keep_prob=in_keep_prob, + output_keep_prob=out_keep_prob if is_last else 1.0) + else: + cell_creator = rnn_creator + self.fw_cell = cell_creator() + self.bw_cell = cell_creator() + + # Apply multiple layers + if n_layer > 1: + try: # TF1.0 + MultiRNNCell_fn = tf.contrib.rnn.MultiRNNCell + except Exception: + MultiRNNCell_fn = tf.nn.rnn_cell.MultiRNNCell + if dropout: + try: + self.fw_cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)], state_is_tuple=True) + self.bw_cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)], state_is_tuple=True) + except Exception: + self.fw_cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)]) + self.bw_cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)]) + else: + try: + self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) + self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True) + except Exception: + self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) + self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)]) + + # Initial state of RNN + if fw_initial_state is None: + self.fw_initial_state = self.fw_cell.zero_state(self.batch_size, dtype=LayersConfig.tf_dtype) # dtype=tf.float32) + else: + self.fw_initial_state = fw_initial_state + if bw_initial_state is None: + self.bw_initial_state = self.bw_cell.zero_state(self.batch_size, dtype=LayersConfig.tf_dtype) # dtype=tf.float32) + else: + self.bw_initial_state = bw_initial_state + # exit() + # 
Feedforward to MultiRNNCell + try: # TF1.0 + list_rnn_inputs = tf.unstack(self.inputs, axis=1) + except Exception: # TF0.12 + list_rnn_inputs = tf.unpack(self.inputs, axis=1) + + try: # TF1.0 + bidirectional_rnn_fn = tf.contrib.rnn.static_bidirectional_rnn + except Exception: + bidirectional_rnn_fn = tf.nn.bidirectional_rnn + outputs, fw_state, bw_state = bidirectional_rnn_fn( # outputs, fw_state, bw_state = tf.contrib.rnn.static_bidirectional_rnn( + cell_fw=self.fw_cell, + cell_bw=self.bw_cell, + inputs=list_rnn_inputs, + initial_state_fw=self.fw_initial_state, + initial_state_bw=self.bw_initial_state) + + if return_last: + raise Exception("Do not support return_last at the moment.") + # self.outputs = outputs[-1] + else: + self.outputs = outputs + if return_seq_2d: + # 2D Tensor [n_example, n_hidden] + try: # TF1.0 + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden * 2]) + except Exception: # TF0.12 + self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden * 2]) + else: + # : stack more RNN layer after that + # 3D Tensor [n_example/n_steps, n_steps, n_hidden] + + try: # TF1.0 + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_steps, n_hidden * 2]) + except Exception: # TF0.12 + self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_steps, n_hidden * 2]) + self.fw_final_state = fw_state + self.bw_final_state = bw_state + + # Retrieve just the RNN variables. 
+ rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + logging.info(" n_params : %d" % (len(rnn_variables))) + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend(rnn_variables) + + +class ConvRNNCell(object): + """Abstract object representing an Convolutional RNN Cell.""" + + def __call__(self, inputs, state, scope=None): + """Run this RNN cell on inputs, starting from the given state.""" + raise NotImplementedError("Abstract method") + + @property + def state_size(self): + """size(s) of state(s) used by this cell.""" + raise NotImplementedError("Abstract method") + + @property + def output_size(self): + """Integer or TensorShape: size of outputs produced by this cell.""" + raise NotImplementedError("Abstract method") + + def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype): + """Return zero-filled state tensor(s). + Args: + batch_size: int, float, or unit Tensor representing the batch size. + Returns: + tensor of shape '[batch_size x shape[0] x shape[1] x num_features] + filled with zeros + + """ + shape = self.shape + num_features = self.num_features + # TODO : TypeError: 'NoneType' object is not subscriptable + zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype) + return zeros + + +class BasicConvLSTMCell(ConvRNNCell): + """Basic Conv LSTM recurrent network cell. + + Parameters + ----------- + shape : tuple of int + The height and width of the cell. + filter_size : tuple of int + The height and width of the filter + num_features : int + The hidden size of the cell + forget_bias : float + The bias added to forget gates (see above). + input_size : int + Deprecated and unused. + state_is_tuple : boolen + If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. + If False, they are concatenated along the column axis. 
The latter behavior will soon be deprecated. + act : activation function + The activation function of this layer, tanh as default. + + """ + + def __init__(self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False, act=tf.nn.tanh): + """Initialize the basic Conv LSTM cell.""" + # if not state_is_tuple: + # logging.warn("%s: Using a concatenated state is slower and will soon be " + # "deprecated. Use state_is_tuple=True.", self) + if input_size is not None: + logging.warn("%s: The input_size parameter is deprecated.", self) + self.shape = shape + self.filter_size = filter_size + self.num_features = num_features + self._forget_bias = forget_bias + self._state_is_tuple = state_is_tuple + self._activation = act + + @property + def state_size(self): + """State size of the LSTMStateTuple.""" + return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) + + @property + def output_size(self): + """Number of units in outputs.""" + return self._num_units + + def __call__(self, inputs, state, scope=None): + """Long short-term memory cell (LSTM).""" + with tf.variable_scope(scope or type(self).__name__): # "BasicLSTMCell" + # Parameters of gates are concatenated into one multiply for efficiency. 
+ if self._state_is_tuple: + c, h = state + else: + # print state + # c, h = tf.split(3, 2, state) + c, h = tf.split(state, 2, 3) + concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True) + + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + # i, j, f, o = tf.split(3, 4, concat) + i, j, f, o = tf.split(concat, 4, 3) + + new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) * self._activation(j)) + new_h = self._activation(new_c) * tf.nn.sigmoid(o) + + if self._state_is_tuple: + new_state = LSTMStateTuple(new_c, new_h) + else: + new_state = tf.concat([new_c, new_h], 3) + return new_h, new_state + + +def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None): + """convolution: + + Parameters + ---------- + args : tensor + 4D Tensor or a list of 4D, batch x n, Tensors. + filter_size : tuple of int + Filter height and width. + num_features : int + Nnumber of features. + bias_start : float + Starting value to initialize the bias; 0 by default. + scope : VariableScope + For the created subgraph; defaults to "Linear". + + Returns + -------- + - A 4D Tensor with shape [batch h w num_features] + + Raises + ------- + - ValueError : if some of the arguments has unspecified or wrong shape. + + """ + # Calculate the total size of arguments on dimension 1. + total_arg_size_depth = 0 + shapes = [a.get_shape().as_list() for a in args] + for shape in shapes: + if len(shape) != 4: + raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes)) + if not shape[3]: + raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes)) + else: + total_arg_size_depth += shape[3] + + dtype = [a.dtype for a in args][0] + + # Now the computation. 
+ with tf.variable_scope(scope or "Conv"): + matrix = tf.get_variable("Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype) + if len(args) == 1: + res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME') + else: + res = tf.nn.conv2d(tf.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME') + if not bias: + return res + bias_term = tf.get_variable("Bias", [num_features], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype)) + return res + bias_term + + +class ConvLSTMLayer(Layer): + """A fixed length Convolutional LSTM layer. + + See this `paper `__ . + + Parameters + ---------- + layer : :class:`Layer` + Previous layer + cell_shape : tuple of int + The shape of each cell width * height + filter_size : tuple of int + The size of filter width * height + cell_fn : a convolutional RNN cell + Cell function like :class:`BasicConvLSTMCell` + feature_map : int + The number of feature map in the layer. + initializer : initializer + The initializer for initializing the parameters. + n_steps : int + The sequence length. + initial_state : None or ConvLSTM State + If None, `initial_state` is zero state. + return_last : boolean + Whether return last output or all outputs in each step. + - If True, return the last output, "Sequence input and single output". + - If False, return all outputs, "Synced sequence input and output". + - In other word, if you want to stack more RNNs on this layer, set to False. + return_seq_2d : boolean + Only consider this argument when `return_last` is `False` + - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it. + name : str + A unique layer name. + + Attributes + ---------- + outputs : tensor + The output of this RNN. return_last = False, outputs = all cell_output, which is the hidden state. 
+ cell_output.get_shape() = (?, h, w, c]) + + final_state : tensor or StateTuple + The finial state of this layer. + - When state_is_tuple = False, it is the final hidden and cell states, + - When state_is_tuple = True, You can get the final state after each iteration during training, then feed it to the initial state of next iteration. + + initial_state : tensor or StateTuple + It is the initial state of this ConvLSTM layer, you can use it to initialize + your state at the beginning of each epoch or iteration according to your + training procedure. + + batch_size : int or tensor + Is int, if able to compute the batch_size, otherwise, tensor for ``?``. + + """ + + def __init__( + self, + prev_layer, + cell_shape=None, + feature_map=1, + filter_size=(3, 3), + cell_fn=BasicConvLSTMCell, + initializer=tf.random_uniform_initializer(-0.1, 0.1), + n_steps=5, + initial_state=None, + return_last=False, + return_seq_2d=False, + name='convlstm', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("ConvLSTMLayer %s: feature_map:%d, n_steps:%d, " + "in_dim:%d %s, cell_fn:%s " % (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)) + # You can get the dimension by .get_shape() or ._shape, and check the + # dimension by .with_rank() as follow. 
+ # self.inputs.get_shape().with_rank(2) + # self.inputs.get_shape().with_rank(3) + + # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c] + try: + self.inputs.get_shape().with_rank(5) + except Exception: + raise Exception("RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, " "input_y, feature_map]") + + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + + if fixed_batch_size.value: + batch_size = fixed_batch_size.value + logging.info(" RNN batch_size (concurrent processes): %d" % batch_size) + else: + from tensorflow.python.ops import array_ops + batch_size = array_ops.shape(self.inputs)[0] + logging.info(" non specified batch_size, uses a tensor instead.") + self.batch_size = batch_size + outputs = [] + self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map) + if initial_state is None: + self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype) # dtype=tf.float32) # 1.2.3 + state = self.initial_state + # with tf.variable_scope("model", reuse=None, initializer=initializer): + with tf.variable_scope(name, initializer=initializer) as vs: + for time_step in range(n_steps): + if time_step > 0: tf.get_variable_scope().reuse_variables() + (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state) + outputs.append(cell_output) + + # Retrieve just the RNN variables. 
+ # rnn_variables = [v for v in tf.all_variables() if v.name.startswith(vs.name)] + rnn_variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope=vs.name) + + logging.info(" n_params : %d" % (len(rnn_variables))) + + if return_last: + # 2D Tensor [batch_size, n_hidden] + self.outputs = outputs[-1] + else: + if return_seq_2d: + # PTB tutorial: stack dense layer after that, or compute the cost from the output + # 4D Tensor [n_example, h, w, c] + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map]) + else: + # : stack more RNN layer after that + # 5D Tensor [n_example/n_steps, n_steps, h, w, c] + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map]) + + self.final_state = state + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + self.all_params.extend(rnn_variables) + + +# Advanced Ops for Dynamic RNN +def advanced_indexing_op(inputs, index): + """Advanced Indexing for Sequences, returns the outputs by given sequence lengths. + When return the last output :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths. + + Parameters + ----------- + inputs : tensor for data + With shape of [batch_size, n_step(max), n_features] + index : tensor for indexing + Sequence length in Dynamic RNN. 
[batch_size] + + Examples + --------- + >>> batch_size, max_length, n_features = 3, 5, 2 + >>> z = np.random.uniform(low=-1, high=1, size=[batch_size, max_length, n_features]).astype(np.float32) + >>> b_z = tf.constant(z) + >>> sl = tf.placeholder(dtype=tf.int32, shape=[batch_size]) + >>> o = advanced_indexing_op(b_z, sl) + >>> + >>> sess = tf.InteractiveSession() + >>> tl.layers.initialize_global_variables(sess) + >>> + >>> order = np.asarray([1,1,2]) + >>> print("real",z[0][order[0]-1], z[1][order[1]-1], z[2][order[2]-1]) + >>> y = sess.run([o], feed_dict={sl:order}) + >>> print("given",order) + >>> print("out", y) + ... real [-0.93021595 0.53820813] [-0.92548317 -0.77135968] [ 0.89952248 0.19149846] + ... given [1 1 2] + ... out [array([[-0.93021595, 0.53820813], + ... [-0.92548317, -0.77135968], + ... [ 0.89952248, 0.19149846]], dtype=float32)] + + References + ----------- + - Modified from TFlearn (the original code is used for fixed length rnn), `references `__. + + """ + batch_size = tf.shape(inputs)[0] + # max_length = int(inputs.get_shape()[1]) # for fixed length rnn, length is given + max_length = tf.shape(inputs)[1] # for dynamic_rnn, length is unknown + dim_size = int(inputs.get_shape()[2]) + index = tf.range(0, batch_size) * max_length + (index - 1) + flat = tf.reshape(inputs, [-1, dim_size]) + relevant = tf.gather(flat, index) + return relevant + + +def retrieve_seq_length_op(data): + """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max), n_features] with zero padding on right hand side. + + Examples + --------- + >>> data = [[[1],[2],[0],[0],[0]], + ... [[1],[2],[3],[0],[0]], + ... [[1],[2],[6],[1],[0]]] + >>> data = np.asarray(data) + >>> print(data.shape) + ... 
(3, 5, 1) + >>> data = tf.constant(data) + >>> sl = retrieve_seq_length_op(data) + >>> sess = tf.InteractiveSession() + >>> tl.layers.initialize_global_variables(sess) + >>> y = sl.eval() + ... [2 3 4] + + Multiple features + >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]], + ... [[2,3],[2,4],[3,2],[0,0],[0,0]], + ... [[3,3],[2,2],[5,3],[1,2],[0,0]]] + >>> print(sl) + ... [4 3 4] + + References + ------------ + Borrow from `TFlearn `__. + + """ + with tf.name_scope('GetLength'): + # TF 1.0 change reduction_indices to axis + used = tf.sign(tf.reduce_max(tf.abs(data), 2)) + length = tf.reduce_sum(used, 1) + # TF < 1.0 + # used = tf.sign(tf.reduce_max(tf.abs(data), reduction_indices=2)) + # length = tf.reduce_sum(used, reduction_indices=1) + length = tf.cast(length, tf.int32) + return length + + +def retrieve_seq_length_op2(data): + """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)], + it can be used when the features of padding (on right hand side) are all zeros. + + Parameters + ----------- + data : tensor + [batch_size, n_step(max)] with zero padding on right hand side. + + Examples + -------- + >>> data = [[1,2,0,0,0], + ... [1,2,3,0,0], + ... [1,2,6,1,0]] + >>> o = retrieve_seq_length_op2(data) + >>> sess = tf.InteractiveSession() + >>> tl.layers.initialize_global_variables(sess) + >>> print(o.eval()) + ... [2 3 4] + + """ + return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1) + + +def retrieve_seq_length_op3(data, pad_val=0): # HangSheng: return tensor for sequence length, if input is tf.string + """Return tensor for sequence length, if input is ``tf.string``. 
def retrieve_seq_length_op3(data, pad_val=0):  # HangSheng: return tensor for sequence length, if input is tf.string
    """Return tensor for sequence length, if input is ``tf.string``.

    Counts, per batch row, the entries that differ from ``pad_val``.
    Accepts rank-3 ([batch, step, feature]) or rank-2 ([batch, step]) input.
    """
    ndims = data.get_shape().ndims
    if ndims == 3:
        # A step is valid when any of its features differs from the pad value.
        return tf.reduce_sum(tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32), 1)
    if ndims == 2:
        return tf.reduce_sum(tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), 1)
    if ndims == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    raise ValueError("retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (ndims))


def target_mask_op(data, pad_val=0):  # HangSheng: return tensor for mask,if input is tf.string
    """Return tensor for mask, if input is ``tf.string``.

    Marks with 1 every position that differs from ``pad_val``, 0 elsewhere.
    Accepts rank-3 ([batch, step, feature]) or rank-2 ([batch, step]) input.
    """
    ndims = data.get_shape().ndims
    if ndims == 3:
        return tf.cast(tf.reduce_any(tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    if ndims == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    if ndims == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (ndims))


class DynamicRNNLayer(Layer):
    """
    The :class:`DynamicRNNLayer` class is a dynamic recurrent layer, see ``tf.nn.dynamic_rnn``.

    Parameters
    ----------
    layer : :class:`Layer`
        Previous layer.
    cell_fn : TensorFlow cell function
        A TensorFlow core RNN cell (note TF1.0+ and TF1.0- differ).
    cell_init_args : dictionary or None
        The arguments for the cell function.
    n_hidden : int
        The number of hidden units in the layer.
    initializer : initializer
        The initializer for initializing the parameters.
    sequence_length : tensor, array or None
        The sequence length of each row of input data.
        - If None, it uses ``retrieve_seq_length_op`` to compute the sequence length,
          i.e. when the features of padding (on right hand side) are all zeros.
        - If using word embedding, you may need to compute the sequence length from
          the ID array by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``.
        - You can also input an numpy array.
    initial_state : None or RNN State
        If None, `initial_state` is zero state.
    dropout : tuple of float or int
        The input and output keep probability (input_keep_prob, output_keep_prob).
        If one float, input and output keep probability are the same.
    n_layer : int
        The number of RNN layers, default is 1.
    return_last : boolean or None
        Whether return last output or all outputs in each step.
        If None, defaults to True ("Sequence input and single output").
        Set to False to stack more RNNs on this layer.
    return_seq_2d : boolean
        Only considered when `return_last` is `False`.
        - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
        - If False, return 3D Tensor [batch_size, n_steps(max), n_hidden].
    dynamic_rnn_init_args : dictionary
        The arguments for ``tf.nn.dynamic_rnn``.
    name : str
        A unique layer name.

    Attributes
    ------------
    outputs : tensor
        The output of this layer.
    final_state : tensor or StateTuple
        The final state of this layer.
    initial_state : tensor or StateTuple
        The initial state of this layer.
    batch_size : int or tensor
        Integer when it can be computed statically, tensor otherwise.
    sequence_length : a tensor or array
        The sequence lengths computed or supplied, [batch_size].

    Notes
    -----
    Input dimension should be rank 3 : [batch_size, n_steps(max), n_features];
    if not, please see :class:`ReshapeLayer`.

    """

    def __init__(
            self,
            prev_layer,
            cell_fn,  #tf.nn.rnn_cell.LSTMCell,
            cell_init_args=None,
            n_hidden=256,
            initializer=tf.random_uniform_initializer(-0.1, 0.1),
            sequence_length=None,
            initial_state=None,
            dropout=None,
            n_layer=1,
            return_last=None,
            return_seq_2d=False,
            dynamic_rnn_init_args=None,
            name='dyrnn',
    ):
        # Normalize the optional-dict arguments (avoids mutable defaults).
        dynamic_rnn_init_args = {} if dynamic_rnn_init_args is None else dynamic_rnn_init_args
        cell_init_args = {'state_is_tuple': True} if cell_init_args is None else cell_init_args
        return_last = True if return_last is None else return_last

        Layer.__init__(self, prev_layer=prev_layer, name=name)
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        # GRU cells do not accept `state_is_tuple`; drop it when present.
        if 'GRU' in cell_fn.__name__:
            try:
                cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning("pop state_is_tuple fails.")
        self.inputs = prev_layer.outputs

        logging.info("DynamicRNNLayer %s: n_hidden:%d, in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" %
                     (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer))

        # The input must be rank 3: [batch_size, n_steps(max), n_features].
        try:
            self.inputs.get_shape().with_rank(3)
        except Exception:
            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]")

        # Static batch size when the graph knows it, dynamic tensor otherwise.
        static_dim0 = self.inputs.get_shape().with_rank_at_least(1)[0]
        if static_dim0.value:
            batch_size = static_dim0.value
            logging.info(" batch_size (concurrent processes): %d" % batch_size)
        else:
            from tensorflow.python.ops import array_ops
            batch_size = array_ops.shape(self.inputs)[0]
            logging.info(" non specified batch_size, uses a tensor instead.")
        self.batch_size = batch_size

        # Factory for a fresh, unwrapped cell.
        make_cell = lambda: cell_fn(num_units=n_hidden, **cell_init_args)

        # Optionally wrap the cell with dropout; only the topmost of a stack
        # applies output dropout (is_last flag).
        if dropout:
            if isinstance(dropout, (tuple, list)):
                in_keep_prob = dropout[0]
                out_keep_prob = dropout[1]
            elif isinstance(dropout, float):
                in_keep_prob, out_keep_prob = dropout, dropout
            else:
                raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)")
            try:  # TF1.0
                dropout_wrapper = tf.contrib.rnn.DropoutWrapper
            except Exception:
                dropout_wrapper = tf.nn.rnn_cell.DropoutWrapper

            cell_creator = lambda is_last=True: \
                dropout_wrapper(make_cell(),
                                input_keep_prob=in_keep_prob,
                                output_keep_prob=out_keep_prob if is_last else 1.0)
        else:
            cell_creator = make_cell
        self.cell = cell_creator()

        # Stack multiple layers when requested; GRU-style cells reject
        # `state_is_tuple`, hence the fallbacks.
        if n_layer > 1:
            try:
                multi_cell_fn = tf.contrib.rnn.MultiRNNCell
            except Exception:
                multi_cell_fn = tf.nn.rnn_cell.MultiRNNCell

            if dropout:
                try:
                    self.cell = multi_cell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)], state_is_tuple=True)
                except Exception:  # when GRU
                    self.cell = multi_cell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)])
            else:
                try:
                    self.cell = multi_cell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True)
                except Exception:  # when GRU
                    self.cell = multi_cell_fn([cell_creator() for _ in range(n_layer)])

        # Initial state: zero state unless supplied by the caller.
        if initial_state is None:
            self.initial_state = self.cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype)
        else:
            self.initial_state = initial_state

        # Derive sequence lengths from the zero-padded input when not given.
        if sequence_length is None:
            try:  # TF1.0
                sequence_length = retrieve_seq_length_op(self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs))
            except Exception:  # TF0.12
                sequence_length = retrieve_seq_length_op(self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs))

        # Main computation: run the dynamic RNN inside this layer's scope.
        with tf.variable_scope(name, initializer=initializer) as vs:
            outputs, last_states = tf.nn.dynamic_rnn(
                cell=self.cell,
                inputs=self.inputs,
                sequence_length=sequence_length,
                initial_state=self.initial_state,
                **dynamic_rnn_init_args)
            rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        # Shape the outputs according to the caller's request.
        if return_last:
            # [batch_size, n_hidden]: last valid step of each sequence.
            self.outputs = advanced_indexing_op(outputs, sequence_length)
        else:
            if return_seq_2d:
                # 2D Tensor [n_example, n_hidden], for stacking DenseLayer.
                try:  # TF1.0
                    self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, n_hidden])
                except Exception:  # TF0.12
                    self.outputs = tf.reshape(tf.concat(1, outputs), [-1, n_hidden])
            else:
                # 3D Tensor [batch_size, n_steps(max), n_hidden].
                max_length = tf.shape(outputs)[1]
                batch_size = tf.shape(outputs)[0]

                try:  # TF1.0
                    self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, n_hidden])
                except Exception:  # TF0.12
                    self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, n_hidden])

        # Final state and the lengths actually used.
        self.final_state = last_states
        self.sequence_length = sequence_length

        self.all_layers.append(self.outputs)
        self.all_params.extend(rnn_variables)
+ return_last : boolean + Whether return last output or all outputs in each step. + - If True, return the last output, "Sequence input and single output" + - If False, return all outputs, "Synced sequence input and output" + - In other word, if you want to stack more RNNs on this layer, set to False. + return_seq_2d : boolean + Only consider this argument when `return_last` is `False` + - If True, return 2D Tensor [n_example, 2 * n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, 2 * n_hidden], for stacking multiple RNN after it. + dynamic_rnn_init_args : dictionary + The arguments for ``tf.nn.bidirectional_dynamic_rnn``. + name : str + A unique layer name. + + Attributes + ----------------------- + outputs : tensor + The output of this layer. (?, 2 * n_hidden) + + fw(bw)_final_state : tensor or StateTuple + The finial state of this layer. + - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`. + - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`. + - In practice, you can get the final state after each iteration during training, then feed it to the initial state of next iteration. + + fw(bw)_initial_state : tensor or StateTuple + The initial state of this layer. + - In practice, you can set your state at the begining of each epoch or iteration according to your training procedure. + + batch_size : int or tensor + It is an integer, if it is able to compute the `batch_size`; otherwise, tensor for dynamic batch size. + + sequence_length : a tensor or array + The sequence lengths computed by Advanced Opt or the given sequence lengths, [batch_size]. + + Notes + ----- + Input dimension should be rank 3 : [batch_size, n_steps(max), n_features], if no, please see :class:`ReshapeLayer`. 
+ + References + ---------- + - `Wild-ML Blog `__ + - `bidirectional_rnn.ipynb `__ + + """ + + def __init__( + self, + prev_layer, + cell_fn, #tf.nn.rnn_cell.LSTMCell, + cell_init_args=None, + n_hidden=256, + initializer=tf.random_uniform_initializer(-0.1, 0.1), + sequence_length=None, + fw_initial_state=None, + bw_initial_state=None, + dropout=None, + n_layer=1, + return_last=False, + return_seq_2d=False, + dynamic_rnn_init_args=None, + name='bi_dyrnn_layer', + ): + if cell_init_args is None: + cell_init_args = {'state_is_tuple': True} + if dynamic_rnn_init_args is None: + dynamic_rnn_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + if cell_fn is None: + raise Exception("Please put in cell_fn") + if 'GRU' in cell_fn.__name__: + try: + cell_init_args.pop('state_is_tuple') + except Exception: + logging.warning("pop state_is_tuple fails.") + self.inputs = prev_layer.outputs + + logging.info("BiDynamicRNNLayer %s: n_hidden:%d in_dim:%d in_shape:%s cell_fn:%s dropout:%s n_layer:%d" % + (self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout, n_layer)) + + # Input dimension should be rank 3 [batch_size, n_steps(max), n_features] + try: + self.inputs.get_shape().with_rank(3) + except Exception: + raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]") + + # Get the batch_size + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + if fixed_batch_size.value: + batch_size = fixed_batch_size.value + logging.info(" batch_size (concurrent processes): %d" % batch_size) + else: + from tensorflow.python.ops import array_ops + batch_size = array_ops.shape(self.inputs)[0] + logging.info(" non specified batch_size, uses a tensor instead.") + self.batch_size = batch_size + + with tf.variable_scope(name, initializer=initializer) as vs: + # Creats the cell function + # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **cell_init_args) # HanSheng + 
rnn_creator = lambda: cell_fn(num_units=n_hidden, **cell_init_args) + + # Apply dropout + if dropout: + if isinstance(dropout, (tuple, list)): + in_keep_prob = dropout[0] + out_keep_prob = dropout[1] + elif isinstance(dropout, float): + in_keep_prob, out_keep_prob = dropout, dropout + else: + raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)") + try: + DropoutWrapper_fn = tf.contrib.rnn.DropoutWrapper + except Exception: + DropoutWrapper_fn = tf.nn.rnn_cell.DropoutWrapper + + # cell_instance_fn1=cell_instance_fn # HanSheng + # cell_instance_fn=lambda: DropoutWrapper_fn( + # cell_instance_fn1(), + # input_keep_prob=in_keep_prob, + # output_keep_prob=out_keep_prob) + cell_creator = lambda is_last=True: \ + DropoutWrapper_fn(rnn_creator(), + input_keep_prob=in_keep_prob, + output_keep_prob=out_keep_prob if is_last else 1.0) + else: + cell_creator = rnn_creator + + # if dropout: + # self.fw_cell = DropoutWrapper_fn(self.fw_cell, input_keep_prob=1.0, output_keep_prob=out_keep_prob) + # self.bw_cell = DropoutWrapper_fn(self.bw_cell, input_keep_prob=1.0, output_keep_prob=out_keep_prob) + + # self.fw_cell=cell_instance_fn() + # self.bw_cell=cell_instance_fn() + # Initial state of RNN + + self.fw_initial_state = fw_initial_state + self.bw_initial_state = bw_initial_state + # Computes sequence_length + if sequence_length is None: + try: # TF1.0 + sequence_length = retrieve_seq_length_op(self.inputs if isinstance(self.inputs, tf.Tensor) else tf.stack(self.inputs)) + except Exception: # TF0.12 + sequence_length = retrieve_seq_length_op(self.inputs if isinstance(self.inputs, tf.Tensor) else tf.pack(self.inputs)) + + if n_layer > 1: + if dropout: + self.fw_cell = [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)] + self.bw_cell = [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)] + else: + self.fw_cell = [cell_creator() for _ in range(n_layer)] + self.bw_cell = [cell_creator() for _ in range(n_layer)] + from 
tensorflow.contrib.rnn import stack_bidirectional_dynamic_rnn + outputs, states_fw, states_bw = stack_bidirectional_dynamic_rnn( + cells_fw=self.fw_cell, + cells_bw=self.bw_cell, + inputs=self.inputs, + sequence_length=sequence_length, + initial_states_fw=self.fw_initial_state, + initial_states_bw=self.bw_initial_state, + dtype=LayersConfig.tf_dtype, + **dynamic_rnn_init_args) + + else: + self.fw_cell = cell_creator() + self.bw_cell = cell_creator() + outputs, (states_fw, states_bw) = tf.nn.bidirectional_dynamic_rnn( + cell_fw=self.fw_cell, + cell_bw=self.bw_cell, + inputs=self.inputs, + sequence_length=sequence_length, + initial_state_fw=self.fw_initial_state, + initial_state_bw=self.bw_initial_state, + dtype=LayersConfig.tf_dtype, + **dynamic_rnn_init_args) + + rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + logging.info(" n_params : %d" % (len(rnn_variables))) + + # Manage the outputs + try: # TF1.0 + outputs = tf.concat(outputs, 2) + except Exception: # TF0.12 + outputs = tf.concat(2, outputs) + + if return_last: + # [batch_size, 2 * n_hidden] + raise NotImplementedError("Return last is not implemented yet.") + # self.outputs = advanced_indexing_op(outputs, sequence_length) + else: + # [batch_size, n_step(max), 2 * n_hidden] + if return_seq_2d: + # PTB tutorial: + # 2D Tensor [n_example, 2 * n_hidden] + try: # TF1.0 + self.outputs = tf.reshape(tf.concat(outputs, 1), [-1, 2 * n_hidden]) + except Exception: # TF0.12 + self.outputs = tf.reshape(tf.concat(1, outputs), [-1, 2 * n_hidden]) + else: + # : + # 3D Tensor [batch_size, n_steps(max), 2 * n_hidden] + max_length = tf.shape(outputs)[1] + batch_size = tf.shape(outputs)[0] + try: # TF1.0 + self.outputs = tf.reshape(tf.concat(outputs, 1), [batch_size, max_length, 2 * n_hidden]) + except Exception: # TF0.12 + self.outputs = tf.reshape(tf.concat(1, outputs), [batch_size, max_length, 2 * n_hidden]) + + # Final state + self.fw_final_states = states_fw + self.bw_final_states = states_bw + 
+ self.sequence_length = sequence_length + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + + self.all_layers.append(self.outputs) + self.all_params.extend(rnn_variables) + + +class Seq2Seq(Layer): + """ + The :class:`Seq2Seq` class is a simple :class:`DynamicRNNLayer` based Seq2seq layer without using `tl.contrib.seq2seq `__. + See `Model `__ + and `Sequence to Sequence Learning with Neural Networks `__. + + - Please check this example `Chatbot in 200 lines of code `__. + - The Author recommends users to read the source code of :class:`DynamicRNNLayer` and :class:`Seq2Seq`. + + Parameters + ---------- + net_encode_in : :class:`Layer` + Encode sequences, [batch_size, None, n_features]. + net_decode_in : :class:`Layer` + Decode sequences, [batch_size, None, n_features]. + cell_fn : TensorFlow cell function + A TensorFlow core RNN cell + - see `RNN Cells in TensorFlow `__ + - Note TF1.0+ and TF1.0- are different + cell_init_args : dictionary or None + The arguments for the cell initializer. + n_hidden : int + The number of hidden units in the layer. + initializer : initializer + The initializer for the parameters. + encode_sequence_length : tensor + For encoder sequence length, see :class:`DynamicRNNLayer` . + decode_sequence_length : tensor + For decoder sequence length, see :class:`DynamicRNNLayer` . + initial_state_encode : None or RNN state + If None, `initial_state_encode` is zero state, it can be set by placeholder or other RNN. + initial_state_decode : None or RNN state + If None, `initial_state_decode` is the final state of the RNN encoder, it can be set by placeholder or other RNN. + dropout : tuple of float or int + The input and output keep probability (input_keep_prob, output_keep_prob). + - If one int, input and output keep probability are the same. + n_layer : int + The number of RNN layers, default is 1. 
+ return_seq_2d : boolean + Only consider this argument when `return_last` is `False` + - If True, return 2D Tensor [n_example, 2 * n_hidden], for stacking DenseLayer after it. + - If False, return 3D Tensor [n_example/n_steps, n_steps, 2 * n_hidden], for stacking multiple RNN after it. + name : str + A unique layer name. + + Attributes + ------------ + outputs : tensor + The output of RNN decoder. + initial_state_encode : tensor or StateTuple + Initial state of RNN encoder. + initial_state_decode : tensor or StateTuple + Initial state of RNN decoder. + final_state_encode : tensor or StateTuple + Final state of RNN encoder. + final_state_decode : tensor or StateTuple + Final state of RNN decoder. + + Notes + -------- + - How to feed data: `Sequence to Sequence Learning with Neural Networks `__ + - input_seqs : ``['how', 'are', 'you', '']`` + - decode_seqs : ``['', 'I', 'am', 'fine', '']`` + - target_seqs : ``['I', 'am', 'fine', '', '']`` + - target_mask : ``[1, 1, 1, 1, 0]`` + - related functions : tl.prepro + + Examples + ---------- + >>> from tensorlayer.layers import * + >>> batch_size = 32 + >>> encode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="encode_seqs") + >>> decode_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="decode_seqs") + >>> target_seqs = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_seqs") + >>> target_mask = tf.placeholder(dtype=tf.int64, shape=[batch_size, None], name="target_mask") # tl.prepro.sequences_get_mask() + >>> with tf.variable_scope("model"): + ... # for chatbot, you can use the same embedding layer, + ... # for translation, you may want to use 2 seperated embedding layers + >>> with tf.variable_scope("embedding") as vs: + >>> net_encode = EmbeddingInputlayer( + ... inputs = encode_seqs, + ... vocabulary_size = 10000, + ... embedding_size = 200, + ... 
name = 'seq_embedding') + >>> vs.reuse_variables() + >>> tl.layers.set_name_reuse(True) + >>> net_decode = EmbeddingInputlayer( + ... inputs = decode_seqs, + ... vocabulary_size = 10000, + ... embedding_size = 200, + ... name = 'seq_embedding') + >>> net = Seq2Seq(net_encode, net_decode, + ... cell_fn = tf.contrib.rnn.BasicLSTMCell, + ... n_hidden = 200, + ... initializer = tf.random_uniform_initializer(-0.1, 0.1), + ... encode_sequence_length = retrieve_seq_length_op2(encode_seqs), + ... decode_sequence_length = retrieve_seq_length_op2(decode_seqs), + ... initial_state_encode = None, + ... dropout = None, + ... n_layer = 1, + ... return_seq_2d = True, + ... name = 'seq2seq') + >>> net_out = DenseLayer(net, n_units=10000, act=tf.identity, name='output') + >>> e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost') + >>> y = tf.nn.softmax(net_out.outputs) + >>> net_out.print_params(False) + + """ + + def __init__( + self, + net_encode_in, + net_decode_in, + cell_fn, #tf.nn.rnn_cell.LSTMCell, + cell_init_args=None, + n_hidden=256, + initializer=tf.random_uniform_initializer(-0.1, 0.1), + encode_sequence_length=None, + decode_sequence_length=None, + initial_state_encode=None, + initial_state_decode=None, + dropout=None, + n_layer=1, + return_seq_2d=False, + name='seq2seq', + ): + if cell_init_args is None: + cell_init_args = {'state_is_tuple': True} + + Layer.__init__(self, name=name) + if cell_fn is None: + raise Exception("Please put in cell_fn") + if 'GRU' in cell_fn.__name__: + try: + cell_init_args.pop('state_is_tuple') + except Exception: + logging.warning("pop state_is_tuple fails.") + # self.inputs = layer.outputs + logging.info("[*] Seq2Seq %s: n_hidden:%d cell_fn:%s dropout:%s n_layer:%d" % (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)) + + with tf.variable_scope(name): + # tl.layers.set_name_reuse(reuse) + # network = InputLayer(self.inputs, 
name=name+'/input') + network_encode = DynamicRNNLayer( + net_encode_in, + cell_fn=cell_fn, + cell_init_args=cell_init_args, + n_hidden=n_hidden, + initializer=initializer, + initial_state=initial_state_encode, + dropout=dropout, + n_layer=n_layer, + sequence_length=encode_sequence_length, + return_last=False, + return_seq_2d=True, + name='encode') + # vs.reuse_variables() + # tl.layers.set_name_reuse(True) + network_decode = DynamicRNNLayer( + net_decode_in, + cell_fn=cell_fn, + cell_init_args=cell_init_args, + n_hidden=n_hidden, + initializer=initializer, + initial_state=(network_encode.final_state if initial_state_decode is None else initial_state_decode), + dropout=dropout, + n_layer=n_layer, + sequence_length=decode_sequence_length, + return_last=False, + return_seq_2d=return_seq_2d, + name='decode') + self.outputs = network_decode.outputs + + # rnn_variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + # Initial state + self.initial_state_encode = network_encode.initial_state + self.initial_state_decode = network_decode.initial_state + + # Final state + self.final_state_encode = network_encode.final_state + self.final_state_decode = network_decode.final_state + + # self.sequence_length = sequence_length + self.all_layers = list(network_encode.all_layers) + self.all_params = list(network_encode.all_params) + self.all_drop = dict(network_encode.all_drop) + + self.all_layers.extend(list(network_decode.all_layers)) + self.all_params.extend(list(network_decode.all_params)) + self.all_drop.update(dict(network_decode.all_drop)) + + self.all_layers.append(self.outputs) + # self.all_params.extend( rnn_variables ) + + self.all_layers = list_remove_repeat(self.all_layers) + self.all_params = list_remove_repeat(self.all_params) diff --git a/tensorlayer/layers/shape.py b/tensorlayer/layers/shape.py new file mode 100644 index 0000000..3e4644d --- /dev/null +++ b/tensorlayer/layers/shape.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- + +from .core import * 
+from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'FlattenLayer', + 'ReshapeLayer', + 'TransposeLayer', +] + + +class FlattenLayer(Layer): + """A layer that reshapes high-dimension input into a vector. + + Then we often apply DenseLayer, RNNLayer, ConcatLayer and etc on the top of a flatten layer. + [batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row * mask_col * n_mask] + + Parameters + ---------- + layer : :class:`Layer` + Previous layer. + name : str + A unique layer name. + + Examples + -------- + >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = tl.layers.FlattenLayer(net, name='flatten') + ... [?, 784] + + """ + + def __init__( + self, + prev_layer, + name='flatten_layer', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + self.outputs = flatten_reshape(self.inputs, name=name) + self.n_units = int(self.outputs.get_shape()[-1]) + logging.info("FlattenLayer %s: %d" % (self.name, self.n_units)) + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + + +class ReshapeLayer(Layer): + """A layer that reshapes a given tensor. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer + shape : tuple of int + The output shape, see ``tf.reshape``. + name : str + A unique layer name. + + Examples + -------- + >>> x = tf.placeholder(tf.float32, shape=(None, 784)) + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = tl.layers.ReshapeLayer(net, [-1, 28, 28, 1], name='reshape') + >>> print(net.outputs) + ... 
(?, 28, 28, 1) + + """ + + def __init__( + self, + prev_layer, + shape, + name='reshape_layer', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + self.outputs = tf.reshape(self.inputs, shape=shape, name=name) + logging.info("ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape())) + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + + +class TransposeLayer(Layer): + """A layer that transposes the dimension of a tensor. + + See `tf.transpose() `__ . + + Parameters + ---------- + layer : :class:`Layer` + Previous layer + perm: list of int + The permutation of the dimensions, similar with ``numpy.transpose``. + name : str + A unique layer name. + + Examples + ---------- + >>> x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) + >>> net = tl.layers.InputLayer(x, name='input') + >>> net = tl.layers.TransposeLayer(net, perm=[0, 1, 3, 2], name='trans') + ... [None, 28, 1, 28] + + """ + + def __init__( + self, + prev_layer, + perm, + name='transpose', + ): + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + assert perm is not None + + logging.info("TransposeLayer %s: perm:%s" % (self.name, perm)) + # with tf.variable_scope(name) as vs: + self.outputs = tf.transpose(self.inputs, perm=perm, name=name) + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + self.all_layers.append(self.outputs) + # self.all_params.extend( variables ) diff --git a/tensorlayer/layers/spatial_transformer.py b/tensorlayer/layers/spatial_transformer.py new file mode 100644 index 0000000..bc24590 --- /dev/null +++ b/tensorlayer/layers/spatial_transformer.py @@ -0,0 +1,288 @@ +# -*- coding: utf-8 -*- + +from six.moves import xrange +from .core import * +from .. 
import _logging as logging +import tensorflow as tf +import numpy as np + +__all__ = [ + 'transformer', + 'batch_transformer', + 'SpatialTransformer2dAffineLayer', +] + + +def transformer(U, theta, out_size, name='SpatialTransformer2dAffine'): + """Spatial Transformer Layer for `2D Affine Transformation `__ + , see :class:`SpatialTransformer2dAffineLayer` class. + + Parameters + ---------- + U : list of float + The output of a convolutional net should have the + shape [num_batch, height, width, num_channels]. + theta: float + The output of the localisation network should be [num_batch, 6], value range should be [0, 1] (via tanh). + out_size: tuple of int + The size of the output of the network (height, width) + name: str + Optional function name + + Returns + ------- + Tensor + The transformed tensor. + + References + ---------- + - `Spatial Transformer Networks `__ + - `TensorFlow/Models `__ + + Notes + ----- + To initialize the network to the identity transform init. + + >>> ``theta`` to + >>> identity = np.array([[1., 0., 0.], + ... 
[0., 1., 0.]]) + >>> identity = identity.flatten() + >>> theta = tf.Variable(initial_value=identity) + + """ + + def _repeat(x, n_repeats): + with tf.variable_scope('_repeat'): + rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([ + n_repeats, + ])), 1), [1, 0]) + rep = tf.cast(rep, 'int32') + x = tf.matmul(tf.reshape(x, (-1, 1)), rep) + return tf.reshape(x, [-1]) + + def _interpolate(im, x, y, out_size): + with tf.variable_scope('_interpolate'): + # constants + num_batch = tf.shape(im)[0] + height = tf.shape(im)[1] + width = tf.shape(im)[2] + channels = tf.shape(im)[3] + + x = tf.cast(x, 'float32') + y = tf.cast(y, 'float32') + height_f = tf.cast(height, 'float32') + width_f = tf.cast(width, 'float32') + out_height = out_size[0] + out_width = out_size[1] + zero = tf.zeros([], dtype='int32') + max_y = tf.cast(tf.shape(im)[1] - 1, 'int32') + max_x = tf.cast(tf.shape(im)[2] - 1, 'int32') + + # scale indices from [-1, 1] to [0, width/height] + x = (x + 1.0) * (width_f) / 2.0 + y = (y + 1.0) * (height_f) / 2.0 + + # do sampling + x0 = tf.cast(tf.floor(x), 'int32') + x1 = x0 + 1 + y0 = tf.cast(tf.floor(y), 'int32') + y1 = y0 + 1 + + x0 = tf.clip_by_value(x0, zero, max_x) + x1 = tf.clip_by_value(x1, zero, max_x) + y0 = tf.clip_by_value(y0, zero, max_y) + y1 = tf.clip_by_value(y1, zero, max_y) + dim2 = width + dim1 = width * height + base = _repeat(tf.range(num_batch) * dim1, out_height * out_width) + base_y0 = base + y0 * dim2 + base_y1 = base + y1 * dim2 + idx_a = base_y0 + x0 + idx_b = base_y1 + x0 + idx_c = base_y0 + x1 + idx_d = base_y1 + x1 + + # use indices to lookup pixels in the flat image and restore + # channels dim + im_flat = tf.reshape(im, tf.stack([-1, channels])) + im_flat = tf.cast(im_flat, 'float32') + Ia = tf.gather(im_flat, idx_a) + Ib = tf.gather(im_flat, idx_b) + Ic = tf.gather(im_flat, idx_c) + Id = tf.gather(im_flat, idx_d) + + # and finally calculate interpolated values + x0_f = tf.cast(x0, 'float32') + x1_f = tf.cast(x1, 'float32') + y0_f 
= tf.cast(y0, 'float32') + y1_f = tf.cast(y1, 'float32') + wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1) + wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1) + wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1) + wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1) + output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id]) + return output + + def _meshgrid(height, width): + with tf.variable_scope('_meshgrid'): + # This should be equivalent to: + # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width), + # np.linspace(-1, 1, height)) + # ones = np.ones(np.prod(x_t.shape)) + # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones]) + x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0])) + y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1), tf.ones(shape=tf.stack([1, width]))) + + x_t_flat = tf.reshape(x_t, (1, -1)) + y_t_flat = tf.reshape(y_t, (1, -1)) + + ones = tf.ones_like(x_t_flat) + grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones]) + return grid + + def _transform(theta, input_dim, out_size): + with tf.variable_scope('_transform'): + num_batch = tf.shape(input_dim)[0] + num_channels = tf.shape(input_dim)[3] + theta = tf.reshape(theta, (-1, 2, 3)) + theta = tf.cast(theta, 'float32') + + # grid of (x_t, y_t, 1), eq (1) in ref [1] + out_height = out_size[0] + out_width = out_size[1] + grid = _meshgrid(out_height, out_width) + grid = tf.expand_dims(grid, 0) + grid = tf.reshape(grid, [-1]) + grid = tf.tile(grid, tf.stack([num_batch])) + grid = tf.reshape(grid, tf.stack([num_batch, 3, -1])) + + # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s) + T_g = tf.matmul(theta, grid) + x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1]) + y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1]) + x_s_flat = tf.reshape(x_s, [-1]) + y_s_flat = tf.reshape(y_s, [-1]) + + input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size) + + output = tf.reshape(input_transformed, tf.stack([num_batch, out_height, 
out_width, num_channels])) + return output + + with tf.variable_scope(name): + output = _transform(theta, U, out_size) + return output + + +def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer2dAffine'): + """Batch Spatial Transformer function for `2D Affine Transformation `__. + + Parameters + ---------- + U : list of float + tensor of inputs [batch, height, width, num_channels] + thetas : list of float + a set of transformations for each input [batch, num_transforms, 6] + out_size : list of int + the size of the output [out_height, out_width] + name : str + optional function name + + Returns + ------ + float + Tensor of size [batch * num_transforms, out_height, out_width, num_channels] + + """ + with tf.variable_scope(name): + num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2]) + indices = [[i] * num_transforms for i in xrange(num_batch)] + input_repeated = tf.gather(U, tf.reshape(indices, [-1])) + return transformer(input_repeated, thetas, out_size) + + +class SpatialTransformer2dAffineLayer(Layer): + """The :class:`SpatialTransformer2dAffineLayer` class is a 2D `Spatial Transformer Layer `__ for + `2D Affine Transformation `__. + + Parameters + ----------- + layer : :class:`Layer` + Previous layer. + theta_layer : :class:`Layer` + The localisation network. + - We will use a :class:`DenseLayer` to make the theta size to [batch, 6], value range to [0, 1] (via tanh). + out_size : tuple of int or None + The size of the output of the network (height, width), the feature maps will be resized by this. + name : str + A unique layer name. 
+ + References + ----------- + - `Spatial Transformer Networks `__ + - `TensorFlow/Models `__ + + """ + + def __init__( + self, + prev_layer=None, + theta_layer=None, + out_size=None, + name='sapatial_trans_2d_affine', + ): + if out_size is None: + out_size = [40, 40] + + Layer.__init__(self, prev_layer=[prev_layer, theta_layer], name=name) + self.inputs = prev_layer.outputs + self.theta_layer = theta_layer + logging.info("SpatialTransformer2dAffineLayer %s: in_size:%s out_size:%s" % (name, self.inputs.get_shape().as_list(), out_size)) + + with tf.variable_scope(name) as vs: + # 1. make the localisation network to [batch, 6] via Flatten and Dense. + if self.theta_layer.outputs.get_shape().ndims > 2: + self.theta_layer.outputs = flatten_reshape(self.theta_layer.outputs, 'flatten') + # 2. To initialize the network to the identity transform init. + # 2.1 W + n_in = int(self.theta_layer.outputs.get_shape()[-1]) + shape = (n_in, 6) + W = tf.get_variable(name='W', initializer=tf.zeros(shape), dtype=LayersConfig.tf_dtype) + # 2.2 b + identity = tf.constant(np.array([[1., 0, 0], [0, 1., 0]]).astype('float32').flatten()) + b = tf.get_variable(name='b', initializer=identity, dtype=LayersConfig.tf_dtype) + # 2.3 transformation matrix + self.theta = tf.nn.tanh(tf.matmul(self.theta_layer.outputs, W) + b) + # 3. Spatial Transformer Sampling + # 3.1 transformation + self.outputs = transformer(self.inputs, self.theta, out_size=out_size) + # 3.2 automatically set batch_size and channels + # e.g. [?, 40, 40, ?] 
--> [64, 40, 40, 1] or [64, 20, 20, 4]/ Hao Dong + # + fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0] + if fixed_batch_size.value: + batch_size = fixed_batch_size.value + else: + from tensorflow.python.ops import array_ops + batch_size = array_ops.shape(self.inputs)[0] + + n_channels = self.inputs.get_shape().as_list()[-1] + # logging.info(self.outputs) + self.outputs = tf.reshape(self.outputs, shape=[batch_size, out_size[0], out_size[1], n_channels]) + # logging.info(self.outputs) + # exit() + # 4. Get all parameters + variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name) + + # # fixed + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + # + # # theta_layer + # self.all_layers.extend(theta_layer.all_layers) + # self.all_params.extend(theta_layer.all_params) + # self.all_drop.update(theta_layer.all_drop) + + # this layer + self.all_layers.append(self.outputs) + self.all_params.extend(variables) diff --git a/tensorlayer/layers/special_activation.py b/tensorlayer/layers/special_activation.py new file mode 100644 index 0000000..ec01434 --- /dev/null +++ b/tensorlayer/layers/special_activation.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'PReluLayer', +] + + +class PReluLayer(Layer): + """ + The :class:`PReluLayer` class is Parametric Rectified Linear layer. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer。 + channel_shared : boolean + If True, single weight is shared by all channels. + a_init : initializer + The initializer for initializing the alpha(s). + a_init_args : dictionary + The arguments for initializing the alpha(s). + name : str + A unique layer name. 
+ + References + ----------- + - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification `__ + + """ + + def __init__( + self, + prev_layer, + channel_shared=False, + a_init=tf.constant_initializer(value=0.0), + a_init_args=None, + # restore = True, + name="prelu_layer"): + if a_init_args is None: + a_init_args = {} + + Layer.__init__(self, prev_layer=prev_layer, name=name) + self.inputs = prev_layer.outputs + logging.info("PReluLayer %s: channel_shared:%s" % (self.name, channel_shared)) + if channel_shared: + w_shape = (1, ) + else: + w_shape = int(self.inputs.get_shape()[-1]) + + # with tf.name_scope(name) as scope: + with tf.variable_scope(name): + alphas = tf.get_variable(name='alphas', shape=w_shape, initializer=a_init, dtype=LayersConfig.tf_dtype, **a_init_args) + try: # TF 1.0 + self.outputs = tf.nn.relu(self.inputs) + tf.multiply(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5 + except Exception: # TF 0.12 + self.outputs = tf.nn.relu(self.inputs) + tf.mul(alphas, (self.inputs - tf.abs(self.inputs))) * 0.5 + + # self.all_layers = list(layer.all_layers) + # self.all_params = list(layer.all_params) + # self.all_drop = dict(layer.all_drop) + + self.all_layers.append(self.outputs) + self.all_params.extend([alphas]) diff --git a/tensorlayer/layers/stack.py b/tensorlayer/layers/stack.py new file mode 100644 index 0000000..693293d --- /dev/null +++ b/tensorlayer/layers/stack.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'StackLayer', + 'UnStackLayer', +] + + +class StackLayer(Layer): + """ + The :class:`StackLayer` class is layer for stacking a list of rank-R tensors into one rank-(R+1) tensor, see `tf.stack() `__. + + Parameters + ---------- + layers : list of :class:`Layer` + Previous layers to stack. + axis : int + Dimension along which to concatenate. + name : str + A unique layer name. 
+ + Examples + --------- + >>> x = tf.placeholder(tf.float32, shape=[None, 30]) + >>> net = tl.layers.InputLayer(x, name='input') + >>> net1 = tl.layers.DenseLayer(net, 10, name='dense1') + >>> net2 = tl.layers.DenseLayer(net, 10, name='dense2') + >>> net3 = tl.layers.DenseLayer(net, 10, name='dense3') + >>> net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack') + ... (?, 3, 10) + + """ + + def __init__( + self, + layers, + axis=1, + name='stack', + ): + Layer.__init__(self, prev_layer=layers, name=name) + self.inputs = [] + for l in layers: + self.inputs.append(l.outputs) + + self.outputs = tf.stack(self.inputs, axis=axis, name=name) + + logging.info("StackLayer %s: axis: %d" % (self.name, axis)) + + # self.all_layers = list(layers[0].all_layers) + # self.all_params = list(layers[0].all_params) + # self.all_drop = dict(layers[0].all_drop) + # + # for i in range(1, len(layers)): + # self.all_layers.extend(list(layers[i].all_layers)) + # self.all_params.extend(list(layers[i].all_params)) + # self.all_drop.update(dict(layers[i].all_drop)) + # + # self.all_layers = list_remove_repeat(self.all_layers) + # self.all_params = list_remove_repeat(self.all_params) + + self.all_layers.append(self.outputs) + + +def unstack_layer(layer, num=None, axis=0, name='unstack'): + """ + It is layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors., see `tf.unstack() `__. + + Parameters + ---------- + layer : :class:`Layer` + Previous layer + num : int or None + The length of the dimension axis. Automatically inferred if None (the default). + axis : int + Dimension along which axis to concatenate. + name : str + A unique layer name. + + Returns + ------- + list of :class:`Layer` + The list of layer objects unstacked from the input. 
+ + """ + inputs = layer.outputs + with tf.variable_scope(name): + outputs = tf.unstack(inputs, num=num, axis=axis) + + logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" % (name, num, axis, len(outputs))) + + net_new = [] + scope_name = tf.get_variable_scope().name + if scope_name: + full_name = scope_name + '/' + name + else: + full_name = name + + for i, _v in enumerate(outputs): + n = Layer(prev_layer=layer, name=full_name + str(i)) + n.outputs = outputs[i] + # n.all_layers = list(layer.all_layers) + # n.all_params = list(layer.all_params) + # n.all_drop = dict(layer.all_drop) + # n.all_layers.append(inputs) + + net_new.append(n) + + return net_new + + +# Alias +UnStackLayer = unstack_layer diff --git a/tensorlayer/layers/super_resolution.py b/tensorlayer/layers/super_resolution.py new file mode 100644 index 0000000..bc65a1b --- /dev/null +++ b/tensorlayer/layers/super_resolution.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'SubpixelConv1d', + 'SubpixelConv2d', +] + + +def subpixel_conv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'): + """It is a 2D sub-pixel up-sampling layer, usually be used + for Super-Resolution applications, see `SRGAN `__ for example. + + Parameters + ------------ + net : :class:`Layer` + Previous layer, + scale : int + The up-scaling ratio, a wrong setting will lead to dimension size error. + n_out_channel : int or None + The number of output channels. + - If None, automatically set n_out_channel == the number of input channels / (scale x scale). + - The number of input channels == (scale x scale) x The number of output channels. + act : activation function + The activation function of this layer. + name : str + A unique layer name. 
def subpixel_conv2d(net, scale=2, n_out_channel=None, act=tf.identity, name='subpixel_conv2d'):
    """It is a 2D sub-pixel up-sampling layer, usually be used
    for Super-Resolution applications, see `SRGAN` for example.

    Parameters
    ------------
    net : :class:`Layer`
        Previous layer.
    scale : int
        The up-scaling ratio; a wrong setting will lead to a dimension size error.
    n_out_channel : int or None
        The number of output channels.
        - If None, automatically set n_out_channel == the number of input channels / (scale x scale).
        - The number of input channels == (scale x scale) x the number of output channels.
    act : activation function
        The activation function of this layer.
    name : str
        A unique layer name.

    Returns
    -------
    :class:`Layer`
        A 2D sub-pixel up-sampling layer.

    Examples
    ---------
    >>> x = np.random.rand(2, 16, 16, 4)
    >>> X = tf.placeholder("float32", shape=(2, 16, 16, 4), name="X")
    >>> net = InputLayer(X, name='input')
    >>> net = SubpixelConv2d(net, scale=2, n_out_channel=1, name='subpixel_conv2d')
    >>> y = sess.run(net.outputs, feed_dict={X: x})
    >>> print(x.shape, y.shape)
    ... (2, 16, 16, 4) (2, 32, 32, 1)

    References
    ------------
    - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`

    """
    _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels"

    def _PS(X, r, n_out_channels):
        # Pixel-shuffle via tf.depth_to_space.
        # NOTE(review): tf.depth_to_space uses a channel ordering that differs
        # from the classic sub-pixel (PS) shuffle when n_out_channels > 1 --
        # confirm the intended pixel arrangement before relying on it.
        if n_out_channels >= 1:
            # assert is stripped under `python -O`; the shape check then vanishes.
            assert int(X.get_shape()[-1]) == (r**2) * n_out_channels, _err_log

            X = tf.depth_to_space(X, r)
        else:
            # Invalid channel count: only logged, the input passes through unchanged.
            logging.info(_err_log)
        return X

    inputs = net.outputs
    if n_out_channel is None:
        # Infer output channels: input channels must be divisible by scale^2.
        assert int(inputs.get_shape()[-1]) / (scale**2) % 1 == 0, _err_log
        n_out_channel = int(int(inputs.get_shape()[-1]) / (scale**2))

    logging.info("SubpixelConv2d %s: scale: %d n_out_channel: %s act: %s" % (name, scale, n_out_channel, act.__name__))

    net_new = Layer(prev_layer=net, name=name)
    with tf.variable_scope(name):
        net_new.outputs = act(_PS(inputs, r=scale, n_out_channels=n_out_channel))

    net_new.all_layers.append(net_new.outputs)
    return net_new
+ We assume input has dim (batch, width, r) + + Parameters + ------------ + net : :class:`Layer` + Previous layer with output shape of (batch, width, r). + scale : int + The up-scaling ratio, a wrong setting will lead to Dimension size error. + act : activation function + The activation function of this layer. + name : str + A unique layer name. + + Returns + ------- + :class:`Layer` + A 1D sub-pixel up-sampling layer + + Examples + ---------- + >>> t_signal = tf.placeholder('float32', [10, 100, 4], name='x') + >>> n = InputLayer(t_signal, name='in') + >>> n = SubpixelConv1d(n, scale=2, name='s') + >>> print(n.outputs.shape) + ... (10, 200, 2) + + References + ----------- + `Audio Super Resolution Implementation `__. + + """ + + def _PS(I, r): + X = tf.transpose(I, [2, 1, 0]) # (r, w, b) + X = tf.batch_to_space_nd(X, [r], [[0, 0]]) # (1, r*w, b) + X = tf.transpose(X, [2, 1, 0]) + return X + + logging.info("SubpixelConv1d %s: scale: %d act: %s" % (name, scale, act.__name__)) + + inputs = net.outputs + net_new = Layer(prev_layer=net, name=name) + with tf.name_scope(name): + net_new.outputs = act(_PS(inputs, r=scale)) + + # net_new.all_layers = list(net.all_layers) + # net_new.all_params = list(net.all_params) + # net_new.all_drop = dict(net.all_drop) + net_new.all_layers.append(net_new.outputs) + return net_new + + +# Alias +SubpixelConv2d = subpixel_conv2d +SubpixelConv1d = subpixel_conv1d diff --git a/tensorlayer/layers/time_distribution.py b/tensorlayer/layers/time_distribution.py new file mode 100644 index 0000000..cbfbdc4 --- /dev/null +++ b/tensorlayer/layers/time_distribution.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- + +from .core import * +from .. import _logging as logging +import tensorflow as tf + +__all__ = [ + 'TimeDistributedLayer', +] + + +class TimeDistributedLayer(Layer): + """ + The :class:`TimeDistributedLayer` class that applies a function to every timestep of the input tensor. 
    def __init__(
            self,
            prev_layer,
            layer_class=None,
            args=None,
            name='time_distributed',
    ):
        """Apply ``layer_class`` independently to every timestep.

        Parameters
        ----------
        prev_layer : :class:`Layer`
            Previous layer with output of shape (batch_size, length, dim).
        layer_class : a :class:`Layer` class
            The layer class to apply at each timestep (e.g. ``DenseLayer``).
        args : dictionary
            Keyword arguments for ``layer_class``. Must contain a 'name' key --
            it is used (with the timestep index appended) to name each
            per-timestep sub-layer.
        name : str
            A unique layer name.
        """
        if args is None:
            args = {}
        if not isinstance(args, dict):
            raise TypeError("'args' must be a dict.")

        Layer.__init__(self, prev_layer=prev_layer, name=name)
        self.inputs = prev_layer.outputs
        logging.info("TimeDistributedLayer %s: layer_class:%s args:%s" % (self.name, layer_class.__name__, args))

        # A non-Tensor input (e.g. a list of per-step tensors) is stacked and
        # transposed back to batch-major (batch, time, dim).
        if not isinstance(self.inputs, tf.Tensor):
            self.inputs = tf.transpose(tf.stack(self.inputs), [1, 0, 2])

        input_shape = self.inputs.get_shape()

        # NOTE(review): the sequence length must be statically known --
        # `range(timestep)` below cannot handle a dynamic (None) dimension.
        timestep = input_shape[1]
        x = tf.unstack(self.inputs, axis=1)

        # Reuse the same variables across timesteps: the first iteration
        # inherits the outer reuse flag, every later one forces reuse=True.
        is_name_reuse = tf.get_variable_scope().reuse
        for i in range(0, timestep):
            with tf.variable_scope(name, reuse=(is_name_reuse if i == 0 else True)) as vs:
                net = layer_class(InputLayer(x[i], name=args['name'] + str(i)), **args)
                x[i] = net.outputs
                # Variables are shared, so collecting them once per scope is enough.
                variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)

        self.outputs = tf.stack(x, axis=1, name=name)

        self.all_layers.append(self.outputs)
        self.all_params.extend(variables)
import _logging as logging + +__all__ = [ + 'generate_skip_gram_batch', + 'sample', + 'sample_top', + 'SimpleVocabulary', + 'Vocabulary', + 'process_sentence', + 'create_vocab', + 'simple_read_words', + 'read_words', + 'read_analogies_file', + 'build_vocab', + 'build_reverse_dictionary', + 'build_words_dataset', + 'words_to_word_ids', + 'word_ids_to_words', + 'save_vocab', + 'basic_tokenizer', + 'create_vocabulary', + 'initialize_vocabulary', + 'sentence_to_token_ids', + 'data_to_token_ids', + 'moses_multi_bleu', +] + -## Iteration functions def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_index=0): """Generate a training batch for the Skip-Gram model. + See `Word2Vec example `__. + Parameters ---------- - data : a list - To present context. - batch_size : an int + data : list of data + To present context, usually a list of integers. + batch_size : int Batch size to return. - num_skips : an int + num_skips : int How many times to reuse an input to generate a label. - skip_window : an int + skip_window : int How many words to consider left and right. - data_index : an int - Index of the context location. - without using yield, this code use data_index to instead. + data_index : int + Index of the context location. This code use `data_index` to instead of yield like ``tl.iterate``. Returns - -------- - batch : a list - Inputs - labels : a list + ------- + batch : list of data + Inputs. + labels : list of data Labels - data_index : an int + data_index : int Index of the context location. Examples -------- - >>> Setting num_skips=2, skip_window=1, use the right and left words. - >>> In the same way, num_skips=4, skip_window=2 means use the nearby 4 words. + Setting num_skips=2, skip_window=1, use the right and left words. + In the same way, num_skips=4, skip_window=2 means use the nearby 4 words. 
>>> data = [1,2,3,4,5,6,7,8,9,10,11] >>> batch, labels, data_index = tl.nlp.generate_skip_gram_batch(data=data, batch_size=8, num_skips=2, skip_window=1, data_index=0) @@ -61,25 +80,25 @@ def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_inde ... [4] ... [6]] - References - ----------- - - `TensorFlow word2vec tutorial `_ """ # global data_index # you can put data_index outside the function, then # modify the global data_index in the function without return it. # note: without using yield, this code use data_index to instead. - assert batch_size % num_skips == 0 - assert num_skips <= 2 * skip_window + + if batch_size % num_skips != 0: + raise Exception("batch_size should be able to be divided by num_skips.") + if num_skips > 2 * skip_window: + raise Exception("num_skips <= 2 * skip_window") batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) - span = 2 * skip_window + 1 # [ skip_window target skip_window ] + span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) for _ in range(span): buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) for i in range(batch_size // num_skips): target = skip_window # target label at the center of the buffer - targets_to_avoid = [ skip_window ] + targets_to_avoid = [skip_window] for j in range(num_skips): while target in targets_to_avoid: target = random.randint(0, span - 1) @@ -91,29 +110,28 @@ def generate_skip_gram_batch(data, batch_size, num_skips, skip_window, data_inde return batch, labels, data_index -## Sampling functions -def sample(a=[], temperature=1.0): +def sample(a=None, temperature=1.0): """Sample an index from a probability array. Parameters ---------- - a : a list + a : list of float List of probabilities. 
temperature : float or None - The higher the more uniform.\n - When a = [0.1, 0.2, 0.7],\n - temperature = 0.7, the distribution will be sharpen [ 0.05048273 0.13588945 0.81362782]\n - temperature = 1.0, the distribution will be the same [0.1 0.2 0.7]\n - temperature = 1.5, the distribution will be filtered [ 0.16008435 0.25411807 0.58579758]\n - If None, it will be ``np.argmax(a)`` + The higher the more uniform. When a = [0.1, 0.2, 0.7], + - temperature = 0.7, the distribution will be sharpen [0.05048273, 0.13588945, 0.81362782] + - temperature = 1.0, the distribution will be the same [0.1, 0.2, 0.7] + - temperature = 1.5, the distribution will be filtered [0.16008435, 0.25411807, 0.58579758] + - If None, it will be ``np.argmax(a)`` Notes ------ - No matter what is the temperature and input list, the sum of all probabilities will be one. - Even if input list = [1, 100, 200], the sum of all probabilities will still be one. + - No matter what is the temperature and input list, the sum of all probabilities will be one. Even if input list = [1, 100, 200], the sum of all probabilities will still be one. + - For large vocabulary size, choice a higher temperature or ``tl.nlp.sample_top`` to avoid error. - For large vocabulary_size, choice a higher temperature to avoid error. """ + if a is None: + raise Exception("a : list of float") b = np.copy(a) try: if temperature == 1: @@ -124,173 +142,198 @@ def sample(a=[], temperature=1.0): a = np.log(a) / temperature a = np.exp(a) / np.sum(np.exp(a)) return np.argmax(np.random.multinomial(1, a, 1)) - except: + except Exception: # np.set_printoptions(threshold=np.nan) - # print(a) - # print(np.sum(a)) - # print(np.max(a)) - # print(np.min(a)) + # logging.info(a) + # logging.info(np.sum(a)) + # logging.info(np.max(a)) + # logging.info(np.min(a)) # exit() message = "For large vocabulary_size, choice a higher temperature\ to avoid log error. Hint : use ``sample_top``. 
" + warnings.warn(message, Warning) - # print(a) - # print(b) + # logging.info(a) + # logging.info(b) return np.argmax(np.random.multinomial(1, b, 1)) -def sample_top(a=[], top_k=10): + +def sample_top(a=None, top_k=10): """Sample from ``top_k`` probabilities. Parameters ---------- - a : a list + a : list of float List of probabilities. top_k : int Number of candidates to be considered. + """ + if a is None: + a = [] + idx = np.argpartition(a, -top_k)[-top_k:] probs = a[idx] - # print("new", probs) + # logging.info("new %f" % probs) probs = probs / np.sum(probs) choice = np.random.choice(idx, p=probs) return choice - ## old implementation + # old implementation # a = np.array(a) # idx = np.argsort(a)[::-1] # idx = idx[:top_k] # # a = a[idx] # probs = a[idx] - # print("prev", probs) + # logging.info("prev %f" % probs) # # probs = probs / np.sum(probs) # # choice = np.random.choice(idx, p=probs) # # return choice -## Vector representations of words (Advanced) UNDOCUMENT +# Vector representations of words (Advanced) UNDOCUMENT class SimpleVocabulary(object): - """Simple vocabulary wrapper, see create_vocab(). + """Simple vocabulary wrapper, see create_vocab(). - Parameters - ------------ - vocab : A dictionary of word to word_id. - unk_id : Id of the special 'unknown' word. - """ + Parameters + ------------ + vocab : dictionary + A dictionary that maps word to ID. + unk_id : int + The ID for 'unknown' word. 
- def __init__(self, vocab, unk_id): - """Initializes the vocabulary.""" + """ + def __init__(self, vocab, unk_id): + """Initialize the vocabulary.""" + self._vocab = vocab + self._unk_id = unk_id - self._vocab = vocab - self._unk_id = unk_id + def word_to_id(self, word): + """Returns the integer id of a word string.""" + if word in self._vocab: + return self._vocab[word] + else: + return self._unk_id - def word_to_id(self, word): - """Returns the integer id of a word string.""" - if word in self._vocab: - return self._vocab[word] - else: - return self._unk_id class Vocabulary(object): - """Create Vocabulary class from a given vocabulary and its id-word, word-id convert, - see create_vocab() and ``tutorial_tfrecord3.py``. - - Parameters - ----------- - vocab_file : File containing the vocabulary, where the words are the first - whitespace-separated token on each line (other tokens are ignored) and - the word ids are the corresponding line numbers. - start_word : Special word denoting sentence start. - end_word : Special word denoting sentence end. - unk_word : Special word denoting unknown words. - - Properties - ------------ - vocab : a dictionary from word to id. - reverse_vocab : a list from id to word. - start_id : int of start id - end_id : int of end id - unk_id : int of unk id - pad_id : int of padding id - - Vocab_files - ------------- - >>> Look as follow, includes `start_word` , `end_word` but no `unk_word` . - >>> a 969108 - >>> 586368 - >>> 586368 - >>> . 
class Vocabulary(object):
    """Create Vocabulary class from a given vocabulary and its id-word, word-id convert.
    See create_vocab() and ``tutorial_tfrecord3.py``.

    Parameters
    -----------
    vocab_file : str
        The file contains the vocabulary (can be created via ``tl.nlp.create_vocab``),
        where the words are the first whitespace-separated token on each line
        (other tokens are ignored) and the word ids are the corresponding line numbers.
    start_word : str
        Special word denoting sentence start.
    end_word : str
        Special word denoting sentence end.
    unk_word : str
        Special word denoting unknown words.
    pad_word : str
        Special word denoting padding.

    Attributes
    ------------
    vocab : dictionary
        A dictionary that maps word to ID.
    reverse_vocab : list of int
        A list that maps ID to word.
    start_id : int
        For start ID.
    end_id : int
        For end ID.
    unk_id : int
        For unknown ID.
    pad_id : int
        For Padding ID.

    """

    # NOTE(review): the default sentinel strings below appear to have been
    # stripped in this copy (upstream uses tokens like '<S>', '</S>', '<UNK>',
    # '<PAD>') -- confirm against the original source before relying on them.
    def __init__(self, vocab_file, start_word="", end_word="", unk_word="", pad_word=""):
        if not tf.gfile.Exists(vocab_file):
            tf.logging.fatal("Vocab file %s not found." % vocab_file)
        tf.logging.info("Initializing vocabulary from file: %s" % vocab_file)

        with tf.gfile.GFile(vocab_file, mode="r") as f:
            reverse_vocab = list(f.readlines())
            # Keep only the first whitespace-separated token of each line.
            reverse_vocab = [line.split()[0] for line in reverse_vocab]
            # Append any missing special words so the id lookups below
            # cannot raise KeyError.
            if start_word not in reverse_vocab:  # haodong
                reverse_vocab.append(start_word)
            if end_word not in reverse_vocab:
                reverse_vocab.append(end_word)
            if unk_word not in reverse_vocab:
                reverse_vocab.append(unk_word)
            if pad_word not in reverse_vocab:
                reverse_vocab.append(pad_word)

            vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])

        logging.info("Vocabulary from %s : %s %s %s" % (vocab_file, start_word, end_word, unk_word))
        logging.info("    vocabulary with %d words (includes start_word, end_word, unk_word)" % len(vocab))

        self.vocab = vocab  # vocab[word] = id
        self.reverse_vocab = reverse_vocab  # reverse_vocab[id] = word

        # Save special word ids.
        self.start_id = vocab[start_word]
        self.end_id = vocab[end_word]
        self.unk_id = vocab[unk_word]
        self.pad_id = vocab[pad_word]
        logging.info("      start_id: %d" % self.start_id)
        logging.info("      end_id  : %d" % self.end_id)
        logging.info("      unk_id  : %d" % self.unk_id)
        logging.info("      pad_id  : %d" % self.pad_id)

    def word_to_id(self, word):
        """Returns the integer word id of a word string."""
        if word in self.vocab:
            return self.vocab[word]
        else:
            return self.unk_id

    def id_to_word(self, word_id):
        """Returns the word string of an integer word id."""
        # Out-of-range ids map to the unknown word.
        if word_id >= len(self.reverse_vocab):
            return self.reverse_vocab[self.unk_id]
        else:
            return self.reverse_vocab[word_id]
['', 'how', 'are', 'you', '?', ''] + + Notes + ------- + - You have to install the following package. + - `Installing NLTK `__ + - `Installing NLTK data `__ + """ try: import nltk @@ -312,30 +362,33 @@ def process_sentence(sentence, start_word="", end_word=""): process_sentence.append(end_word) return process_sentence + def create_vocab(sentences, word_counts_output_file, min_word_count=1): - """Creates the vocabulary of word to word_id, see create_vocab() and ``tutorial_tfrecord3.py``. + """Creates the vocabulary of word to word_id. + + See ``tutorial_tfrecord3.py``. The vocabulary is saved to disk in a text file of word counts. The id of each word in the file is its corresponding 0-based line number. Parameters ------------ - sentences : a list of lists of strings. - word_counts_output_file : A string + sentences : list of list of str + All sentences for creating the vocabulary. + word_counts_output_file : str The file name. - min_word_count : a int + min_word_count : int Minimum number of occurrences for a word. Returns -------- - - tl.nlp.SimpleVocabulary object. - - Mores - ----- - - ``tl.nlp.build_vocab()`` + :class:`SimpleVocabulary` + The simple vocabulary object, see :class:`Vocabulary` for more. Examples -------- + Pre-process sentences + >>> captions = ["one two , three", "four five five"] >>> processed_capts = [] >>> for c in captions: @@ -344,11 +397,16 @@ def create_vocab(sentences, word_counts_output_file, min_word_count=1): >>> print(processed_capts) ...[['', 'one', 'two', ',', 'three', ''], ['', 'four', 'five', 'five', '']] + Create vocabulary + >>> tl.nlp.create_vocab(processed_capts, word_counts_output_file='vocab.txt', min_word_count=1) - ... [TL] Creating vocabulary. + ... Creating vocabulary. ... Total words: 8 ... Words in vocabulary: 8 ... Wrote vocabulary file: vocab.txt + + Get vocabulary object + >>> vocab = tl.nlp.Vocabulary('vocab.txt', start_word="", end_word="", unk_word="") ... 
INFO:tensorflow:Initializing vocabulary from file: vocab.txt ... [TL] Vocabulary from vocab.txt : @@ -357,26 +415,27 @@ def create_vocab(sentences, word_counts_output_file, min_word_count=1): ... end_id: 3 ... unk_id: 9 ... pad_id: 0 + """ from collections import Counter - print(" [TL] Creating vocabulary.") + logging.info("Creating vocabulary.") counter = Counter() for c in sentences: counter.update(c) - # print('c',c) - print(" Total words: %d" % len(counter)) + # logging.info('c',c) + logging.info(" Total words: %d" % len(counter)) # Filter uncommon words and sort by descending count. word_counts = [x for x in counter.items() if x[1] >= min_word_count] word_counts.sort(key=lambda x: x[1], reverse=True) - word_counts = [("", 0)] + word_counts # 1st id should be reserved for padding - # print(word_counts) - print(" Words in vocabulary: %d" % len(word_counts)) + word_counts = [("", 0)] + word_counts # 1st id should be reserved for padding + # logging.info(word_counts) + logging.info(" Words in vocabulary: %d" % len(word_counts)) # Write out the word counts file. with tf.gfile.FastGFile(word_counts_output_file, "w") as f: f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts])) - print(" Wrote vocabulary file: %s" % word_counts_output_file) + logging.info(" Wrote vocabulary file: %s" % word_counts_output_file) # Create the vocabulary dictionary. reverse_vocab = [x[0] for x in word_counts] @@ -387,71 +446,75 @@ def create_vocab(sentences, word_counts_output_file, min_word_count=1): return vocab -## Vector representations of words +# Vector representations of words def simple_read_words(filename="nietzsche.txt"): """Read context from file without any preprocessing. Parameters ---------- - filename : a string + filename : str A file path (like .txt file) Returns -------- - The context in a string + str + The context in a string. 
+ """ - with open("nietzsche.txt", "r") as f: + with open(filename, "r") as f: words = f.read() return words -def read_words(filename="nietzsche.txt", replace = ['\n', '']): - """File to list format context. Note that, this script can not handle punctuations. + +def read_words(filename="nietzsche.txt", replace=None): + """Read list format context from a file. + For customized read_words method, see ``tutorial_generate_text.py``. Parameters ---------- - filename : a string - A file path (like .txt file), - replace : a list - [original string, target string], to disable replace use ['', ''] + filename : str + a file path. + replace : list of str + replace original string by target string. Returns - -------- - The context in a list, split by space by default, and use ``''`` to represent ``'\n'``, - e.g. ``[... 'how', 'useful', 'it', "'s" ... ]``. - - Code References - --------------- - - `tensorflow.models.rnn.ptb.reader `_ + ------- + list of str + The context in a list (split using space). """ + if replace is None: + replace = ['\n', ''] + with tf.gfile.GFile(filename, "r") as f: - try: # python 3.4 or older + try: # python 3.4 or older context_list = f.read().replace(*replace).split() - except: # python 3.5 + except Exception: # python 3.5 f.seek(0) replace = [x.encode('utf-8') for x in replace] context_list = f.read().replace(*replace).split() return context_list -def read_analogies_file(eval_file='questions-words.txt', word2id={}): + +def read_analogies_file(eval_file='questions-words.txt', word2id=None): """Reads through an analogy question file, return its id format. Parameters ---------- - eval_data : a string + eval_file : str The file name. - word2id : a dictionary - Mapping words to unique IDs. + word2id : dictionary + a dictionary that maps word to ID. Returns -------- - analogy_questions : a [n, 4] numpy array containing the analogy question's - word ids. - questions_skipped: questions skipped due to unknown words. 
+ numpy.array + A ``[n_examples, 4]`` numpy array containing the analogy question's word IDs. Examples --------- - >>> eval_file should be in this format : + The file should be in this format + >>> : capital-common-countries >>> Athens Greece Baghdad Iraq >>> Athens Greece Bangkok Thailand @@ -462,13 +525,12 @@ def read_analogies_file(eval_file='questions-words.txt', word2id={}): >>> Athens Greece Canberra Australia >>> Athens Greece Hanoi Vietnam >>> Athens Greece Havana Cuba - ... + + Get the tokenized analogy question data >>> words = tl.files.load_matt_mahoney_text8_dataset() - >>> data, count, dictionary, reverse_dictionary = \ - tl.nlp.build_words_dataset(words, vocabulary_size, True) - >>> analogy_questions = tl.nlp.read_analogies_file( \ - eval_file='questions-words.txt', word2id=dictionary) + >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True) + >>> analogy_questions = tl.nlp.read_analogies_file(eval_file='questions-words.txt', word2id=dictionary) >>> print(analogy_questions) ... [[ 3068 1248 7161 1581] ... [ 3068 1248 28683 5642] @@ -477,42 +539,48 @@ def read_analogies_file(eval_file='questions-words.txt', word2id={}): ... [ 1216 4309 19982 25506] ... [ 1216 4309 3194 8650] ... [ 1216 4309 140 312]] + """ + if word2id is None: + word2id = {} + questions = [] questions_skipped = 0 with open(eval_file, "rb") as analogy_f: - for line in analogy_f: - if line.startswith(b":"): # Skip comments. + for line in analogy_f: + if line.startswith(b":"): # Skip comments. 
continue - words = line.strip().lower().split(b" ") # lowercase - ids = [word2id.get(w.strip()) for w in words] - if None in ids or len(ids) != 4: - questions_skipped += 1 - else: - questions.append(np.array(ids)) - print("Eval analogy file: ", eval_file) - print("Questions: ", len(questions)) - print("Skipped: ", questions_skipped) + words = line.strip().lower().split(b" ") # lowercase + ids = [word2id.get(w.strip()) for w in words] + if None in ids or len(ids) != 4: + questions_skipped += 1 + else: + questions.append(np.array(ids)) + logging.info("Eval analogy file: %s" % eval_file) + logging.info("Questions: %d", len(questions)) + logging.info("Skipped: %d", questions_skipped) analogy_questions = np.array(questions, dtype=np.int32) return analogy_questions + def build_vocab(data): """Build vocabulary. + Given the context in list format. Return the vocabulary, which is a dictionary for word to id. e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... } Parameters ---------- - data : a list of string - the context in list format + data : list of str + The context in list format Returns -------- - word_to_id : a dictionary - mapping words to unique IDs. e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... } + dictionary + that maps word to unique ID. e.g. {'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 .... } - Code References + References --------------- - `tensorflow.models.rnn.ptb.reader `_ @@ -521,64 +589,67 @@ def build_vocab(data): >>> data_path = os.getcwd() + '/simple-examples/data' >>> train_path = os.path.join(data_path, "ptb.train.txt") >>> word_to_id = build_vocab(read_txt_words(train_path)) + """ # data = _read_words(filename) counter = collections.Counter(data) - # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1 + # logging.info('counter %s' % counter) # dictionary for the occurrence number of each word, e.g. 
'banknote': 1, 'photography': 1, 'kia': 1 count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) - # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1) + # logging.info('count_pairs %s' % count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1) words, _ = list(zip(*count_pairs)) word_to_id = dict(zip(words, range(len(words)))) - # print(words) # list of words - # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 + # logging.info(words) # list of words + # logging.info(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746 return word_to_id + def build_reverse_dictionary(word_to_id): - """Given a dictionary for converting word to integer id. - Returns a reverse dictionary for converting a id to word. + """Given a dictionary that maps word to integer id. + Returns a reverse dictionary that maps a id to word. Parameters ---------- word_to_id : dictionary - mapping words to unique ids + that maps word to ID. Returns -------- - reverse_dictionary : a dictionary - mapping ids to words + dictionary + A dictionary that maps IDs to words. + """ reverse_dictionary = dict(zip(word_to_id.values(), word_to_id.keys())) return reverse_dictionary -def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key = 'UNK'): + +def build_words_dataset(words=None, vocabulary_size=50000, printable=True, unk_key='UNK'): """Build the words dictionary and replace rare words with 'UNK' token. The most common word has the smallest integer id. Parameters ---------- - words : a list of string or byte - The context in list format. You may need to do preprocessing on the words, - such as lower case, remove marks etc. - vocabulary_size : an int - The maximum vocabulary size, limiting the vocabulary size. - Then the script replaces rare words with 'UNK' token. 
+ words : list of str or byte + The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc. + vocabulary_size : int + The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token. printable : boolean Whether to print the read vocabulary size of the given words. - unk_key : a string - Unknown words = unk_key + unk_key : str + Represent the unknown words. Returns -------- - data : a list of integer - The context in a list of ids - count : a list of tuple and list - count[0] is a list : the number of rare words\n - count[1:] are tuples : the number of occurrence of each word\n - e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)] - dictionary : a dictionary - word_to_id, mapping words to unique IDs. + data : list of int + The context in a list of ID. + count : list of tuple and list + Pair words and IDs. + - count[0] is a list : the number of rare words + - count[1:] are tuples : the number of occurrence of each word + - e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)] + dictionary : dictionary + It is `word_to_id` that maps word to ID. reverse_dictionary : a dictionary - id_to_word, mapping id to unique word. + It is `id_to_word` that maps ID to word. 
Examples -------- @@ -586,11 +657,14 @@ def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key >>> vocabulary_size = 50000 >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size) - Code References + References ----------------- - - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `_ + - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py `__ + """ - import collections + if words is None: + raise Exception("words : list of str or byte") + count = [[unk_key, -1]] count.extend(collections.Counter(words).most_common(vocabulary_size - 1)) dictionary = dict() @@ -608,35 +682,36 @@ def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) if printable: - print('Real vocabulary size %d' % len(collections.Counter(words).keys())) - print('Limited vocabulary size {}'.format(vocabulary_size)) - assert len(collections.Counter(words).keys()) >= vocabulary_size , \ - "the limited vocabulary_size must be less than or equal to the read vocabulary_size" + logging.info('Real vocabulary size %d' % len(collections.Counter(words).keys())) + logging.info('Limited vocabulary size {}'.format(vocabulary_size)) + if len(collections.Counter(words).keys()) < vocabulary_size: + raise Exception( + "len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size") return data, count, dictionary, reverse_dictionary -def words_to_word_ids(data=[], word_to_id={}, unk_key = 'UNK'): - """Given a context (words) in list format and the vocabulary, - Returns a list of IDs to represent the context. + +def words_to_word_ids(data=None, word_to_id=None, unk_key='UNK'): + """Convert a list of string (words) to IDs. 
Parameters ---------- - data : a list of string or byte - the context in list format + data : list of string or byte + The context in list format word_to_id : a dictionary - mapping words to unique IDs. - unk_key : a string - Unknown words = unk_key + that maps word to ID. + unk_key : str + Represent the unknown words. Returns -------- - A list of IDs to represent the context. + list of int + A list of IDs to represent the context. Examples -------- >>> words = tl.files.load_matt_mahoney_text8_dataset() >>> vocabulary_size = 50000 - >>> data, count, dictionary, reverse_dictionary = \ - ... tl.nlp.build_words_dataset(words, vocabulary_size, True) + >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True) >>> context = [b'hello', b'how', b'are', b'you'] >>> ids = tl.nlp.words_to_word_ids(words, dictionary) >>> context = tl.nlp.word_ids_to_words(ids, reverse_dictionary) @@ -645,15 +720,20 @@ def words_to_word_ids(data=[], word_to_id={}, unk_key = 'UNK'): >>> print(context) ... 
[b'hello', b'how', b'are', b'you'] - Code References + References --------------- - - `tensorflow.models.rnn.ptb.reader `_ + - `tensorflow.models.rnn.ptb.reader `__ + """ + if data is None: + raise Exception("data : list of string or byte") + if word_to_id is None: + raise Exception("word_to_id : a dictionary") # if isinstance(data[0], six.string_types): - # print(type(data[0])) + # logging.info(type(data[0])) # # exit() - # print(data[0]) - # print(word_to_id) + # logging.info(data[0]) + # logging.info(word_to_id) # return [word_to_id[str(word)] for word in data] # else: @@ -667,50 +747,52 @@ def words_to_word_ids(data=[], word_to_id={}, unk_key = 'UNK'): # return [word_to_id[word] for word in data] # this one # if isinstance(data[0], str): - # # print('is a string object') + # # logging.info('is a string object') # return [word_to_id[word] for word in data] # else:#if isinstance(s, bytes): - # # print('is a unicode object') - # # print(data[0]) + # # logging.info('is a unicode object') + # # logging.info(data[0]) # return [word_to_id[str(word)] f + def word_ids_to_words(data, id_to_word): - """Given a context (ids) in list format and the vocabulary, - Returns a list of words to represent the context. + """Convert a list of integer to strings (words). Parameters ---------- - data : a list of integer - the context in list format - id_to_word : a dictionary - mapping id to unique word. + data : list of int + The context in list format. + id_to_word : dictionary + a dictionary that maps ID to word. Returns -------- - A list of string or byte to represent the context. + list of str + A list of string or byte to represent the context. Examples --------- - >>> see words_to_word_ids + >>> see ``tl.nlp.words_to_word_ids`` + """ return [id_to_word[i] for i in data] -def save_vocab(count=[], name='vocab.txt'): + +def save_vocab(count=None, name='vocab.txt'): """Save the vocabulary to a file so the model can be reloaded. 
Parameters ---------- count : a list of tuple and list - count[0] is a list : the number of rare words\n - count[1:] are tuples : the number of occurrence of each word\n + count[0] is a list : the number of rare words, + count[1:] are tuples : the number of occurrence of each word, e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)] Examples --------- >>> words = tl.files.load_matt_mahoney_text8_dataset() >>> vocabulary_size = 50000 - >>> data, count, dictionary, reverse_dictionary = \ - ... tl.nlp.build_words_dataset(words, vocabulary_size, True) + >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size, True) >>> tl.nlp.save_vocab(count, name='vocab_text8.txt') >>> vocab_text8.txt ... UNK 418391 @@ -721,217 +803,310 @@ def save_vocab(count=[], name='vocab.txt'): ... in 372201 ... a 325873 ... to 316376 + """ + if count is None: + count = [] + pwd = os.getcwd() vocabulary_size = len(count) with open(os.path.join(pwd, name), "w") as f: for i in xrange(vocabulary_size): f.write("%s %d\n" % (tf.compat.as_text(count[i][0]), count[i][1])) - print("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd)) + logging.info("%d vocab saved to %s in %s" % (vocabulary_size, name, pwd)) + + +# Functions for translation + -## Functions for translation def basic_tokenizer(sentence, _WORD_SPLIT=re.compile(b"([.,!?\"':;)(])")): - """Very basic tokenizer: split the sentence into a list of tokens. - - Parameters - ----------- - sentence : tensorflow.python.platform.gfile.GFile Object - _WORD_SPLIT : regular expression for word spliting. - - - Examples - -------- - >>> see create_vocabulary - >>> from tensorflow.python.platform import gfile - >>> train_path = "wmt/giga-fren.release2" - >>> with gfile.GFile(train_path + ".en", mode="rb") as f: - >>> for line in f: - >>> tokens = tl.nlp.basic_tokenizer(line) - >>> print(tokens) - >>> exit() - ... 
[b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How', - ... b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home', - ... b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview', - ... b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|', - ... b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page'] - - References - ---------- - - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` - """ - words = [] - sentence = tf.compat.as_bytes(sentence) - for space_separated_fragment in sentence.strip().split(): - words.extend(re.split(_WORD_SPLIT, space_separated_fragment)) - return [w for w in words if w] - -def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, - tokenizer=None, normalize_digits=True, - _DIGIT_RE=re.compile(br"\d"), - _START_VOCAB=[b"_PAD", b"_GO", b"_EOS", b"_UNK"]): - """Create vocabulary file (if it does not exist yet) from data file. - - Data file is assumed to contain one sentence per line. Each sentence is - tokenized and digits are normalized (if normalize_digits is set). - Vocabulary contains the most-frequent tokens up to max_vocabulary_size. - We write it to vocabulary_path in a one-token-per-line format, so that later - token in the first line gets id=0, second line gets id=1, and so on. - - Parameters - ----------- - vocabulary_path : path where the vocabulary will be created. - data_path : data file that will be used to create vocabulary. - max_vocabulary_size : limit on the size of the created vocabulary. - tokenizer : a function to use to tokenize each data sentence. - if None, basic_tokenizer will be used. - normalize_digits : Boolean - if true, all digits are replaced by 0s. 
- - References - ---------- - - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` - """ - if not gfile.Exists(vocabulary_path): - print("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) - vocab = {} - with gfile.GFile(data_path, mode="rb") as f: - counter = 0 - for line in f: - counter += 1 - if counter % 100000 == 0: - print(" processing line %d" % counter) - tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) - for w in tokens: - word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w - if word in vocab: - vocab[word] += 1 - else: - vocab[word] = 1 - vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True) - if len(vocab_list) > max_vocabulary_size: - vocab_list = vocab_list[:max_vocabulary_size] - with gfile.GFile(vocabulary_path, mode="wb") as vocab_file: - for w in vocab_list: - vocab_file.write(w + b"\n") - else: - print("Vocabulary %s from data %s exists" % (vocabulary_path, data_path)) + """Very basic tokenizer: split the sentence into a list of tokens. + + Parameters + ----------- + sentence : tensorflow.python.platform.gfile.GFile Object + _WORD_SPLIT : regular expression for word spliting. + + + Examples + -------- + >>> see create_vocabulary + >>> from tensorflow.python.platform import gfile + >>> train_path = "wmt/giga-fren.release2" + >>> with gfile.GFile(train_path + ".en", mode="rb") as f: + >>> for line in f: + >>> tokens = tl.nlp.basic_tokenizer(line) + >>> logging.info(tokens) + >>> exit() + ... [b'Changing', b'Lives', b'|', b'Changing', b'Society', b'|', b'How', + ... b'It', b'Works', b'|', b'Technology', b'Drives', b'Change', b'Home', + ... b'|', b'Concepts', b'|', b'Teachers', b'|', b'Search', b'|', b'Overview', + ... b'|', b'Credits', b'|', b'HHCC', b'Web', b'|', b'Reference', b'|', + ... 
b'Feedback', b'Virtual', b'Museum', b'of', b'Canada', b'Home', b'Page'] + + References + ---------- + - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` + + """ + words = [] + sentence = tf.compat.as_bytes(sentence) + for space_separated_fragment in sentence.strip().split(): + words.extend(re.split(_WORD_SPLIT, space_separated_fragment)) + return [w for w in words if w] + + +def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size, tokenizer=None, normalize_digits=True, _DIGIT_RE=re.compile(br"\d"), _START_VOCAB=None): + """Create vocabulary file (if it does not exist yet) from data file. + + Data file is assumed to contain one sentence per line. Each sentence is + tokenized and digits are normalized (if normalize_digits is set). + Vocabulary contains the most-frequent tokens up to max_vocabulary_size. + We write it to vocabulary_path in a one-token-per-line format, so that later + token in the first line gets id=0, second line gets id=1, and so on. + + Parameters + ----------- + vocabulary_path : str + Path where the vocabulary will be created. + data_path : str + Data file that will be used to create vocabulary. + max_vocabulary_size : int + Limit on the size of the created vocabulary. + tokenizer : function + A function to use to tokenize each data sentence. If None, basic_tokenizer will be used. + normalize_digits : boolean + If true, all digits are replaced by `0`. + _DIGIT_RE : regular expression function + Default is ``re.compile(br"\d")``. + _START_VOCAB : list of str + The pad, go, eos and unk token, default is ``[b"_PAD", b"_GO", b"_EOS", b"_UNK"]``. 
+ + References + ---------- + - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` + + """ + if _START_VOCAB is None: + _START_VOCAB = [b"_PAD", b"_GO", b"_EOS", b"_UNK"] + if not gfile.Exists(vocabulary_path): + logging.info("Creating vocabulary %s from data %s" % (vocabulary_path, data_path)) + vocab = {} + with gfile.GFile(data_path, mode="rb") as f: + counter = 0 + for line in f: + counter += 1 + if counter % 100000 == 0: + logging.info(" processing line %d" % counter) + tokens = tokenizer(line) if tokenizer else basic_tokenizer(line) + for w in tokens: + word = re.sub(_DIGIT_RE, b"0", w) if normalize_digits else w + if word in vocab: + vocab[word] += 1 + else: + vocab[word] = 1 + vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True) + if len(vocab_list) > max_vocabulary_size: + vocab_list = vocab_list[:max_vocabulary_size] + with gfile.GFile(vocabulary_path, mode="wb") as vocab_file: + for w in vocab_list: + vocab_file.write(w + b"\n") + else: + logging.info("Vocabulary %s from data %s exists" % (vocabulary_path, data_path)) + def initialize_vocabulary(vocabulary_path): - """Initialize vocabulary from file, return the word_to_id (dictionary) - and id_to_word (list). - - We assume the vocabulary is stored one-item-per-line, so a file:\n - dog\n - cat\n - will result in a vocabulary {"dog": 0, "cat": 1}, and this function will - also return the reversed-vocabulary ["dog", "cat"]. - - Parameters - ----------- - vocabulary_path : path to the file containing the vocabulary. - - Returns - -------- - vocab : a dictionary - Word to id. A dictionary mapping string to integers. - rev_vocab : a list - Id to word. The reversed vocabulary (a list, which reverses the vocabulary mapping). - - Examples - --------- - >>> Assume 'test' contains - ... dog - ... cat - ... 
bird - >>> vocab, rev_vocab = tl.nlp.initialize_vocabulary("test") - >>> print(vocab) - >>> {b'cat': 1, b'dog': 0, b'bird': 2} - >>> print(rev_vocab) - >>> [b'dog', b'cat', b'bird'] - - Raises - ------- - ValueError : if the provided vocabulary_path does not exist. - """ - if gfile.Exists(vocabulary_path): - rev_vocab = [] - with gfile.GFile(vocabulary_path, mode="rb") as f: - rev_vocab.extend(f.readlines()) - rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab] - vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)]) - return vocab, rev_vocab - else: - raise ValueError("Vocabulary file %s not found.", vocabulary_path) - -def sentence_to_token_ids(sentence, vocabulary, - tokenizer=None, normalize_digits=True, - UNK_ID=3, _DIGIT_RE=re.compile(br"\d")): - """Convert a string to list of integers representing token-ids. - - For example, a sentence "I have a dog" may become tokenized into - ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2, - "a": 4, "dog": 7"} this function will return [1, 2, 4, 7]. - - Parameters - ----------- - sentence : tensorflow.python.platform.gfile.GFile Object - The sentence in bytes format to convert to token-ids.\n - see basic_tokenizer(), data_to_token_ids() - vocabulary : a dictionary mapping tokens to integers. - tokenizer : a function to use to tokenize each sentence; - If None, basic_tokenizer will be used. - normalize_digits : Boolean - If true, all digits are replaced by 0s. - - Returns - -------- - A list of integers, the token-ids for the sentence. - """ - - if tokenizer: - words = tokenizer(sentence) - else: - words = basic_tokenizer(sentence) - if not normalize_digits: - return [vocabulary.get(w, UNK_ID) for w in words] - # Normalize digits by 0 before looking words up in the vocabulary. 
- return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words] - -def data_to_token_ids(data_path, target_path, vocabulary_path, - tokenizer=None, normalize_digits=True, - UNK_ID=3, _DIGIT_RE=re.compile(br"\d")): - """Tokenize data file and turn into token-ids using given vocabulary file. - - This function loads data line-by-line from data_path, calls the above - sentence_to_token_ids, and saves the result to target_path. See comment - for sentence_to_token_ids on the details of token-ids format. - - Parameters - ----------- - data_path : path to the data file in one-sentence-per-line format. - target_path : path where the file with token-ids will be created. - vocabulary_path : path to the vocabulary file. - tokenizer : a function to use to tokenize each sentence; - if None, basic_tokenizer will be used. - normalize_digits : Boolean; if true, all digits are replaced by 0s. - - References - ---------- - - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` - """ - if not gfile.Exists(target_path): - print("Tokenizing data in %s" % data_path) - vocab, _ = initialize_vocabulary(vocabulary_path) - with gfile.GFile(data_path, mode="rb") as data_file: - with gfile.GFile(target_path, mode="w") as tokens_file: - counter = 0 - for line in data_file: - counter += 1 - if counter % 100000 == 0: - print(" tokenizing line %d" % counter) - token_ids = sentence_to_token_ids(line, vocab, tokenizer, - normalize_digits, UNK_ID=UNK_ID, - _DIGIT_RE=_DIGIT_RE) - tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") - else: - print("Target path %s exists" % target_path) + """Initialize vocabulary from file, return the `word_to_id` (dictionary) + and `id_to_word` (list). + + We assume the vocabulary is stored one-item-per-line, so a file will result in a vocabulary {"dog": 0, "cat": 1}, and this function will also return the reversed-vocabulary ["dog", "cat"]. 
+ + Parameters + ----------- + vocabulary_path : str + Path to the file containing the vocabulary. + + Returns + -------- + vocab : dictionary + a dictionary that maps word to ID. + rev_vocab : list of str + a list that maps ID to word. + + Examples + --------- + >>> Assume 'test' contains + ... dog + ... cat + ... bird + >>> vocab, rev_vocab = tl.nlp.initialize_vocabulary("test") + >>> print(vocab) + >>> {b'cat': 1, b'dog': 0, b'bird': 2} + >>> print(rev_vocab) + >>> [b'dog', b'cat', b'bird'] + + Raises + ------- + ValueError : if the provided vocabulary_path does not exist. + + """ + if gfile.Exists(vocabulary_path): + rev_vocab = [] + with gfile.GFile(vocabulary_path, mode="rb") as f: + rev_vocab.extend(f.readlines()) + rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab] + vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)]) + return vocab, rev_vocab + else: + raise ValueError("Vocabulary file %s not found.", vocabulary_path) + + +def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=True, UNK_ID=3, _DIGIT_RE=re.compile(br"\d")): + """Convert a string to list of integers representing token-ids. + + For example, a sentence "I have a dog" may become tokenized into + ["I", "have", "a", "dog"] and with vocabulary {"I": 1, "have": 2, + "a": 4, "dog": 7"} this function will return [1, 2, 4, 7]. + + Parameters + ----------- + sentence : tensorflow.python.platform.gfile.GFile Object + The sentence in bytes format to convert to token-ids, see ``basic_tokenizer()`` and ``data_to_token_ids()``. + vocabulary : dictionary + Mapping tokens to integers. + tokenizer : function + A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used. + normalize_digits : boolean + If true, all digits are replaced by 0. + + Returns + -------- + list of int + The token-ids for the sentence.
+ + """ + if tokenizer: + words = tokenizer(sentence) + else: + words = basic_tokenizer(sentence) + if not normalize_digits: + return [vocabulary.get(w, UNK_ID) for w in words] + # Normalize digits by 0 before looking words up in the vocabulary. + return [vocabulary.get(re.sub(_DIGIT_RE, b"0", w), UNK_ID) for w in words] + + +def data_to_token_ids(data_path, target_path, vocabulary_path, tokenizer=None, normalize_digits=True, UNK_ID=3, _DIGIT_RE=re.compile(br"\d")): + """Tokenize data file and turn into token-ids using given vocabulary file. + + This function loads data line-by-line from data_path, calls the above + sentence_to_token_ids, and saves the result to target_path. See comment + for sentence_to_token_ids on the details of token-ids format. + + Parameters + ----------- + data_path : str + Path to the data file in one-sentence-per-line format. + target_path : str + Path where the file with token-ids will be created. + vocabulary_path : str + Path to the vocabulary file. + tokenizer : function + A function to use to tokenize each sentence. If None, ``basic_tokenizer`` will be used. + normalize_digits : boolean + If true, all digits are replaced by 0. 
+ + References + ---------- + - Code from ``/tensorflow/models/rnn/translation/data_utils.py`` + + """ + if not gfile.Exists(target_path): + logging.info("Tokenizing data in %s" % data_path) + vocab, _ = initialize_vocabulary(vocabulary_path) + with gfile.GFile(data_path, mode="rb") as data_file: + with gfile.GFile(target_path, mode="w") as tokens_file: + counter = 0 + for line in data_file: + counter += 1 + if counter % 100000 == 0: + logging.info(" tokenizing line %d" % counter) + token_ids = sentence_to_token_ids(line, vocab, tokenizer, normalize_digits, UNK_ID=UNK_ID, _DIGIT_RE=_DIGIT_RE) + tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") + else: + logging.info("Target path %s exists" % target_path) + + +def moses_multi_bleu(hypotheses, references, lowercase=False): + """Calculate the bleu score for hypotheses and references + using the MOSES multi-bleu.perl script. + + Parameters + ------------ + hypotheses : numpy.array.string + A numpy array of strings where each string is a single example. + references : numpy.array.string + A numpy array of strings where each string is a single example.
+ lowercase : boolean + If True, pass the "-lc" flag to the multi-bleu script + + Examples + --------- + >>> hypotheses = ["a bird is flying on the sky"] + >>> references = ["two birds are flying on the sky", "a bird is on the top of the tree", "an airplane is on the sky",] + >>> score = tl.nlp.moses_multi_bleu(hypotheses, references) + + Returns + -------- + float + The BLEU score + + References + ---------- + - `Google/seq2seq/metric/bleu `__ + + """ + if np.size(hypotheses) == 0: + return np.float32(0.0) + + # Get MOSES multi-bleu script + try: + multi_bleu_path, _ = urllib.request.urlretrieve("https://raw.githubusercontent.com/moses-smt/mosesdecoder/" "master/scripts/generic/multi-bleu.perl") + os.chmod(multi_bleu_path, 0o755) + except Exception: # pylint: disable=W0702 + tf.logging.info("Unable to fetch multi-bleu.perl script, using local.") + metrics_dir = os.path.dirname(os.path.realpath(__file__)) + bin_dir = os.path.abspath(os.path.join(metrics_dir, "..", "..", "bin")) + multi_bleu_path = os.path.join(bin_dir, "tools/multi-bleu.perl") + + # Dump hypotheses and references to tempfiles + hypothesis_file = tempfile.NamedTemporaryFile() + hypothesis_file.write("\n".join(hypotheses).encode("utf-8")) + hypothesis_file.write(b"\n") + hypothesis_file.flush() + reference_file = tempfile.NamedTemporaryFile() + reference_file.write("\n".join(references).encode("utf-8")) + reference_file.write(b"\n") + reference_file.flush() + + # Calculate BLEU using multi-bleu script + with open(hypothesis_file.name, "r") as read_pred: + bleu_cmd = [multi_bleu_path] + if lowercase: + bleu_cmd += ["-lc"] + bleu_cmd += [reference_file.name] + try: + bleu_out = subprocess.check_output(bleu_cmd, stdin=read_pred, stderr=subprocess.STDOUT) + bleu_out = bleu_out.decode("utf-8") + bleu_score = re.search(r"BLEU = (.+?),", bleu_out).group(1) + bleu_score = float(bleu_score) + except subprocess.CalledProcessError as error: + if error.output is not None: + tf.logging.warning("multi-bleu.perl 
script returned non-zero exit code") + tf.logging.warning(error.output) + bleu_score = np.float32(0.0) + + # Close temp files + hypothesis_file.close() + reference_file.close() + + return np.float32(bleu_score) diff --git a/tensorlayer/ops.py b/tensorlayer/ops.py deleted file mode 100644 index 608799c..0000000 --- a/tensorlayer/ops.py +++ /dev/null @@ -1,219 +0,0 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- - - - - -import tensorflow as tf -import os -import sys -from sys import platform as _platform - - -def exit_tf(sess=None): - """Close tensorboard and nvidia-process if available - - Parameters - ---------- - sess : a session instance of TensorFlow - TensorFlow session - """ - text = "[tl] Close tensorboard and nvidia-process if available" - sess.close() - # import time - # time.sleep(2) - if _platform == "linux" or _platform == "linux2": - print('linux: %s' % text) - os.system('nvidia-smi') - os.system('fuser 6006/tcp -k') # kill tensorboard 6006 - os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process - elif _platform == "darwin": - print('OS X: %s' % text) - os.system("lsof -i tcp:6006 | grep -v PID | awk '{print $2}' | xargs kill") # kill tensorboard 6006 - elif _platform == "win32": - print('Windows: %s' % text) - else: - print(_platform) - exit() - -def clear_all(printable=True): - """Clears all the placeholder variables of keep prob, - including keeping probabilities of all dropout, denoising, dropconnect etc. - - Parameters - ---------- - printable : boolean - If True, print all deleted variables. 
- """ - print('clear all .....................................') - gl = globals().copy() - for var in gl: - if var[0] == '_': continue - if 'func' in str(globals()[var]): continue - if 'module' in str(globals()[var]): continue - if 'class' in str(globals()[var]): continue - - if printable: - print(" clear_all ------- %s" % str(globals()[var])) - - del globals()[var] - -# def clear_all2(vars, printable=True): -# """ -# The :function:`clear_all()` Clears all the placeholder variables of keep prob, -# including keeping probabilities of all dropout, denoising, dropconnect -# Parameters -# ---------- -# printable : if True, print all deleted variables. -# """ -# print('clear all .....................................') -# for var in vars: -# if var[0] == '_': continue -# if 'func' in str(var): continue -# if 'module' in str(var): continue -# if 'class' in str(var): continue -# -# if printable: -# print(" clear_all ------- %s" % str(var)) -# -# del var - -def set_gpu_fraction(sess=None, gpu_fraction=0.3): - """Set the GPU memory fraction for the application. - - Parameters - ---------- - sess : a session instance of TensorFlow - TensorFlow session - gpu_fraction : a float - Fraction of GPU memory, (0 ~ 1] - - References - ---------- - - `TensorFlow using GPU `_ - """ - print(" tensorlayer: GPU MEM Fraction %f" % gpu_fraction) - gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction) - sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) - return sess - - - - - -def disable_print(): - """Disable console output, ``suppress_stdout`` is recommended. - - Examples - --------- - >>> print("You can see me") - >>> tl.ops.disable_print() - >>> print(" You can't see me") - >>> tl.ops.enable_print() - >>> print("You can see me") - """ - # sys.stdout = os.devnull # this one kill the process - sys.stdout = None - sys.stderr = os.devnull - -def enable_print(): - """Enable console output, ``suppress_stdout`` is recommended. 
- - Examples - -------- - - see tl.ops.disable_print() - """ - sys.stdout = sys.__stdout__ - sys.stderr = sys.__stderr__ - - -# class temporary_disable_print: -# """Temporarily disable console output. -# -# Examples -# --------- -# >>> print("You can see me") -# >>> with tl.ops.temporary_disable_print() as t: -# >>> print("You can't see me") -# >>> print("You can see me") -# """ -# def __init__(self): -# pass -# def __enter__(self): -# sys.stdout = None -# sys.stderr = os.devnull -# def __exit__(self, type, value, traceback): -# sys.stdout = sys.__stdout__ -# sys.stderr = sys.__stderr__ -# return isinstance(value, TypeError) - - -from contextlib import contextmanager -@contextmanager -def suppress_stdout(): - """Temporarily disable console output. - - Examples - --------- - >>> print("You can see me") - >>> with tl.ops.suppress_stdout(): - >>> print("You can't see me") - >>> print("You can see me") - - References - ----------- - - `stackoverflow `_ - """ - with open(os.devnull, "w") as devnull: - old_stdout = sys.stdout - sys.stdout = devnull - try: - yield - finally: - sys.stdout = old_stdout - - - -def get_site_packages_directory(): - """Print and return the site-packages directory. - - Examples - --------- - >>> loc = tl.ops.get_site_packages_directory() - """ - import site - try: - loc = site.getsitepackages() - print(" tl.ops : site-packages in ", loc) - return loc - except: - print(" tl.ops : Cannot find package dir from virtual environment") - return False - - - -def empty_trash(): - """Empty trash folder. 
- - """ - text = "[tl] Empty the trash" - if _platform == "linux" or _platform == "linux2": - print('linux: %s' % text) - os.system("rm -rf ~/.local/share/Trash/*") - elif _platform == "darwin": - print('OS X: %s' % text) - os.system("sudo rm -rf ~/.Trash/*") - elif _platform == "win32": - print('Windows: %s' % text) - try: - os.system("rd /s c:\$Recycle.Bin") # Windows 7 or Server 2008 - except: - pass - try: - os.system("rd /s c:\recycler") # Windows XP, Vista, or Server 2003 - except: - pass - else: - print(_platform) - -# diff --git a/tensorlayer/prepro.py b/tensorlayer/prepro.py index 047d646..1d2de81 100644 --- a/tensorlayer/prepro.py +++ b/tensorlayer/prepro.py @@ -1,296 +1,379 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- +# -*- coding: utf-8 -*- - -import tensorflow as tf -import tensorlayer as tl +import threading, time, scipy, skimage import numpy as np - -import time -import numbers -import random -import os -import re -import sys - -import threading -# import Queue # <-- donot work for py3 -is_py2 = sys.version[0] == '2' -if is_py2: - import Queue as queue -else: - import queue as queue - -from six.moves import range -import scipy -from scipy import linalg import scipy.ndimage as ndi - -from skimage import transform -from skimage import exposure -import skimage +# import tensorlayer as tl +from scipy import linalg +from scipy.ndimage.filters import gaussian_filter +from scipy.ndimage.interpolation import map_coordinates +from six.moves import range +from skimage import exposure, transform # linalg https://docs.scipy.org/doc/scipy/reference/linalg.html # ndimage https://docs.scipy.org/doc/scipy/reference/ndimage.html -## Threading -def threading_data(data=None, fn=None, **kwargs): - """Return a batch of result by given data. 
+__all__ = [ + 'threading_data', + 'rotation', + 'rotation_multi', + 'crop', + 'crop_multi', + 'flip_axis', + 'flip_axis_multi', + 'shift', + 'shift_multi', + 'shear', + 'shear_multi', + 'shear2', + 'shear_multi2', + 'swirl', + 'swirl_multi', + 'elastic_transform', + 'elastic_transform_multi', + 'zoom', + 'zoom_multi', + 'brightness', + 'brightness_multi', + 'illumination', + 'rgb_to_hsv', + 'hsv_to_rgb', + 'adjust_hue', + 'imresize', + 'pixel_value_scale', + 'samplewise_norm', + 'featurewise_norm', + 'get_zca_whitening_principal_components_img', + 'zca_whitening', + 'channel_shift', + 'channel_shift_multi', + 'drop', + 'transform_matrix_offset_center', + 'apply_transform', + 'projective_transform_by_points', + 'array_to_img', + 'find_contours', + 'pt2map', + 'binary_dilation', + 'dilation', + 'binary_erosion', + 'erosion', + 'obj_box_coords_rescale', + 'obj_box_coord_rescale', + 'obj_box_coord_scale_to_pixelunit', + 'obj_box_coord_centroid_to_upleft_butright', + 'obj_box_coord_upleft_butright_to_centroid', + 'obj_box_coord_centroid_to_upleft', + 'obj_box_coord_upleft_to_centroid', + 'parse_darknet_ann_str_to_list', + 'parse_darknet_ann_list_to_cls_box', + 'obj_box_left_right_flip', + 'obj_box_imresize', + 'obj_box_crop', + 'obj_box_shift', + 'obj_box_zoom', + 'pad_sequences', + 'remove_pad_sequences', + 'process_sequences', + 'sequences_add_start_id', + 'sequences_add_end_id', + 'sequences_add_end_id_after_pad', + 'sequences_get_mask', +] + + +def threading_data(data=None, fn=None, thread_count=None, **kwargs): + """Process a batch of data by given function by threading. + Usually be used for data augmentation. Parameters ----------- - data : numpy array or zip of numpy array, see Examples below. - fn : the function for data processing. - more args : the args for fn, see Examples below. + data : numpy.array or others + The data to be processed. + thread_count : int + The number of threads to use. + fn : function + The function for data processing. 
+ more args : the args for `fn` + Ssee Examples below. Examples -------- - - Single array - >>> X --> [batch_size, row, col, 1] greyscale - >>> results = threading_data(X, zoom, zoom_range=[0.5, 1], is_random=True) - ... results --> [batch_size, row, col, channel] - >>> tl.visualize.images2d(images=np.asarray(results), second=0.01, saveable=True, name='after', dtype=None) - >>> tl.visualize.images2d(images=np.asarray(X), second=0.01, saveable=True, name='before', dtype=None) - - - List of array (e.g. functions with ``multi``) - >>> X, Y --> [batch_size, row, col, 1] greyscale - >>> data = threading_data([_ for _ in zip(X, Y)], zoom_multi, zoom_range=[0.5, 1], is_random=True) + Process images. + + >>> images, _, _, _ = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3)) + >>> images = tl.prepro.threading_data(images[0:32], tl.prepro.zoom, zoom_range=[0.5, 1]) + + Customized image preprocessing function. + + >>> def distort_img(x): + ... x = tl.prepro.flip_axis(x, axis=0, is_random=True) + ... x = tl.prepro.flip_axis(x, axis=1, is_random=True) + ... x = tl.prepro.crop(x, 100, 100, is_random=True) + ... return x + >>> images = tl.prepro.threading_data(images, distort_img) + + Process images and masks together (Usually be used for image segmentation). + + >>> X, Y --> [batch_size, row, col, 1] + >>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], tl.prepro.zoom_multi, zoom_range=[0.5, 1], is_random=True) ... data --> [batch_size, 2, row, col, 1] >>> X_, Y_ = data.transpose((1,0,2,3,4)) ... X_, Y_ --> [batch_size, row, col, 1] - >>> tl.visualize.images2d(images=np.asarray(X_), second=0.01, saveable=True, name='after', dtype=None) - >>> tl.visualize.images2d(images=np.asarray(Y_), second=0.01, saveable=True, name='before', dtype=None) + >>> tl.vis.save_image(X_, 'images.png') + >>> tl.vis.save_image(Y_, 'masks.png') + + Process images and masks together by using ``thread_count``. 
+ + >>> X, Y --> [batch_size, row, col, 1] + >>> data = tl.prepro.threading_data(X, tl.prepro.zoom_multi, 8, zoom_range=[0.5, 1], is_random=True) + ... data --> [batch_size, 2, row, col, 1] + >>> X_, Y_ = data.transpose((1,0,2,3,4)) + ... X_, Y_ --> [batch_size, row, col, 1] + >>> tl.vis.save_image(X_, 'after.png') + >>> tl.vis.save_image(Y_, 'before.png') + + Customized function for processing images and masks together. - - Customized function for image segmentation >>> def distort_img(data): ... x, y = data - ... x, y = flip_axis_multi([x, y], axis=0, is_random=True) - ... x, y = flip_axis_multi([x, y], axis=1, is_random=True) - ... x, y = crop_multi([x, y], 100, 100, is_random=True) + ... x, y = tl.prepro.flip_axis_multi([x, y], axis=0, is_random=True) + ... x, y = tl.prepro.flip_axis_multi([x, y], axis=1, is_random=True) + ... x, y = tl.prepro.crop_multi([x, y], 100, 100, is_random=True) ... return x, y >>> X, Y --> [batch_size, row, col, channel] - >>> data = threading_data([_ for _ in zip(X, Y)], distort_img) + >>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], distort_img) >>> X_, Y_ = data.transpose((1,0,2,3,4)) + Returns + ------- + list or numpyarray + The processed results. + References ---------- - - `python queue `_ - - `run with limited queue `_ - """ - ## plot function info - # for name, value in kwargs.items(): - # print('{0} = {1}'.format(name, value)) - # exit() - # define function for threading + - `python queue `__ + - `run with limited queue `__ + + """ + def apply_fn(results, i, data, kwargs): results[i] = fn(data, **kwargs) - ## start multi-threaded reading. 
- results = [None] * len(data) ## preallocate result list - threads = [] - for i in range(len(data)): - t = threading.Thread( - name='threading_and_return', - target=apply_fn, - args=(results, i, data[i], kwargs) - ) - t.start() - threads.append(t) - - ## wait for all threads to complete + if thread_count is None: + results = [None] * len(data) + threads = [] + # for i in range(len(data)): + # t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs)) + for i, d in enumerate(data): + t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, d, kwargs)) + t.start() + threads.append(t) + else: + divs = np.linspace(0, len(data), thread_count + 1) + divs = np.round(divs).astype(int) + results = [None] * thread_count + threads = [] + for i in range(thread_count): + t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[divs[i]:divs[i + 1]], kwargs)) + t.start() + threads.append(t) + for t in threads: t.join() - return np.asarray(results) + if thread_count is None: + try: + return np.asarray(results) + except Exception: + return results + else: + return np.concatenate(results) - ## old implementation - # define function for threading - # def function(q, i, data, kwargs): - # result = fn(data, **kwargs) - # q.put([i, result]) - # ## start threading - # q = queue.Queue() - # threads = [] - # for i in range(len(data)): - # t = threading.Thread( - # name='threading_and_return', - # target=function, - # args=(q, i, data[i], kwargs) - # ) - # t.start() - # threads.append(t) - # - # ## wait for all threads to complete - # for t in threads: - # t.join() - # - # ## get results - # results = [] - # for i in range(len(data)): - # result = q.get() - # results.append(result) - # results = sorted(results) - # for i in range(len(results)): - # results[i] = results[i][1] - # return np.asarray(results) - - -## Image -def rotation(x, rg=20, is_random=False, row_index=0, col_index=1, 
channel_index=2, - fill_mode='nearest', cval=0.): + +def rotation(x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Rotate an image randomly or non-randomly. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). rg : int or float Degree to rotate, usually 0 ~ 180. - is_random : boolean, default False - If True, randomly rotate. - row_index, col_index, channel_index : int + is_random : boolean + If True, randomly rotate. Default is False + row_index col_index and channel_index : int Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). - fill_mode : string - Method to fill missing pixel, default ‘nearest’, more options ‘constant’, ‘reflect’ or ‘wrap’ + fill_mode : str + Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__ + cval : float + Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0 + order : int + The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform `__ - - `scipy ndimage affine_transform `_ - cval : scalar, optional - Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0 - - - `scipy ndimage affine_transform `_ + Returns + ------- + numpy.array + A processed image. 
Examples --------- - >>> x --> [row, col, 1] greyscale - >>> x = rotation(x, rg=40, is_random=False) - >>> tl.visualize.frame(x[:,:,0], second=0.01, saveable=True, name='temp',cmap='gray') + >>> x --> [row, col, 1] + >>> x = tl.prepro.rotation(x, rg=40, is_random=False) + >>> tl.vis.save_image(x, 'im.png') + """ if is_random: theta = np.pi / 180 * np.random.uniform(-rg, rg) else: - theta = np.pi /180 * rg - rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], - [np.sin(theta), np.cos(theta), 0], - [0, 0, 1]]) + theta = np.pi / 180 * rg + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) h, w = x.shape[row_index], x.shape[col_index] transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w) - x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval) + x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order) return x -def rotation_multi(x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, - fill_mode='nearest', cval=0.): + +def rotation_multi(x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Rotate multiple images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``rotation``. + others : args + See ``tl.prepro.rotation``. + + Returns + ------- + numpy.array + A list of processed images. 
Examples -------- >>> x, y --> [row, col, 1] greyscale - >>> x, y = rotation_multi([x, y], rg=90, is_random=False) - >>> tl.visualize.frame(x[:,:,0], second=0.01, saveable=True, name='x',cmap='gray') - >>> tl.visualize.frame(y[:,:,0], second=0.01, saveable=True, name='y',cmap='gray') + >>> x, y = tl.prepro.rotation_multi([x, y], rg=90, is_random=False) + """ if is_random: theta = np.pi / 180 * np.random.uniform(-rg, rg) else: - theta = np.pi /180 * rg - rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], - [np.sin(theta), np.cos(theta), 0], - [0, 0, 1]]) + theta = np.pi / 180 * rg + rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) h, w = x[0].shape[row_index], x[0].shape[col_index] transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w) results = [] for data in x: - results.append( apply_transform(data, transform_matrix, channel_index, fill_mode, cval)) + results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order)) return np.asarray(results) + # crop -def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1, channel_index=2): +def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1): """Randomly or centrally crop an image. Parameters ---------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). - wrg : float - Size of weight. - hrg : float + wrg : int + Size of width. + hrg : int Size of height. - is_random : boolean, default False - If True, randomly crop, else central crop. - row_index, col_index, channel_index : int - Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). + is_random : boolean, + If True, randomly crop, else central crop. Default is False. + row_index: int + index of row. + col_index: int + index of column. + + Returns + ------- + numpy.array + A processed image. 
+ """ h, w = x.shape[row_index], x.shape[col_index] assert (h > hrg) and (w > wrg), "The size of cropping should smaller than the original image" if is_random: - h_offset = int(np.random.uniform(0, h-hrg) -1) - w_offset = int(np.random.uniform(0, w-wrg) -1) - # print(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape) - return x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset] - else: # central crop - h_offset = int(np.floor((h - hrg)/2.)) - w_offset = int(np.floor((w - wrg)/2.)) + h_offset = int(np.random.uniform(0, h - hrg) - 1) + w_offset = int(np.random.uniform(0, w - wrg) - 1) + # logging.info(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape) + return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset] + else: # central crop + h_offset = int(np.floor((h - hrg) / 2.)) + w_offset = int(np.floor((w - wrg) / 2.)) h_end = h_offset + hrg w_end = w_offset + wrg - return x[h_offset: h_end, w_offset: w_end] + return x[h_offset:h_end, w_offset:w_end] # old implementation # h_offset = (h - hrg)/2 # w_offset = (w - wrg)/2 - # # print(x[h_offset: h-h_offset ,w_offset: w-w_offset].shape) + # # logging.info(x[h_offset: h-h_offset ,w_offset: w-w_offset].shape) # return x[h_offset: h-h_offset ,w_offset: w-w_offset] # central crop -def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1, channel_index=2): +def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1): """Randomly or centrally crop multiple images. Parameters ---------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``crop``. + others : args + See ``tl.prepro.crop``. + + Returns + ------- + numpy.array + A list of processed images. 
+ """ h, w = x[0].shape[row_index], x[0].shape[col_index] assert (h > hrg) and (w > wrg), "The size of cropping should smaller than the original image" if is_random: - h_offset = int(np.random.uniform(0, h-hrg) -1) - w_offset = int(np.random.uniform(0, w-wrg) -1) + h_offset = int(np.random.uniform(0, h - hrg) - 1) + w_offset = int(np.random.uniform(0, w - wrg) - 1) results = [] for data in x: - results.append( data[h_offset: hrg+h_offset ,w_offset: wrg+w_offset]) + results.append(data[h_offset:hrg + h_offset, w_offset:wrg + w_offset]) return np.asarray(results) else: # central crop - h_offset = (h - hrg)/2 - w_offset = (w - wrg)/2 + h_offset = (h - hrg) / 2 + w_offset = (w - wrg) / 2 results = [] for data in x: - results.append( data[h_offset: h-h_offset ,w_offset: w-w_offset] ) + results.append(data[h_offset:h - h_offset, w_offset:w - w_offset]) return np.asarray(results) + # flip -def flip_axis(x, axis, is_random=False): +def flip_axis(x, axis=1, is_random=False): """Flip the axis of an image, such as flip left and right, up and down, randomly or non-randomly, Parameters ---------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). axis : int - - 0, flip up and down - - 1, flip left and right - - 2, flip channel - is_random : boolean, default False - If True, randomly flip. + Which axis to flip. + - 0, flip up and down + - 1, flip left and right + - 2, flip channel + is_random : boolean + If True, randomly flip. Default is False. + + Returns + ------- + numpy.array + A processed image. 
+ """ if is_random: factor = np.random.uniform(-1, 1) @@ -307,14 +390,22 @@ def flip_axis(x, axis, is_random=False): x = x.swapaxes(0, axis) return x + def flip_axis_multi(x, axis, is_random=False): """Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly, Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``flip_axis``. + others : args + See ``tl.prepro.flip_axis``. + + Returns + ------- + numpy.array + A list of processed images. + """ if is_random: factor = np.random.uniform(-1, 1) @@ -328,7 +419,7 @@ def flip_axis_multi(x, axis, is_random=False): data = np.asarray(data).swapaxes(axis, 0) data = data[::-1, ...] data = data.swapaxes(0, axis) - results.append( data ) + results.append(data) return np.asarray(results) else: return np.asarray(x) @@ -342,34 +433,38 @@ def flip_axis_multi(x, axis, is_random=False): data = np.asarray(data).swapaxes(axis, 0) data = data[::-1, ...] data = data.swapaxes(0, axis) - results.append( data ) + results.append(data) return np.asarray(results) + # shift -def shift(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, - fill_mode='nearest', cval=0.): +def shift(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Shift an image randomly or non-randomly. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). wrg : float Percentage of shift in axis x, usually -0.25 ~ 0.25. hrg : float Percentage of shift in axis y, usually -0.25 ~ 0.25. - is_random : boolean, default False - If True, randomly shift. - row_index, col_index, channel_index : int + is_random : boolean + If True, randomly shift. Default is False. 
+ row_index col_index and channel_index : int Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). - fill_mode : string - Method to fill missing pixel, default ‘nearest’, more options ‘constant’, ‘reflect’ or ‘wrap’. - - - `scipy ndimage affine_transform `_ - cval : scalar, optional + fill_mode : str + Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__ + cval : float Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. + order : int + The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform `__ + + Returns + ------- + numpy.array + A processed image. - - `scipy ndimage affine_transform `_ """ h, w = x.shape[row_index], x.shape[col_index] if is_random: @@ -377,24 +472,29 @@ def shift(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channe ty = np.random.uniform(-wrg, wrg) * w else: tx, ty = hrg * h, wrg * w - translation_matrix = np.array([[1, 0, tx], - [0, 1, ty], - [0, 0, 1]]) + translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) transform_matrix = translation_matrix # no need to do offset - x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval) + x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order) return x -def shift_multi(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, - fill_mode='nearest', cval=0.): + +def shift_multi(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Shift images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). 
- others : see ``shift``. + others : args + See ``tl.prepro.shift``. + + Returns + ------- + numpy.array + A list of processed images. + """ h, w = x[0].shape[row_index], x[0].shape[col_index] if is_random: @@ -402,120 +502,223 @@ def shift_multi(x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, ty = np.random.uniform(-wrg, wrg) * w else: tx, ty = hrg * h, wrg * w - translation_matrix = np.array([[1, 0, tx], - [0, 1, ty], - [0, 0, 1]]) + translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) transform_matrix = translation_matrix # no need to do offset results = [] for data in x: - results.append( apply_transform(data, transform_matrix, channel_index, fill_mode, cval)) + results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order)) return np.asarray(results) + # shear -def shear(x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, - fill_mode='nearest', cval=0.): +def shear(x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Shear an image randomly or non-randomly. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). intensity : float Percentage of shear, usually -0.5 ~ 0.5 (is_random==True), 0 ~ 0.5 (is_random==False), you can have a quick try by shear(X, 1). - is_random : boolean, default False - If True, randomly shear. - row_index, col_index, channel_index : int + is_random : boolean + If True, randomly shear. Default is False. + row_index col_index and channel_index : int Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). - fill_mode : string - Method to fill missing pixel, default ‘nearest’, more options ‘constant’, ‘reflect’ or ‘wrap’. 
- - - `scipy ndimage affine_transform `_ - cval : scalar, optional + fill_mode : str + Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see and `scipy ndimage affine_transform `__ + cval : float Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. + order : int + The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform `__ + + Returns + ------- + numpy.array + A processed image. + + References + ----------- + - `Affine transformation `__ - - `scipy ndimage affine_transform `_ """ if is_random: shear = np.random.uniform(-intensity, intensity) else: shear = intensity - shear_matrix = np.array([[1, -np.sin(shear), 0], - [0, np.cos(shear), 0], - [0, 0, 1]]) + shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]]) h, w = x.shape[row_index], x.shape[col_index] transform_matrix = transform_matrix_offset_center(shear_matrix, h, w) - x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval) + x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order) return x -def shear_multi(x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, - fill_mode='nearest', cval=0.): + +def shear_multi(x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Shear images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``shear``. + others : args + See ``tl.prepro.shear``. + + Returns + ------- + numpy.array + A list of processed images. 
+ """ if is_random: shear = np.random.uniform(-intensity, intensity) else: shear = intensity - shear_matrix = np.array([[1, -np.sin(shear), 0], - [0, np.cos(shear), 0], - [0, 0, 1]]) + shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]]) + + h, w = x[0].shape[row_index], x[0].shape[col_index] + transform_matrix = transform_matrix_offset_center(shear_matrix, h, w) + results = [] + for data in x: + results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order)) + return np.asarray(results) + + +def shear2(x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): + """Shear an image randomly or non-randomly. + + Parameters + ----------- + x : numpy.array + An image with dimension of [row, col, channel] (default). + shear : tuple of two floats + Percentage of shear for height and width direction (0, 1). + is_random : boolean + If True, randomly shear. Default is False. + row_index col_index and channel_index : int + Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). + fill_mode : str + Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__ + cval : float + Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. + order : int + The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform `__ + + Returns + ------- + numpy.array + A processed image. + + References + ----------- + - `Affine transformation `__ + + """ + assert len(shear) == 2, "shear should be tuple of 2 floats, or you want to use tl.prepro.shear rather than tl.prepro.shear2 ?" 
+ if is_random: + shear[0] = np.random.uniform(-shear[0], shear[0]) + shear[1] = np.random.uniform(-shear[1], shear[1]) + + shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]]) + + h, w = x.shape[row_index], x.shape[col_index] + transform_matrix = transform_matrix_offset_center(shear_matrix, h, w) + x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order) + return x + + +def shear_multi2(x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): + """Shear images with the same arguments, randomly or non-randomly. + Usually be used for image segmentation which x=[X, Y], X and Y should be matched. + + Parameters + ----------- + x : list of numpy.array + List of images with dimension of [n_images, row, col, channel] (default). + others : args + See ``tl.prepro.shear2``. + + Returns + ------- + numpy.array + A list of processed images. + + """ + assert len(shear) == 2, "shear should be tuple of 2 floats, or you want to use tl.prepro.shear_multi rather than tl.prepro.shear_multi2 ?" + if is_random: + shear[0] = np.random.uniform(-shear[0], shear[0]) + shear[1] = np.random.uniform(-shear[1], shear[1]) + + shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]]) h, w = x[0].shape[row_index], x[0].shape[col_index] transform_matrix = transform_matrix_offset_center(shear_matrix, h, w) results = [] for data in x: - results.append( apply_transform(data, transform_matrix, channel_index, fill_mode, cval)) + results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order)) return np.asarray(results) + # swirl -def swirl(x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0, clip=True, preserve_range=False, is_random=False): - """Swirl an image randomly or non-randomly, see `scikit-image swirl API `_ - and `example `_. 
+def swirl(x, + center=None, + strength=1, + radius=100, + rotation=0, + output_shape=None, + order=1, + mode='constant', + cval=0, + clip=True, + preserve_range=False, + is_random=False): + """Swirl an image randomly or non-randomly, see `scikit-image swirl API `__ + and `example `__. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). - center : (row, column) tuple or (2,) ndarray, optional - Center coordinate of transformation. - strength : float, optional + center : tuple or 2 int or None + Center coordinate of transformation (optional). + strength : float The amount of swirling applied. - radius : float, optional + radius : float The extent of the swirl in pixels. The effect dies out rapidly beyond radius. - rotation : float, (degree) optional + rotation : float Additional rotation applied to the image, usually [0, 360], relates to center. - output_shape : tuple (rows, cols), optional - Shape of the output image generated. By default the shape of the input image is preserved. + output_shape : tuple of 2 int or None + Shape of the output image generated (height, width). By default the shape of the input image is preserved. order : int, optional The order of the spline interpolation, default is 1. The order has to be in the range 0-5. See skimage.transform.warp for detail. - mode : {‘constant’, ‘edge’, ‘symmetric’, ‘reflect’, ‘wrap’}, optional - Points outside the boundaries of the input are filled according to the given mode, with ‘constant’ used as the default. Modes match the behaviour of numpy.pad. - cval : float, optional - Used in conjunction with mode ‘constant’, the value outside the image boundaries. - clip : bool, optional + mode : str + One of `constant` (default), `edge`, `symmetric` `reflect` and `wrap`. + Points outside the boundaries of the input are filled according to the given mode, with `constant` used as the default. Modes match the behaviour of numpy.pad. 
+ cval : float + Used in conjunction with mode `constant`, the value outside the image boundaries. + clip : boolean Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. - preserve_range : bool, optional + preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. - is_random : boolean, default False - If True, random swirl. + is_random : boolean, + If True, random swirl. Default is False. - random center = [(0 ~ x.shape[0]), (0 ~ x.shape[1])] - random strength = [0, strength] - random radius = [1e-10, radius] - random rotation = [-rotation, rotation] + Returns + ------- + numpy.array + A processed image. + Examples --------- >>> x --> [row, col, 1] greyscale - >>> x = swirl(x, strength=4, radius=100) + >>> x = tl.prepro.swirl(x, strength=4, radius=100) + """ assert radius != 0, Exception("Invalid radius value") rotation = np.pi / 180 * rotation @@ -528,23 +731,52 @@ def swirl(x, center=None, strength=1, radius=100, rotation=0, output_shape=None, rotation = np.random.uniform(-rotation, rotation) max_v = np.max(x) - if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required. + if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required. 
x = x / max_v - swirled = skimage.transform.swirl(x, center=center, strength=strength, radius=radius, rotation=rotation, - output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range) + swirled = skimage.transform.swirl( + x, + center=center, + strength=strength, + radius=radius, + rotation=rotation, + output_shape=output_shape, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range) if max_v > 1: swirled = swirled * max_v return swirled -def swirl_multi(x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0, clip=True, preserve_range=False, is_random=False): + +def swirl_multi(x, + center=None, + strength=1, + radius=100, + rotation=0, + output_shape=None, + order=1, + mode='constant', + cval=0, + clip=True, + preserve_range=False, + is_random=False): """Swirl multiple images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``swirl``. + others : args + See ``tl.prepro.swirl``. + + Returns + ------- + numpy.array + A list of processed images. + """ assert radius != 0, Exception("Invalid radius value") rotation = np.pi / 180 * rotation @@ -559,40 +791,59 @@ def swirl_multi(x, center=None, strength=1, radius=100, rotation=0, output_shape results = [] for data in x: max_v = np.max(data) - if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required. + if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required. 
data = data / max_v - swirled = skimage.transform.swirl(data, center=center, strength=strength, radius=radius, rotation=rotation, - output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range) + swirled = skimage.transform.swirl( + data, + center=center, + strength=strength, + radius=radius, + rotation=rotation, + output_shape=output_shape, + order=order, + mode=mode, + cval=cval, + clip=clip, + preserve_range=preserve_range) if max_v > 1: swirled = swirled * max_v - results.append( swirled ) + results.append(swirled) return np.asarray(results) -# elastic_transform -from scipy.ndimage.interpolation import map_coordinates -from scipy.ndimage.filters import gaussian_filter +# elastic_transform def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False): - """Elastic deformation of images as described in `[Simard2003] `_ . + """Elastic transformation for image as described in `[Simard2003] `__. Parameters ----------- - x : numpy array, a greyscale image. - alpha : scalar factor. - sigma : scalar or sequence of scalars, the smaller the sigma, the more transformation. - Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. - mode : default constant, see `scipy.ndimage.filters.gaussian_filter `_. - cval : float, optional. Used in conjunction with mode ‘constant’, the value outside the image boundaries. - is_random : boolean, default False + x : numpy.array + A greyscale image. + alpha : float + Alpha value for elastic transformation. + sigma : float or sequence of float + The smaller the sigma, the more transformation. Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. + mode : str + See `scipy.ndimage.filters.gaussian_filter `__. 
Default is `constant`. + cval : float, + Used in conjunction with `mode` of `constant`, the value outside the image boundaries. + is_random : boolean + Default is False. + + Returns + ------- + numpy.array + A processed image. Examples --------- - >>> x = elastic_transform(x, alpha = x.shape[1] * 3, sigma = x.shape[1] * 0.07) + >>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07) References ------------ - - `Github `_. - - `Kaggle `_ + - `Github `__. + - `Kaggle `__ + """ if is_random is False: random_state = np.random.RandomState(None) @@ -601,11 +852,11 @@ def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False) # is_3d = False if len(x.shape) == 3 and x.shape[-1] == 1: - x = x[:,:,0] + x = x[:, :, 0] is_3d = True elif len(x.shape) == 3 and x.shape[-1] != 1: raise Exception("Only support greyscale image") - assert len(x.shape)==2 + assert len(x.shape) == 2, "input should be grey-scale image" shape = x.shape @@ -619,13 +870,22 @@ def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False) else: return map_coordinates(x, indices, order=1).reshape(shape) + def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random=False): - """Elastic deformation of images as described in `[Simard2003] `_. + """Elastic transformation for images as described in `[Simard2003] `__. Parameters ----------- - x : list of numpy array - others : see ``elastic_transform``. + x : list of numpy.array + List of greyscale images. + others : args + See ``tl.prepro.elastic_transform``. + + Returns + ------- + numpy.array + A list of processed images. 
+ """ if is_random is False: random_state = np.random.RandomState(None) @@ -641,98 +901,103 @@ def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random= for data in x: is_3d = False if len(data.shape) == 3 and data.shape[-1] == 1: - data = data[:,:,0] + data = data[:, :, 0] is_3d = True elif len(data.shape) == 3 and data.shape[-1] != 1: raise Exception("Only support greyscale image") - assert len(data.shape)==2 + assert len(data.shape) == 2, "input should be grey-scale image" dx = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha dy = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij') indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1)) - # print(data.shape) + # logging.info(data.shape) if is_3d: - results.append( map_coordinates(data, indices, order=1).reshape((shape[0], shape[1], 1))) + results.append(map_coordinates(data, indices, order=1).reshape((shape[0], shape[1], 1))) else: - results.append( map_coordinates(data, indices, order=1).reshape(shape) ) + results.append(map_coordinates(data, indices, order=1).reshape(shape)) return np.asarray(results) + # zoom -def zoom(x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, - fill_mode='nearest', cval=0.): +def zoom(x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Zoom in and out of a single image, randomly or non-randomly. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). zoom_range : list or tuple - - If is_random=False, (h, w) are the fixed zoom factor for row and column axies, factor small than one is zoom in. - - If is_random=True, (min zoom out, max zoom out) for x and y with different random zoom in/out factor. - e.g (0.5, 1) zoom in 1~2 times. 
- is_random : boolean, default False - If True, randomly zoom. - row_index, col_index, channel_index : int + Zoom range for height and width. + - If is_random=False, (h, w) are the fixed zoom factor for row and column axies, factor small than one is zoom in. + - If is_random=True, (h, w) are (min zoom out, max zoom out) for x and y with different random zoom in/out factor, e.g (0.5, 1) zoom in 1~2 times. + is_random : boolean + If True, randomly zoom. Default is False. + row_index col_index and channel_index : int Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). - fill_mode : string - Method to fill missing pixel, default ‘nearest’, more options ‘constant’, ‘reflect’ or ‘wrap’. - - - `scipy ndimage affine_transform `_ - cval : scalar, optional + fill_mode : str + Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__ + cval : float Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0. + order : int + The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform `__ + + Returns + ------- + numpy.array + A processed image. - - `scipy ndimage affine_transform `_ """ if len(zoom_range) != 2: - raise Exception('zoom_range should be a tuple or list of two floats. ' - 'Received arg: ', zoom_range) + raise Exception('zoom_range should be a tuple or list of two floats. 
' 'Received arg: ', zoom_range) if is_random: if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 - print(" random_zoom : not zoom in/out") + logging.info(" random_zoom : not zoom in/out") else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) else: zx, zy = zoom_range - # print(zx, zy) - zoom_matrix = np.array([[zx, 0, 0], - [0, zy, 0], - [0, 0, 1]]) + # logging.info(zx, zy) + zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) h, w = x.shape[row_index], x.shape[col_index] transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w) - x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval) + x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order) return x -def zoom_multi(x, zoom_range=(0.9, 1.1), is_random=False, - row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.): + +def zoom_multi(x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1): """Zoom in and out of images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``zoom``. + others : args + See ``tl.prepro.zoom``. + + Returns + ------- + numpy.array + A list of processed images. + """ if len(zoom_range) != 2: - raise Exception('zoom_range should be a tuple or list of two floats. ' - 'Received arg: ', zoom_range) + raise Exception('zoom_range should be a tuple or list of two floats. 
' 'Received arg: ', zoom_range) if is_random: if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 - print(" random_zoom : not zoom in/out") + logging.info(" random_zoom : not zoom in/out") else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) else: zx, zy = zoom_range - zoom_matrix = np.array([[zx, 0, 0], - [0, zy, 0], - [0, 0, 1]]) + zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) h, w = x[0].shape[row_index], x[0].shape[col_index] transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w) @@ -740,100 +1005,324 @@ def zoom_multi(x, zoom_range=(0.9, 1.1), is_random=False, # return x results = [] for data in x: - results.append( apply_transform(data, transform_matrix, channel_index, fill_mode, cval)) + results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order)) return np.asarray(results) + # image = tf.image.random_brightness(image, max_delta=32. / 255.) # image = tf.image.random_saturation(image, lower=0.5, upper=1.5) # image = tf.image.random_hue(image, max_delta=0.032) # image = tf.image.random_contrast(image, lower=0.5, upper=1.5) -# brightness + def brightness(x, gamma=1, gain=1, is_random=False): """Change the brightness of a single image, randomly or non-randomly. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). - gamma : float, small than 1 means brighter. + gamma : float Non negative real number. Default value is 1. - - - If is_random is True, gamma in a range of (1-gamma, 1+gamma). + - Small than 1 means brighter. + - If `is_random` is True, gamma in a range of (1-gamma, 1+gamma). gain : float The constant multiplier. Default value is 1. - is_random : boolean, default False - - If True, randomly change brightness. + is_random : boolean + If True, randomly change brightness. Default is False. + + Returns + ------- + numpy.array + A processed image. 
References ----------- - - `skimage.exposure.adjust_gamma `_ - - `chinese blog `_ + - `skimage.exposure.adjust_gamma `__ + - `chinese blog `__ + """ if is_random: - gamma = np.random.uniform(1-gamma, 1+gamma) + gamma = np.random.uniform(1 - gamma, 1 + gamma) x = exposure.adjust_gamma(x, gamma, gain) return x + def brightness_multi(x, gamma=1, gain=1, is_random=False): """Change the brightness of multiply images, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpyarray List of images with dimension of [n_images, row, col, channel] (default). - others : see ``brightness``. + others : args + See ``tl.prepro.brightness``. + + Returns + ------- + numpy.array + A list of processed images. + """ if is_random: - gamma = np.random.uniform(1-gamma, 1+gamma) + gamma = np.random.uniform(1 - gamma, 1 + gamma) results = [] for data in x: - results.append( exposure.adjust_gamma(data, gamma, gain) ) + results.append(exposure.adjust_gamma(data, gamma, gain)) return np.asarray(results) -# contrast -def constant(x, cutoff=0.5, gain=10, inv=False, is_random=False): - # TODO - x = exposure.adjust_sigmoid(x, cutoff=cutoff, gain=gain, inv=inv) - return x +def illumination(x, gamma=1., contrast=1., saturation=1., is_random=False): + """Perform illumination augmentation for a single image, randomly or non-randomly. + + Parameters + ----------- + x : numpy.array + An image with dimension of [row, col, channel] (default). + gamma : float + Change brightness (the same with ``tl.prepro.brightness``) + - if is_random=False, one float number, small than one means brighter, greater than one means darker. + - if is_random=True, tuple of two float numbers, (min, max). + contrast : float + Change contrast. + - if is_random=False, one float number, small than one means blur. + - if is_random=True, tuple of two float numbers, (min, max). + saturation : float + Change saturation. 
+ - if is_random=False, one float number, small than one means unsaturation. + - if is_random=True, tuple of two float numbers, (min, max). + is_random : boolean + If True, randomly change illumination. Default is False. + + Returns + ------- + numpy.array + A processed image. + + Examples + --------- + Random -def constant_multi(): - #TODO - pass + >>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True) -# resize -def imresize(x, size=[100, 100], interp='bilinear', mode=None): - """Resize an image by given output size and method. Warning, this function - will rescale the value to [0, 255]. + Non-random + + >>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False) + + """ + from PIL import Image, ImageEnhance + + if is_random: + try: + assert len(gamma) == len(contrast) == len(saturation) == 2, "if is_random = True, the arguments are (min, max)" + except: + raise Exception("if is_random = True, the arguments are (min, max)") + ## random change brightness # small --> brighter + illum_settings = np.random.randint(0, 3) # 0-brighter, 1-darker, 2 keep normal + + if illum_settings == 0: # brighter + gamma = np.random.uniform(gamma[0], 1.0) # (.5, 1.0) + elif illum_settings == 1: # darker + gamma = np.random.uniform(1.0, gamma[1]) # (1.0, 5.0) + else: + gamma = 1 + im_ = brightness(x, gamma=gamma, gain=1, is_random=False) + + # logging.info("using contrast and saturation") + image = Image.fromarray(im_) # array -> PIL + contrast_adjust = ImageEnhance.Contrast(image) + image = contrast_adjust.enhance(np.random.uniform(contrast[0], contrast[1])) #0.3,0.9)) + + saturation_adjust = ImageEnhance.Color(image) + image = saturation_adjust.enhance(np.random.uniform(saturation[0], saturation[1])) # (0.7,1.0)) + im_ = np.array(image) # PIL -> array + else: + im_ = brightness(x, gamma=gamma, gain=1, is_random=False) + image = Image.fromarray(im_) # array -> PIL + contrast_adjust = ImageEnhance.Contrast(image) + image = 
contrast_adjust.enhance(contrast) + + saturation_adjust = ImageEnhance.Color(image) + image = saturation_adjust.enhance(saturation) + im_ = np.array(image) # PIL -> array + return np.asarray(im_) + + +def rgb_to_hsv(rgb): + """Input RGB image [0~255] return HSV image [0~1]. + + Parameters + ------------ + rgb : numpy.array + An image with values between 0 and 255. + + Returns + ------- + numpy.array + A processed image. + + """ + # Translated from source of colorsys.rgb_to_hsv + # r,g,b should be a numpy arrays with values between 0 and 255 + # rgb_to_hsv returns an array of floats between 0.0 and 1.0. + rgb = rgb.astype('float') + hsv = np.zeros_like(rgb) + # in case an RGBA array was passed, just copy the A channel + hsv[..., 3:] = rgb[..., 3:] + r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2] + maxc = np.max(rgb[..., :3], axis=-1) + minc = np.min(rgb[..., :3], axis=-1) + hsv[..., 2] = maxc + mask = maxc != minc + hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask] + rc = np.zeros_like(r) + gc = np.zeros_like(g) + bc = np.zeros_like(b) + rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask] + gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask] + bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask] + hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc) + hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0 + return hsv + + +def hsv_to_rgb(hsv): + """Input HSV image [0~1] return RGB image [0~255]. + + Parameters + ------------- + hsv : numpy.array + An image with values between 0.0 and 1.0 + + Returns + ------- + numpy.array + A processed image. + """ + # Translated from source of colorsys.hsv_to_rgb + # h,s should be a numpy arrays with values between 0.0 and 1.0 + # v should be a numpy array with values between 0.0 and 255.0 + # hsv_to_rgb returns an array of uints between 0 and 255. 
+ rgb = np.empty_like(hsv) + rgb[..., 3:] = hsv[..., 3:] + h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2] + i = (h * 6.0).astype('uint8') + f = (h * 6.0) - i + p = v * (1.0 - s) + q = v * (1.0 - s * f) + t = v * (1.0 - s * (1.0 - f)) + i = i % 6 + conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5] + rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v) + rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t) + rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p) + return rgb.astype('uint8') + + +def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False): + """Adjust hue of an RGB image. + + This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type. + For TF, see `tf.image.adjust_hue `__.and `tf.image.random_hue `__. + + Parameters + ----------- + im : numpy.array + An image with values between 0 and 255. + hout : float + The scale value for adjusting hue. + - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue. + - If is_offset is True, add this value as the offset to the hue channel. + is_offset : boolean + Whether `hout` is added on HSV as offset or not. Default is True. + is_clip : boolean + If HSV value smaller than 0, set to 0. Default is True. + is_random : boolean + If True, randomly change hue. Default is False. + + Returns + ------- + numpy.array + A processed image. + + Examples + --------- + Random, add a random value between -0.2 and 0.2 as the offset to every hue values. + + >>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False) + + Non-random, make all hue to green. + + >>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False) + + References + ----------- + - `tf.image.random_hue `__. + - `tf.image.adjust_hue `__. 
+ - `StackOverflow: Changing image hue with python PIL `__. + + """ + hsv = rgb_to_hsv(im) + if is_random: + hout = np.random.uniform(-hout, hout) + + if is_offset: + hsv[..., 0] += hout + else: + hsv[..., 0] = hout + + if is_clip: + hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf) # Hao : can remove green dots + + rgb = hsv_to_rgb(hsv) + return rgb + + +# # contrast +# def constant(x, cutoff=0.5, gain=10, inv=False, is_random=False): +# # TODO +# x = exposure.adjust_sigmoid(x, cutoff=cutoff, gain=gain, inv=inv) +# return x +# +# def constant_multi(): +# #TODO +# pass + + +def imresize(x, size=None, interp='bicubic', mode=None): + """Resize an image by given output size and method. + + Warning, this function will rescale the value to [0, 255]. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). - size : int, float or tuple (h, w) - - int, Percentage of current size. - - float, Fraction of current size. - - tuple, Size of the output image. - interp : str, optional - Interpolation to use for re-sizing (‘nearest’, ‘lanczos’, ‘bilinear’, ‘bicubic’ or ‘cubic’). - mode : str, optional - The PIL image mode (‘P’, ‘L’, etc.) to convert arr before resizing. + size : list of 2 int or None + For height and width. + interp : str + Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`). + mode : str + The PIL image mode (`P`, `L`, etc.) to convert arr before resizing. Returns - -------- - imresize : ndarray - The resized array of image. + ------- + numpy.array + A processed image. References ------------ - - `scipy.misc.imresize `_ + - `scipy.misc.imresize `__ + """ + if size is None: + size = [100, 100] + if x.shape[-1] == 1: # greyscale - x = scipy.misc.imresize(x[:,:,0], size, interp=interp, mode=mode) + x = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode) return x[:, :, np.newaxis] elif x.shape[-1] == 3: # rgb, bgr .. 
@@ -841,20 +1330,75 @@ def imresize(x, size=[100, 100], interp='bilinear', mode=None): else: raise Exception("Unsupported channel %d" % x.shape[-1]) -# normailization -def samplewise_norm(x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, - channel_index=2, epsilon=1e-7): - """Normalize an image by rescale, samplewise centering and samplewise centering in order. + +# value scale +def pixel_value_scale(im, val=0.9, clip=(-np.inf, np.inf), is_random=False): + """Scales each value in the pixels of the image. Parameters ----------- - x : numpy array - An image with dimension of [row, col, channel] (default). - rescale : rescaling factor. - If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation) - samplewise_center : set each sample mean to 0. - samplewise_std_normalization : divide each input by its std. - epsilon : small position value for dividing standard deviation. + im : numpy.array + An image. + val : float + The scale value for changing pixel value. + - If is_random=False, multiply this value with all pixels. + - If is_random=True, multiply a value between [1-val, 1+val] with all pixels. + clip : tuple of 2 numbers + The minimum and maximum value. + is_random : boolean + If True, see ``val``. + + Returns + ------- + numpy.array + A processed image. 
+ + Examples + ---------- + Random + + >>> im = pixel_value_scale(im, 0.1, [0, 255], is_random=True) + + Non-random + + >>> im = pixel_value_scale(im, 0.9, [0, 255], is_random=False) + + """ + if is_random: + scale = 1 + np.random.uniform(-val, val) + im = im * scale + else: + im = im * val + + if len(clip) == 2: + im = np.clip(im, clip[0], clip[1]) + else: + raise Exception("clip : tuple of 2 numbers") + + return im + + +# normailization +def samplewise_norm(x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-7): + """Normalize an image by rescale, samplewise centering and samplewise centering in order. + + Parameters + ----------- + x : numpy.array + An image with dimension of [row, col, channel] (default). + rescale : float + Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation) + samplewise_center : boolean + If True, set each sample mean to 0. + samplewise_std_normalization : boolean + If True, divide each input by its std. + epsilon : float + A small position value for dividing standard deviation. + + Returns + ------- + numpy.array + A processed image. Examples -------- @@ -865,9 +1409,9 @@ def samplewise_norm(x, rescale=None, samplewise_center=False, samplewise_std_nor Notes ------ When samplewise_center and samplewise_std_normalization are True. - - For greyscale image, every pixels are subtracted and divided by the mean and std of whole image. - For RGB image, every pixels are subtracted and divided by the mean and std of this pixel i.e. the mean and std of a pixel is 0 and 1. 
+ """ if rescale: x *= rescale @@ -889,17 +1433,27 @@ def samplewise_norm(x, rescale=None, samplewise_center=False, samplewise_std_nor else: raise Exception("Unsupported channels %d" % x.shape[channel_index]) + def featurewise_norm(x, mean=None, std=None, epsilon=1e-7): """Normalize every pixels by the same given mean and std, which are usually compute from all examples. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). - mean : value for subtraction. - std : value for division. - epsilon : small position value for dividing standard deviation. + mean : float + Value for subtraction. + std : float + Value for division. + epsilon : float + A small position value for dividing standard deviation. + + Returns + ------- + numpy.array + A processed image. + """ if mean: x = x - mean @@ -907,42 +1461,58 @@ def featurewise_norm(x, mean=None, std=None, epsilon=1e-7): x = x / (std + epsilon) return x + # whitening def get_zca_whitening_principal_components_img(X): """Return the ZCA whitening principal components matrix. Parameters ----------- - x : numpy array - Batch of image with dimension of [n_example, row, col, channel] (default). + x : numpy.array + Batch of images with dimension of [n_example, row, col, channel] (default). + + Returns + ------- + numpy.array + A processed image. + """ flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3])) - print("zca : computing sigma ..") + logging.info("zca : computing sigma ..") sigma = np.dot(flatX.T, flatX) / flatX.shape[0] - print("zca : computing U, S and V ..") - U, S, V = linalg.svd(sigma) - print("zca : computing principal components ..") + logging.info("zca : computing U, S and V ..") + U, S, _ = linalg.svd(sigma) # USV + logging.info("zca : computing principal components ..") principal_components = np.dot(np.dot(U, np.diag(1. 
/ np.sqrt(S + 10e-7))), U.T) return principal_components + def zca_whitening(x, principal_components): """Apply ZCA whitening on an image by given principal components matrix. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). - principal_components : matrix from ``get_zca_whitening_principal_components_img``. + principal_components : matrix + Matrix from ``get_zca_whitening_principal_components_img``. + + Returns + ------- + numpy.array + A processed image. + """ - # flatx = np.reshape(x, (x.size)) - print(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1)) + flatx = np.reshape(x, (x.size)) + # logging.info(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1)) # flatx = np.reshape(x, (x.shape)) # flatx = np.reshape(x, (x.shape[0], )) - print(flatx.shape) # (160, 176, 1) + # logging.info(flatx.shape) # (160, 176, 1) whitex = np.dot(flatx, principal_components) x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2])) return x + # developing # def barrel_transform(x, intensity): # # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py @@ -954,20 +1524,27 @@ def zca_whitening(x, principal_components): # # TODO # pass + # channel shift def channel_shift(x, intensity, is_random=False, channel_index=2): - """Shift the channels of an image, randomly or non-randomly, see `numpy.rollaxis `_. + """Shift the channels of an image, randomly or non-randomly, see `numpy.rollaxis `__. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). intensity : float Intensity of shifting. - is_random : boolean, default False - If True, randomly shift. + is_random : boolean + If True, randomly shift. Default is False. channel_index : int - Index of channel, default 2. + Index of channel. Default is 2. + + Returns + ------- + numpy.array + A processed image. 
+ """ if is_random: factor = np.random.uniform(-intensity, intensity) @@ -975,10 +1552,9 @@ def channel_shift(x, intensity, is_random=False, channel_index=2): factor = intensity x = np.rollaxis(x, channel_index, 0) min_x, max_x = np.min(x), np.max(x) - channel_images = [np.clip(x_channel + factor, min_x, max_x) - for x_channel in x] + channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x] x = np.stack(channel_images, axis=0) - x = np.rollaxis(x, 0, channel_index+1) + x = np.rollaxis(x, 0, channel_index + 1) return x # x = np.rollaxis(x, channel_index, 0) # min_x, max_x = np.min(x), np.max(x) @@ -988,15 +1564,23 @@ def channel_shift(x, intensity, is_random=False, channel_index=2): # x = np.rollaxis(x, 0, channel_index+1) # return x -def channel_shift_multi(x, intensity, channel_index=2): - """Shift the channels of images with the same arguments, randomly or non-randomly, see `numpy.rollaxis `_ . + +def channel_shift_multi(x, intensity, is_random=False, channel_index=2): + """Shift the channels of images with the same arguments, randomly or non-randomly, see `numpy.rollaxis `__. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- - x : list of numpy array + x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). - others : see ``channel_shift``. + others : args + See ``tl.prepro.channel_shift``. + + Returns + ------- + numpy.array + A list of processed images. 
+ """ if is_random: factor = np.random.uniform(-intensity, intensity) @@ -1007,66 +1591,80 @@ def channel_shift_multi(x, intensity, channel_index=2): for data in x: data = np.rollaxis(data, channel_index, 0) min_x, max_x = np.min(data), np.max(data) - channel_images = [np.clip(x_channel + factor, min_x, max_x) - for x_channel in x] + channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x] data = np.stack(channel_images, axis=0) - data = np.rollaxis(x, 0, channel_index+1) - results.append( data ) + data = np.rollaxis(x, 0, channel_index + 1) + results.append(data) return np.asarray(results) + # noise def drop(x, keep=0.5): """Randomly set some pixels to zero by a given keeping probability. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] or [row, col]. - keep : float (0, 1) - The keeping probability, the lower more values will be set to zero. + keep : float + The keeping probability (0, 1), the lower more values will be set to zero. + + Returns + ------- + numpy.array + A processed image. 
+ """ if len(x.shape) == 3: - if x.shape[-1]==3: # color + if x.shape[-1] == 3: # color img_size = x.shape mask = np.random.binomial(n=1, p=keep, size=x.shape[:-1]) for i in range(3): - x[:,:,i] = np.multiply(x[:,:,i] , mask) - elif x.shape[-1]==1: # greyscale image + x[:, :, i] = np.multiply(x[:, :, i], mask) + elif x.shape[-1] == 1: # greyscale image img_size = x.shape - x = np.multiply(x , np.random.binomial(n=1, p=keep, size=img_size)) + x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size)) else: raise Exception("Unsupported shape {}".format(x.shape)) - elif len(x.shape) == 2 or 1: # greyscale matrix (image) or vector + elif len(x.shape) == 2 or 1: # greyscale matrix (image) or vector img_size = x.shape - x = np.multiply(x , np.random.binomial(n=1, p=keep, size=img_size)) + x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size)) else: raise Exception("Unsupported shape {}".format(x.shape)) return x + # x = np.asarray([[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]]) # x = np.asarray([x,x,x,x,x,x]) # x.shape = 10, 4, 3 -# # print(x) +# # logging.info(x) # # exit() -# print(x.shape) +# logging.info(x.shape) # # exit() -# print(drop(x, keep=1.)) +# logging.info(drop(x, keep=1.)) # exit() + # manual transform def transform_matrix_offset_center(matrix, x, y): """Return transform matrix offset center. Parameters ---------- - matrix : numpy array - Transform matrix - x, y : int + matrix : numpy.array + Transform matrix. + x and y : 2 int Size of image. + Returns + ------- + numpy.array + The transform matrix. + Examples -------- - - See ``rotation``, ``shear``, ``zoom``. + - See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``. 
+ """ o_x = float(x) / 2 + 0.5 o_y = float(y) / 2 + 0.5 @@ -1076,113 +1674,145 @@ def transform_matrix_offset_center(matrix, x, y): return transform_matrix -def apply_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0.): - """Return transformed images by given transform_matrix from ``transform_matrix_offset_center``. +def apply_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0., order=1): + """Return transformed images by given ``transform_matrix`` from ``transform_matrix_offset_center``. Parameters ---------- - x : numpy array - Batch of images with dimension of 3, [batch_size, row, col, channel]. - transform_matrix : numpy array + x : numpy.array + An image with dimension of [row, col, channel] (default). + transform_matrix : numpy.array Transform matrix (offset center), can be generated by ``transform_matrix_offset_center`` channel_index : int Index of channel, default 2. - fill_mode : string - Method to fill missing pixel, default ‘nearest’, more options ‘constant’, ‘reflect’ or ‘wrap’ - - - `scipy ndimage affine_transform `_ - cval : scalar, optional + fill_mode : str + Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform `__ + cval : float Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0 + order : int + The order of interpolation. The order has to be in the range 0-5: + - 0 Nearest-neighbor + - 1 Bi-linear (default) + - 2 Bi-quadratic + - 3 Bi-cubic + - 4 Bi-quartic + - 5 Bi-quintic + - `scipy ndimage affine_transform `__ - - `scipy ndimage affine_transform `_ + Returns + ------- + numpy.array + A processed image. Examples -------- - - See ``rotation``, ``shift``, ``shear``, ``zoom``. + - See ``tl.prepro.rotation``, ``tl.prepro.shift``, ``tl.prepro.shear``, ``tl.prepro.zoom``. 
+ """ x = np.rollaxis(x, channel_index, 0) final_affine_matrix = transform_matrix[:2, :2] final_offset = transform_matrix[:2, 2] - channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix, - final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x] + channel_images = [ + ndi.interpolation.affine_transform(x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval) for x_channel in x + ] x = np.stack(channel_images, axis=0) - x = np.rollaxis(x, 0, channel_index+1) + x = np.rollaxis(x, 0, channel_index + 1) return x -def projective_transform_by_points(x, src, dst, map_args={}, output_shape=None, order=1, mode='constant', cval=0.0, clip=True, preserve_range=False): - """Projective transform by given coordinates, usually 4 coordinates. see `scikit-image `_. +def projective_transform_by_points(x, src, dst, map_args=None, output_shape=None, order=1, mode='constant', cval=0.0, clip=True, preserve_range=False): + """Projective transform by given coordinates, usually 4 coordinates. + + see `scikit-image `__. Parameters ----------- - x : numpy array + x : numpy.array An image with dimension of [row, col, channel] (default). src : list or numpy - The original coordinates, usually 4 coordinates of (x, y). + The original coordinates, usually 4 coordinates of (width, height). dst : list or numpy The coordinates after transformation, the number of coordinates is the same with src. - map_args : dict, optional - Keyword arguments passed to inverse_map. - output_shape : tuple (rows, cols), optional + map_args : dictionary or None + Keyword arguments passed to inverse map. + output_shape : tuple of 2 int Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified. - order : int, optional + order : int The order of interpolation. 
The order has to be in the range 0-5: - - - 0 Nearest-neighbor - - 1 Bi-linear (default) - - 2 Bi-quadratic - - 3 Bi-cubic - - 4 Bi-quartic - - 5 Bi-quintic - mode : {‘constant’, ‘edge’, ‘symmetric’, ‘reflect’, ‘wrap’}, optional + - 0 Nearest-neighbor + - 1 Bi-linear (default) + - 2 Bi-quadratic + - 3 Bi-cubic + - 4 Bi-quartic + - 5 Bi-quintic + mode : str + One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`. Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad. - cval : float, optional - Used in conjunction with mode ‘constant’, the value outside the image boundaries. - clip : bool, optional + cval : float + Used in conjunction with mode `constant`, the value outside the image boundaries. + clip : boolean Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. - preserve_range : bool, optional + preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. + Returns + ------- + numpy.array + A processed image. + Examples -------- - >>> Assume X is an image from CIFAR 10, i.e. shape == (32, 32, 3) - >>> src = [[0,0],[0,32],[32,0],[32,32]] + Assume X is an image from CIFAR-10, i.e. 
shape == (32, 32, 3) + + >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h] >>> dst = [[10,10],[0,32],[32,0],[32,32]] - >>> x = projective_transform_by_points(X, src, dst) + >>> x = tl.prepro.projective_transform_by_points(X, src, dst) References ----------- - - `scikit-image : geometric transformations `_ - - `scikit-image : examples `_ + - `scikit-image : geometric transformations `__ + - `scikit-image : examples `__ + """ - if type(src) is list: # convert to numpy + if map_args is None: + map_args = {} + # if type(src) is list: + if isinstance(src, list): # convert to numpy src = np.array(src) - if type(dst) is list: + # if type(dst) is list: + if isinstance(dst, list): dst = np.array(dst) - if np.max(x)>1: # convert to [0, 1] - x = x/255 + if np.max(x) > 1: # convert to [0, 1] + x = x / 255 m = transform.ProjectiveTransform() m.estimate(dst, src) - warped = transform.warp(x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range) + warped = transform.warp(x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range) return warped + # Numpy and PIL -def array_to_img(x, dim_ordering=(0,1,2), scale=True): +def array_to_img(x, dim_ordering=(0, 1, 2), scale=True): """Converts a numpy array to PIL image object (uint8 format). Parameters ---------- - x : numpy array - A image with dimension of 3 and channels of 1 or 3. - dim_ordering : list or tuple of 3 int + x : numpy.array + An image with dimension of 3 and channels of 1 or 3. + dim_ordering : tuple of 3 int Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0). - scale : boolean, default is True - If True, converts image to [0, 255] from any range of value like [-1, 2]. + scale : boolean + If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True. + + Returns + ------- + PIL.image + An image. 
References ----------- - - `PIL Image.fromarray `_ + `PIL Image.fromarray `__ + """ from PIL import Image # if dim_ordering == 'default': @@ -1194,7 +1824,7 @@ def array_to_img(x, dim_ordering=(0,1,2), scale=True): x += max(-np.min(x), 0) x_max = np.max(x) if x_max != 0: - # print(x_max) + # logging.info(x_max) # x /= x_max x = x / x_max x *= 255 @@ -1208,7 +1838,1043 @@ def array_to_img(x, dim_ordering=(0,1,2), scale=True): raise Exception('Unsupported channel number: ', x.shape[2]) -## Sequence +def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'): + """Find iso-valued contours in a 2D array for a given level value, returns list of (n, 2)-ndarrays + see `skimage.measure.find_contours `__. + + Parameters + ------------ + x : 2D ndarray of double. + Input data in which to find contours. + level : float + Value along which to find contours in the array. + fully_connected : str + Either `low` or `high`. Indicates whether array elements below the given level value are to be considered fully-connected (and hence elements above the value will only be face connected), or vice-versa. (See notes below for details.) + positive_orientation : str + Either `low` or `high`. Indicates whether the output contours will produce positively-oriented polygons around islands of low- or high-valued elements. If `low` then contours will wind counter-clockwise around elements below the iso-value. Alternately, this means that low-valued elements are always on the left of the contour. + + Returns + -------- + list of (n,2)-ndarrays + Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour. + + """ + return skimage.measure.find_contours(x, level, fully_connected=fully_connected, positive_orientation=positive_orientation) + + +def pt2map(list_points=None, size=(100, 100), val=1): + """Inputs a list of points, return a 2D image. + + Parameters + -------------- + list_points : list of 2 int + [[x, y], [x, y]..] 
for point coordinates. + size : tuple of 2 int + (w, h) for output size. + val : float or int + For the contour value. + + Returns + ------- + numpy.array + An image. + + """ + if list_points is None: + raise Exception("list_points : list of 2 int") + i_m = np.zeros(size) + if len(list_points) == 0: + return i_m + for xx in list_points: + for x in xx: + # logging.info(x) + i_m[int(np.round(x[0]))][int(np.round(x[1]))] = val + return i_m + + +def binary_dilation(x, radius=3): + """Return fast binary morphological dilation of an image. + see `skimage.morphology.binary_dilation `__. + + Parameters + ----------- + x : 2D array + A binary image. + radius : int + For the radius of mask. + + Returns + ------- + numpy.array + A processed binary image. + + """ + from skimage.morphology import disk, binary_dilation + mask = disk(radius) + x = binary_dilation(x, selem=mask) + return x + + +def dilation(x, radius=3): + """Return greyscale morphological dilation of an image, + see `skimage.morphology.dilation `__. + + Parameters + ----------- + x : 2D array + An greyscale image. + radius : int + For the radius of mask. + + Returns + ------- + numpy.array + A processed greyscale image. + + """ + from skimage.morphology import disk, dilation + mask = disk(radius) + x = dilation(x, selem=mask) + return x + + +def binary_erosion(x, radius=3): + """Return binary morphological erosion of an image, + see `skimage.morphology.binary_erosion `__. + + Parameters + ----------- + x : 2D array + A binary image. + radius : int + For the radius of mask. + + Returns + ------- + numpy.array + A processed binary image. + + """ + from skimage.morphology import disk, binary_erosion + mask = disk(radius) + x = binary_erosion(x, selem=mask) + return x + + +def erosion(x, radius=3): + """Return greyscale morphological erosion of an image, + see `skimage.morphology.erosion `__. + + Parameters + ----------- + x : 2D array + A greyscale image. + radius : int + For the radius of mask. 
+ + Returns + ------- + numpy.array + A processed greyscale image. + + """ + from skimage.morphology import disk, erosion + mask = disk(radius) + x = erosion(x, selem=mask) + return x + + +def obj_box_coords_rescale(coords=None, shape=None): + """Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1]. + + Parameters + ------------ + coords : list of list of 4 ints or None + For coordinates of more than one images .e.g.[[x, y, w, h], [x, y, w, h], ...]. + shape : list of 2 int or None + 【height, width]. + + Returns + ------- + list of list of 4 numbers + A list of new bounding boxes. + + + Examples + --------- + >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100]) + >>> print(coords) + ... [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]] + >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100]) + >>> print(coords) + ... [[0.3, 0.8, 0.5, 1.0]] + >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200]) + >>> print(coords) + ... [[0.15, 0.4, 0.25, 0.5]] + + Returns + ------- + list of 4 numbers + New coordinates. + + """ + if coords is None: + coords = [] + if shape is None: + shape = [100, 200] + + imh, imw = shape[0], shape[1] + imh = imh * 1.0 # * 1.0 for python2 : force division to be float point + imw = imw * 1.0 + coords_new = list() + for coord in coords: + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + x = coord[0] / imw + y = coord[1] / imh + w = coord[2] / imw + h = coord[3] / imh + coords_new.append([x, y, w, h]) + return coords_new + + +def obj_box_coord_rescale(coord=None, shape=None): + """Scale down one coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1]. + It is the reverse process of ``obj_box_coord_scale_to_pixelunit``. + + Parameters + ------------ + coords : list of 4 int or None + One coordinates of one image e.g. [x, y, w, h]. 
+ shape : list of 2 int or None + For [height, width]. + + Returns + ------- + list of 4 numbers + New bounding box. + + Examples + --------- + >>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100]) + ... [0.3, 0.4, 0.5, 0.5] + + """ + if coord is None: + coord = [] + if shape is None: + shape = [100, 200] + + return obj_box_coords_rescale(coords=[coord], shape=shape)[0] + + +def obj_box_coord_scale_to_pixelunit(coord, shape=None): + """Convert one coordinate [x, y, w (or x2), h (or y2)] in ratio format to image coordinate format. + It is the reverse process of ``obj_box_coord_rescale``. + + Parameters + ----------- + coord : list of 4 float + One coordinate of one image [x, y, w (or x2), h (or y2)] in ratio format, i.e value range [0~1]. + shape : tuple of 2 or None + For [height, width]. + + Returns + ------- + list of 4 numbers + New bounding box. + + Examples + --------- + >>> x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([0.2, 0.3, 0.5, 0.7], shape=(100, 200, 3)) + ... [40, 30, 100, 70] + + """ + if shape is None: + shape = [100, 100] + + imh, imw = shape[0:2] + x = int(coord[0] * imw) + x2 = int(coord[2] * imw) + y = int(coord[1] * imh) + y2 = int(coord[3] * imh) + return [x, y, x2, y2] + + +# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100]) +# logging.info(coords) +# # ... [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]] +# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100]) +# logging.info(coords) +# # ... [[0.3, 0.8, 0.5, 1.0]] +# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200]) +# logging.info(coords) +# # ... [[0.15, 0.4, 0.25, 0.5]] +# exit() + + +def obj_box_coord_centroid_to_upleft_butright(coord, to_int=False): + """Convert one coordinate [x_center, y_center, w, h] to [x1, y1, x2, y2] in up-left and botton-right format. + + Parameters + ------------ + coord : list of 4 int/float + One coordinate. 
+ to_int : boolean + Whether to convert output as integer. + + Returns + ------- + list of 4 numbers + New bounding box. + + Examples + --------- + >>> coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20]) + ... [20, 30, 40, 50] + + """ + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + x_center, y_center, w, h = coord + x = x_center - w / 2. + y = y_center - h / 2. + x2 = x + w + y2 = y + h + if to_int: + return [int(x), int(y), int(x2), int(y2)] + else: + return [x, y, x2, y2] + + +# coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20]) +# logging.info(coord) [20, 30, 40, 50] +# exit() + + +def obj_box_coord_upleft_butright_to_centroid(coord): + """Convert one coordinate [x1, y1, x2, y2] to [x_center, y_center, w, h]. + It is the reverse process of ``obj_box_coord_centroid_to_upleft_butright``. + + Parameters + ------------ + coord : list of 4 int/float + One coordinate. + + Returns + ------- + list of 4 numbers + New bounding box. + + """ + assert len(coord) == 4, "coordinate should be 4 values : [x1, y1, x2, y2]" + x1, y1, x2, y2 = coord + w = x2 - x1 + h = y2 - y1 + x_c = x1 + w / 2. + y_c = y1 + h / 2. + return [x_c, y_c, w, h] + + +def obj_box_coord_centroid_to_upleft(coord): + """Convert one coordinate [x_center, y_center, w, h] to [x, y, w, h]. + It is the reverse process of ``obj_box_coord_upleft_to_centroid``. + + Parameters + ------------ + coord : list of 4 int/float + One coordinate. + + Returns + ------- + list of 4 numbers + New bounding box. + + """ + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + x_center, y_center, w, h = coord + x = x_center - w / 2. + y = y_center - h / 2. + return [x, y, w, h] + + +def obj_box_coord_upleft_to_centroid(coord): + """Convert one coordinate [x, y, w, h] to [x_center, y_center, w, h]. + It is the reverse process of ``obj_box_coord_centroid_to_upleft``. + + Parameters + ------------ + coord : list of 4 int/float + One coordinate. 
+ + Returns + ------- + list of 4 numbers + New bounding box. + + """ + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + x, y, w, h = coord + x_center = x + w / 2. + y_center = y + h / 2. + return [x_center, y_center, w, h] + + +def parse_darknet_ann_str_to_list(annotations): + """Input string format of class, x, y, w, h, return list of list format. + + Parameters + ----------- + annotations : str + The annotations in darkent format "class, x, y, w, h ...." seperated by "\\n". + + Returns + ------- + list of list of 4 numbers + List of bounding box. + + """ + annotations = annotations.split("\n") + ann = [] + for a in annotations: + a = a.split() + if len(a) == 5: + for i, _v in enumerate(a): + if i == 0: + a[i] = int(a[i]) + else: + a[i] = float(a[i]) + ann.append(a) + return ann + + +def parse_darknet_ann_list_to_cls_box(annotations): + """Parse darknet annotation format into two lists for class and bounding box. + + Input list of [[class, x, y, w, h], ...], return two list of [class ...] and [[x, y, w, h], ...]. + + Parameters + ------------ + annotations : list of list + A list of class and bounding boxes of images e.g. [[class, x, y, w, h], ...] + + Returns + ------- + list of int + List of class labels. + + list of list of 4 numbers + List of bounding box. + + """ + class_list = [] + bbox_list = [] + for ann in annotations: + class_list.append(ann[0]) + bbox_list.append(ann[1:]) + return class_list, bbox_list + + +def obj_box_left_right_flip(im, coords=None, is_rescale=False, is_center=False, is_random=False): + """Left-right flip the image and coordinates for object detection. + + Parameters + ---------- + im : numpy.array + An image with dimension of [row, col, channel] (default). + coords : list of list of 4 int/float or None + Coordinates [[x, y, w, h], [x, y, w, h], ...]. + is_rescale : boolean + Set to True, if the input coordinates are rescaled to [0, 1]. Default is False. 
+ is_center : boolean + Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False. + is_random : boolean + If True, randomly flip. Default is False. + + Returns + ------- + numpy.array + A processed image + list of list of 4 numbers + A list of new bounding boxes. + + Examples + -------- + >>> im = np.zeros([80, 100]) # as an image with shape width=100, height=80 + >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False) + >>> print(coords) + ... [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]] + >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False) + >>> print(coords) + ... [[0.5, 0.4, 0.3, 0.3]] + >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False) + >>> print(coords) + ... [[80, 40, 30, 30]] + >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False) + >>> print(coords) + ... [[50, 40, 30, 30]] + + """ + + if coords is None: + coords = [] + + def _flip(im, coords): + im = flip_axis(im, axis=1, is_random=False) + coords_new = list() + + for coord in coords: + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + if is_rescale: + if is_center: + # x_center' = 1 - x + x = 1. - coord[0] + else: + # x_center' = 1 - x - w + x = 1. 
- coord[0] - coord[2] + else: + if is_center: + # x' = im.width - x + x = im.shape[1] - coord[0] + else: + # x' = im.width - x - w + x = im.shape[1] - coord[0] - coord[2] + coords_new.append([x, coord[1], coord[2], coord[3]]) + return im, coords_new + + if is_random: + factor = np.random.uniform(-1, 1) + if factor > 0: + return _flip(im, coords) + else: + return im, coords + else: + return _flip(im, coords) + + +# im = np.zeros([80, 100]) # as an image with shape width=100, height=80 +# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False) +# logging.info(coords) +# # ... [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]] +# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False) +# logging.info(coords) +# # [[0.5, 0.4, 0.3, 0.3]] +# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False) +# logging.info(coords) +# # ... [[80, 40, 30, 30]] +# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False) +# logging.info(coords) +# # [[50, 40, 30, 30]] +# exit() + + +def obj_box_imresize(im, coords=None, size=None, interp='bicubic', mode=None, is_rescale=False): + """Resize an image, and compute the new bounding box coordinates. + + Parameters + ------------- + im : numpy.array + An image with dimension of [row, col, channel] (default). + coords : list of list of 4 int/float or None + Coordinates [[x, y, w, h], [x, y, w, h], ...] + size interp and mode : args + See ``tl.prepro.imresize``. + is_rescale : boolean + Set to True, if the input coordinates are rescaled to [0, 1], then return the original coordinates. Default is False. + + Returns + ------- + numpy.array + A processed image + list of list of 4 numbers + A list of new bounding boxes. 
+ + Examples + -------- + >>> im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80 + >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False) + >>> print(coords) + ... [[40, 80, 60, 60], [20, 40, 40, 40]] + >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False) + >>> print(coords) + ... [[20, 20, 30, 15]] + >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False) + >>> print(coords) + ... [[30, 30, 45, 22]] + >>> im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True) + >>> print(coords, im2.shape) + ... [[0.2, 0.4, 0.3, 0.3]] (160, 200, 3) + + """ + if coords is None: + coords = [] + if size is None: + size = [100, 100] + + imh, imw = im.shape[0:2] + imh = imh * 1.0 # * 1.0 for python2 : force division to be float point + imw = imw * 1.0 + im = imresize(im, size=size, interp=interp, mode=mode) + + if is_rescale is False: + coords_new = list() + for coord in coords: + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + # x' = x * (imw'/imw) + x = int(coord[0] * (size[1] / imw)) + # y' = y * (imh'/imh) + # logging.info('>>', coord[1], size[0], imh) + y = int(coord[1] * (size[0] / imh)) + # w' = w * (imw'/imw) + w = int(coord[2] * (size[1] / imw)) + # h' = h * (imh'/imh) + h = int(coord[3] * (size[0] / imh)) + coords_new.append([x, y, w, h]) + return im, coords_new + else: + return im, coords + + +# im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80 +# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False) +# logging.info(coords) +# # ... [[40, 80, 60, 60], [20, 40, 40, 40]] +# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False) +# logging.info(coords) +# # ... 
[20, 20, 30, 15] +# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False) +# logging.info(coords) +# # ... [30, 30, 45, 22] +# im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True) +# logging.info(coords, im2.shape) +# # ... [0.2, 0.4, 0.3, 0.3] (160, 200, 3) +# exit() + + +def obj_box_crop(im, classes=None, coords=None, wrg=100, hrg=100, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.): + """Randomly or centrally crop an image, and compute the new bounding box coordinates. + Objects outside the cropped image will be removed. + + Parameters + ----------- + im : numpy.array + An image with dimension of [row, col, channel] (default). + classes : list of int or None + Class IDs. + coords : list of list of 4 int/float or None + Coordinates [[x, y, w, h], [x, y, w, h], ...] + wrg hrg and is_random : args + See ``tl.prepro.crop``. + is_rescale : boolean + Set to True, if the input coordinates are rescaled to [0, 1]. Default is False. + is_center : boolean, default False + Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False. + thresh_wh : float + Threshold, remove the box if its ratio of width(height) to image size less than the threshold. + thresh_wh2 : float + Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold. + + Returns + ------- + numpy.array + A processed image + list of int + A list of classes + list of list of 4 numbers + A list of new bounding boxes. 
+ + """ + if classes is None: + classes = [] + if coords is None: + coords = [] + + h, w = im.shape[0], im.shape[1] + assert (h > hrg) and (w > wrg), "The size of cropping should smaller than the original image" + if is_random: + h_offset = int(np.random.uniform(0, h - hrg) - 1) + w_offset = int(np.random.uniform(0, w - wrg) - 1) + h_end = hrg + h_offset + w_end = wrg + w_offset + im_new = im[h_offset:h_end, w_offset:w_end] + else: # central crop + h_offset = int(np.floor((h - hrg) / 2.)) + w_offset = int(np.floor((w - wrg) / 2.)) + h_end = h_offset + hrg + w_end = w_offset + wrg + im_new = im[h_offset:h_end, w_offset:w_end] + + # w + # _____________________________ + # | h/w offset | + # | ------- | + # h | | | | + # | | | | + # | ------- | + # | h/w end | + # |___________________________| + + def _get_coord(coord): + """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates, + before getting the new coordinates. + Boxes outsides the cropped image will be removed. + + """ + if is_center: + coord = obj_box_coord_centroid_to_upleft(coord) + + ##======= pixel unit format and upleft, w, h ==========## + + # x = np.clip( coord[0] - w_offset, 0, w_end - w_offset) + # y = np.clip( coord[1] - h_offset, 0, h_end - h_offset) + # w = np.clip( coord[2] , 0, w_end - w_offset) + # h = np.clip( coord[3] , 0, h_end - h_offset) + + x = coord[0] - w_offset + y = coord[1] - h_offset + w = coord[2] + h = coord[3] + + if x < 0: + if x + w <= 0: + return None + w = w + x + x = 0 + elif x > im_new.shape[1]: # object outside the cropped image + return None + + if y < 0: + if y + h <= 0: + return None + h = h + y + y = 0 + elif y > im_new.shape[0]: # object outside the cropped image + return None + + if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image + w = im_new.shape[1] - x + + if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image + h = im_new.shape[0] - y + + if (w / (h + 1.) 
> thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow + # logging.info('xx', w, h) + return None + + if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) < thresh_wh): # object shape strange: too narrow + # logging.info('yy', w, im_new.shape[1], h, im_new.shape[0]) + return None + + coord = [x, y, w, h] + + ## convert back if input format is center. + if is_center: + coord = obj_box_coord_upleft_to_centroid(coord) + + return coord + + coords_new = list() + classes_new = list() + for i, _ in enumerate(coords): + coord = coords[i] + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + if is_rescale: + # for scaled coord, upscaled before process and scale back in the end. + coord = obj_box_coord_scale_to_pixelunit(coord, im.shape) + coord = _get_coord(coord) + if coord is not None: + coord = obj_box_coord_rescale(coord, im_new.shape) + coords_new.append(coord) + classes_new.append(classes[i]) + else: + coord = _get_coord(coord) + if coord is not None: + coords_new.append(coord) + classes_new.append(classes[i]) + return im_new, classes_new, coords_new + + +def obj_box_shift(im, + classes=None, + coords=None, + wrg=0.1, + hrg=0.1, + row_index=0, + col_index=1, + channel_index=2, + fill_mode='nearest', + cval=0., + order=1, + is_rescale=False, + is_center=False, + is_random=False, + thresh_wh=0.02, + thresh_wh2=12.): + """Shift an image randomly or non-randomly, and compute the new bounding box coordinates. + Objects outside the cropped image will be removed. + + Parameters + ----------- + im : numpy.array + An image with dimension of [row, col, channel] (default). + classes : list of int or None + Class IDs. + coords : list of list of 4 int/float or None + Coordinates [[x, y, w, h], [x, y, w, h], ...] + wrg, hrg row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.shift``. + is_rescale : boolean + Set to True, if the input coordinates are rescaled to [0, 1]. 
Default is False. + is_center : boolean + Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False. + thresh_wh : float + Threshold, remove the box if its ratio of width(height) to image size less than the threshold. + thresh_wh2 : float + Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold. + + + Returns + ------- + numpy.array + A processed image + list of int + A list of classes + list of list of 4 numbers + A list of new bounding boxes. + + """ + if classes is None: + classes = [] + if coords is None: + coords = [] + + imh, imw = im.shape[row_index], im.shape[col_index] + assert (hrg < 1.0) and (hrg > 0.) and (wrg < 1.0) and (wrg > 0.), "shift range should be (0, 1)" + if is_random: + tx = np.random.uniform(-hrg, hrg) * imh + ty = np.random.uniform(-wrg, wrg) * imw + else: + tx, ty = hrg * imh, wrg * imw + translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) + + transform_matrix = translation_matrix # no need to do offset + im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order) + + # modified from obj_box_crop + def _get_coord(coord): + """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates, + before getting the new coordinates. + Boxes outsides the cropped image will be removed. 
+ + """ + if is_center: + coord = obj_box_coord_centroid_to_upleft(coord) + + ##======= pixel unit format and upleft, w, h ==========## + x = coord[0] - ty # only change this + y = coord[1] - tx # only change this + w = coord[2] + h = coord[3] + + if x < 0: + if x + w <= 0: + return None + w = w + x + x = 0 + elif x > im_new.shape[1]: # object outside the cropped image + return None + + if y < 0: + if y + h <= 0: + return None + h = h + y + y = 0 + elif y > im_new.shape[0]: # object outside the cropped image + return None + + if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image + w = im_new.shape[1] - x + + if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image + h = im_new.shape[0] - y + + if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow + # logging.info('xx', w, h) + return None + + if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) < thresh_wh): # object shape strange: too narrow + # logging.info('yy', w, im_new.shape[1], h, im_new.shape[0]) + return None + + coord = [x, y, w, h] + + ## convert back if input format is center. + if is_center: + coord = obj_box_coord_upleft_to_centroid(coord) + + return coord + + coords_new = list() + classes_new = list() + for i, _ in enumerate(coords): + coord = coords[i] + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + if is_rescale: + # for scaled coord, upscaled before process and scale back in the end. 
+ coord = obj_box_coord_scale_to_pixelunit(coord, im.shape) + coord = _get_coord(coord) + if coord is not None: + coord = obj_box_coord_rescale(coord, im_new.shape) + coords_new.append(coord) + classes_new.append(classes[i]) + else: + coord = _get_coord(coord) + if coord is not None: + coords_new.append(coord) + classes_new.append(classes[i]) + return im_new, classes_new, coords_new + + +def obj_box_zoom(im, + classes=None, + coords=None, + zoom_range=(0.9, 1.1), + row_index=0, + col_index=1, + channel_index=2, + fill_mode='nearest', + cval=0., + order=1, + is_rescale=False, + is_center=False, + is_random=False, + thresh_wh=0.02, + thresh_wh2=12.): + """Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates. + Objects outside the cropped image will be removed. + + Parameters + ----------- + im : numpy.array + An image with dimension of [row, col, channel] (default). + classes : list of int or None + Class IDs. + coords : list of list of 4 int/float or None + Coordinates [[x, y, w, h], [x, y, w, h], ...]. + zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``. + is_rescale : boolean + Set to True, if the input coordinates are rescaled to [0, 1]. Default is False. + is_center : boolean + Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False. + thresh_wh : float + Threshold, remove the box if its ratio of width(height) to image size less than the threshold. + thresh_wh2 : float + Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold. + + Returns + ------- + numpy.array + A processed image + list of int + A list of classes + list of list of 4 numbers + A list of new bounding boxes. + + """ + if classes is None: + classes = [] + if coords is None: + coords = [] + + if len(zoom_range) != 2: + raise Exception('zoom_range should be a tuple or list of two floats. 
' 'Received arg: ', zoom_range) + if is_random: + if zoom_range[0] == 1 and zoom_range[1] == 1: + zx, zy = 1, 1 + logging.info(" random_zoom : not zoom in/out") + else: + zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) + else: + zx, zy = zoom_range + # logging.info(zx, zy) + zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) + + h, w = im.shape[row_index], im.shape[col_index] + transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w) + im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order) + + # modified from obj_box_crop + def _get_coord(coord): + """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates, + before getting the new coordinates. + Boxes outsides the cropped image will be removed. + + """ + if is_center: + coord = obj_box_coord_centroid_to_upleft(coord) + + # ======= pixel unit format and upleft, w, h ========== + x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2 # only change this + y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2 # only change this + w = coord[2] / zy # only change this + h = coord[3] / zx # only change thisS + + if x < 0: + if x + w <= 0: + return None + w = w + x + x = 0 + elif x > im_new.shape[1]: # object outside the cropped image + return None + + if y < 0: + if y + h <= 0: + return None + h = h + y + y = 0 + elif y > im_new.shape[0]: # object outside the cropped image + return None + + if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image + w = im_new.shape[1] - x + + if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image + h = im_new.shape[0] - y + + if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow + # logging.info('xx', w, h) + return None + + if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) 
< thresh_wh): # object shape strange: too narrow + # logging.info('yy', w, im_new.shape[1], h, im_new.shape[0]) + return None + + coord = [x, y, w, h] + + # convert back if input format is center. + if is_center: + coord = obj_box_coord_upleft_to_centroid(coord) + + return coord + + coords_new = list() + classes_new = list() + for i, _ in enumerate(coords): + coord = coords[i] + assert len(coord) == 4, "coordinate should be 4 values : [x, y, w, h]" + if is_rescale: + # for scaled coord, upscaled before process and scale back in the end. + coord = obj_box_coord_scale_to_pixelunit(coord, im.shape) + coord = _get_coord(coord) + if coord is not None: + coord = obj_box_coord_rescale(coord, im_new.shape) + coords_new.append(coord) + classes_new.append(classes[i]) + else: + coord = _get_coord(coord) + if coord is not None: + coords_new.append(coord) + classes_new.append(classes[i]) + return im_new, classes_new, coords_new + + def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='pre', value=0.): """Pads each sequence to the same length: the length of the longest sequence. @@ -1220,17 +2886,23 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncat Parameters ---------- - sequences : list of lists where each element is a sequence - maxlen : int, maximum length - dtype : type to cast the resulting sequence. - padding : 'pre' or 'post', pad either before or after each sequence. - truncating : 'pre' or 'post', remove values from sequences larger than - maxlen either in the beginning or in the end of the sequence - value : float, value to pad the sequences to the desired value. + sequences : list of list of int + All sequences where each row is a sequence. + maxlen : int + Maximum length. + dtype : numpy.dtype or str + Data type to cast the resulting sequence. + padding : str + Either 'pre' or 'post', pad either before or after each sequence. 
+ truncating : str + Either 'pre' or 'post', remove values from sequences larger than maxlen either in the beginning or in the end of the sequence + value : float + Value to pad the sequences to the desired value. Returns ---------- - x : numpy array with dimensions (number_of_sequences, maxlen) + x : numpy.array + With dimensions (number_of_sequences, maxlen) Examples ---------- @@ -1240,6 +2912,7 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncat ... [[1 1 1 1 1] ... [2 2 2 0 0] ... [3 3 0 0 0]] + """ lengths = [len(s) for s in sequences] @@ -1269,8 +2942,7 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncat # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: - raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' % - (trunc.shape[1:], idx, sample_shape)) + raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' % (trunc.shape[1:], idx, sample_shape)) if padding == 'post': x[idx, :len(trunc)] = trunc @@ -1278,21 +2950,65 @@ def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncat x[idx, -len(trunc):] = trunc else: raise ValueError('Padding type "%s" not understood' % padding) - return x + return x.tolist() + + +def remove_pad_sequences(sequences, pad_id=0): + """Remove padding. + + Parameters + ----------- + sequences : list of list of int + All sequences where each row is a sequence. + pad_id : int + The pad ID. + + Returns + ---------- + list of list of int + The processed sequences. + + Examples + ---------- + >>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]] + >>> print(remove_pad_sequences(sequences, pad_id=0)) + ... 
[[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]] + + """ + import copy + sequences_out = copy.deepcopy(sequences) + for i, _ in enumerate(sequences): + # for j in range(len(sequences[i])): + # if sequences[i][j] == pad_id: + # sequences_out[i] = sequences_out[i][:j] + # break + for j in range(1, len(sequences[i])): + if sequences[i][-j] != pad_id: + sequences_out[i] = sequences_out[i][0:-j + 1] + break + return sequences_out + def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_end_id=False): """Set all tokens(ids) after END token to the padding value, and then shorten (option) it to the maximum sequence length in this batch. Parameters ----------- - sequences : numpy array or list of list with token IDs. - e.g. [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]] - end_id : int, the special token for END. - pad_val : int, replace the end_id and the ids after end_id to this value. - is_shorten : boolean, default True. - Shorten the sequences. - remain_end_id : boolean, default False. - Keep an end_id in the end. + sequences : list of list of int + All sequences where each row is a sequence. + end_id : int + The special token for END. + pad_val : int + Replace the `end_id` and the IDs after `end_id` to this value. + is_shorten : boolean + Shorten the sequences. Default is True. + remain_end_id : boolean + Keep an `end_id` in the end. Default is False. + + Returns + ---------- + list of list of int + The processed sequences. Examples --------- @@ -1300,17 +3016,18 @@ def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_en ... [5, 3, 9, 4, 9, 2, 2, 3]] <-- end_id is 2 >>> sentences_ids = precess_sequences(sentences_ids, end_id=vocab.end_id, pad_val=0, is_shorten=True) ... 
[[4, 3, 5, 3, 0], [5, 3, 9, 4, 9]] + """ max_length = 0 - for i_s, seq in enumerate(sequences): + for _, seq in enumerate(sequences): is_end = False for i_w, n in enumerate(seq): - if n == end_id and is_end == False: # 1st time to see end_id + if n == end_id and is_end == False: # 1st time to see end_id is_end = True if max_length < i_w: max_length = i_w if remain_end_id is False: - seq[i_w] = pad_val # set end_id to pad_val + seq[i_w] = pad_val # set end_id to pad_val elif is_end == True: seq[i_w] = pad_val @@ -1321,9 +3038,24 @@ def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_en sequences[i] = seq[:max_length] return sequences + def sequences_add_start_id(sequences, start_id=0, remove_last=False): """Add special start token(id) in the beginning of each sequence. + Parameters + ------------ + sequences : list of list of int + All sequences where each row is a sequence. + start_id : int + The start ID. + remove_last : boolean + Remove the last value of each sequences. Usually be used for removing the end ID. + + Returns + ---------- + list of list of int + The processed sequences. + Examples --------- >>> sentences_ids = [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]] @@ -1332,211 +3064,126 @@ def sequences_add_start_id(sequences, start_id=0, remove_last=False): >>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2, remove_last=True) ... 
[[2, 4, 3, 5, 3, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2]] - - For Seq2seq + For Seq2seq + >>> input = [a, b, c] >>> target = [x, y, z] >>> decode_seq = [start_id, a, b] <-- sequences_add_start_id(input, start_id, True) + """ - sequences_out = [[] for _ in range(len(sequences))]#[[]] * len(sequences) - for i in range(len(sequences)): + sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences) + for i, _ in enumerate(sequences): if remove_last: sequences_out[i] = [start_id] + sequences[i][:-1] else: sequences_out[i] = [start_id] + sequences[i] return sequences_out -def sequences_get_mask(sequences, pad_val=0): - """Return mask for sequences. - - Examples - --------- - >>> sentences_ids = [[4, 0, 5, 3, 0, 0], - ... [5, 3, 9, 4, 9, 0]] - >>> mask = sequences_get_mask(sentences_ids, pad_val=0) - ... [[1 1 1 1 0 0] - ... [1 1 1 1 1 0]] - """ - mask = np.ones_like(sequences) - for i, seq in enumerate(sequences): - for i_w in reversed(range(len(seq))): - if seq[i_w] == pad_val: - mask[i, i_w] = 0 - else: - break # <-- exit the for loop, prepcess next sequence - return mask +def sequences_add_end_id(sequences, end_id=888): + """Add special end token(id) in the end of each sequence. -## Text -# see tensorlayer.nlp - + Parameters + ----------- + sequences : list of list of int + All sequences where each row is a sequence. + end_id : int + The end ID. -## Tensor Opt -def distorted_images(images=None, height=24, width=24): - """Distort images for generating more training data. + Returns + ---------- + list of list of int + The processed sequences. - Features + Examples --------- - They are cropped to height * width pixels randomly. - - They are approximately whitened to make the model insensitive to dynamic range. + >>> sequences = [[1,2,3],[4,5,6,7]] + >>> print(sequences_add_end_id(sequences, end_id=999)) + ... [[1, 2, 3, 999], [4, 5, 6, 999]] - Randomly flip the image from left to right. - - Randomly distort the image brightness. 
+ """ + sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences) + for i, _ in enumerate(sequences): + sequences_out[i] = sequences[i] + [end_id] + return sequences_out - Randomly distort the image contrast. - Whiten (Normalize) the images. +def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0): + """Add special end token(id) in the end of each sequence. Parameters - ---------- - images : 4D Tensor - The tensor or placeholder of images - height : int - The height for random crop. - width : int - The width for random crop. + ----------- + sequences : list of list of int + All sequences where each row is a sequence. + end_id : int + The end ID. + pad_id : int + The pad ID. Returns - ------- - result : tuple of Tensor - (Tensor for distorted images, Tensor for while loop index) + ---------- + list of list of int + The processed sequences. Examples - -------- - >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) - >>> sess = tf.InteractiveSession() - >>> batch_size = 128 - >>> x = tf.placeholder(tf.float32, shape=[batch_size, 32, 32, 3]) - >>> distorted_images_op = tl.preprocess.distorted_images(images=x, height=24, width=24) - >>> sess.run(tf.initialize_all_variables()) - >>> feed_dict={x: X_train[0:batch_size,:,:,:]} - >>> distorted_images, idx = sess.run(distorted_images_op, feed_dict=feed_dict) - >>> tl.visualize.images2d(X_train[0:9,:,:,:], second=2, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212) - >>> tl.visualize.images2d(distorted_images[1:10,:,:,:], second=10, saveable=False, name='distorted_images', dtype=None, fig_idx=23012) + --------- + >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]] + >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0)) + ... [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]] - Notes - ------ - - The first image in 'distorted_images' should be removed. 
+ """ + # sequences_out = [[] for _ in range(len(sequences))]#[[]] * len(sequences) + import copy + sequences_out = copy.deepcopy(sequences) + # # add a pad to all + # for i in range(len(sequences)): + # for j in range(len(sequences[i])): + # sequences_out[i].append(pad_id) + # # pad -- > end + # max_len = 0 + for i, v in enumerate(sequences): + for j, _v2 in enumerate(v): + if sequences[i][j] == pad_id: + sequences_out[i][j] = end_id + # if j > max_len: + # max_len = j + break + # # remove pad if too long + # for i in range(len(sequences)): + # for j in range(len(sequences[i])): + # sequences_out[i] = sequences_out[i][:max_len+1] + return sequences_out - References - ----------- - - `tensorflow.models.image.cifar10.cifar10_input `_ - """ - print("This function is deprecated, please use tf.map_fn instead, e.g:\n \ - t_image = tf.map_fn(lambda img: tf.image.random_brightness(img, max_delta=32. / 255.), t_image)\n \ - t_image = tf.map_fn(lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5), t_image)\n \ - t_image = tf.map_fn(lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5), t_image)\n \ - t_image = tf.map_fn(lambda img: tf.image.random_hue(img, max_delta=0.032), t_image)") - exit() - # print(" [Warning] distorted_images will be deprecated due to speed, see TFRecord tutorial for more info...") - try: - batch_size = int(images._shape[0]) - except: - raise Exception('unknow batch_size of images') - distorted_x = tf.Variable(tf.constant(0.1, shape=[1, height, width, 3])) - i = tf.Variable(tf.constant(0)) - - c = lambda distorted_x, i: tf.less(i, batch_size) - - def body(distorted_x, i): - # 1. Randomly crop a [height, width] section of the image. - image = tf.random_crop(tf.gather(images, i), [height, width, 3]) - # 2. Randomly flip the image horizontally. - image = tf.image.random_flip_left_right(image) - # 3. Randomly change brightness. - image = tf.image.random_brightness(image, max_delta=63) - # 4. Randomly change contrast. 
- image = tf.image.random_contrast(image, lower=0.2, upper=1.8) - # 5. Subtract off the mean and divide by the variance of the pixels. - image = tf.image.per_image_whitening(image) - # 6. Append the image to a batch. - image = tf.expand_dims(image, 0) - return tf.concat(0, [distorted_x, image]), tf.add(i, 1) - - result = tf.while_loop(cond=c, body=body, loop_vars=(distorted_x, i), parallel_iterations=16) - return result - - -def crop_central_whiten_images(images=None, height=24, width=24): - """Crop the central of image, and normailize it for test data. - - They are cropped to central of height * width pixels. - - Whiten (Normalize) the images. + +def sequences_get_mask(sequences, pad_val=0): + """Return mask for sequences. Parameters - ---------- - images : 4D Tensor - The tensor or placeholder of images - height : int - The height for central crop. - width : int - The width for central crop. + ----------- + sequences : list of list of int + All sequences where each row is a sequence. + pad_val : int + The pad value. Returns - ------- - result : tuple Tensor - (Tensor for distorted images, Tensor for while loop index) + ---------- + list of list of int + The mask. 
Examples - -------- - >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False) - >>> sess = tf.InteractiveSession() - >>> batch_size = 128 - >>> x = tf.placeholder(tf.float32, shape=[batch_size, 32, 32, 3]) - >>> central_images_op = tl.preprocess.crop_central_whiten_images(images=x, height=24, width=24) - >>> sess.run(tf.initialize_all_variables()) - >>> feed_dict={x: X_train[0:batch_size,:,:,:]} - >>> central_images, idx = sess.run(central_images_op, feed_dict=feed_dict) - >>> tl.visualize.images2d(X_train[0:9,:,:,:], second=2, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212) - >>> tl.visualize.images2d(central_images[1:10,:,:,:], second=10, saveable=False, name='central_images', dtype=None, fig_idx=23012) - - Notes - ------ - The first image in 'central_images' should be removed. + --------- + >>> sentences_ids = [[4, 0, 5, 3, 0, 0], + ... [5, 3, 9, 4, 9, 0]] + >>> mask = sequences_get_mask(sentences_ids, pad_val=0) + ... [[1 1 1 1 0 0] + ... [1 1 1 1 1 0]] - Code References - ---------------- - - ``tensorflow.models.image.cifar10.cifar10_input`` """ - print("This function is deprecated, please use tf.map_fn instead, e.g:\n \ - t_image = tf.map_fn(lambda img: tf.image.random_brightness(img, max_delta=32. 
/ 255.), t_image)\n \ - t_image = tf.map_fn(lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5), t_image)\n \ - t_image = tf.map_fn(lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5), t_image)\n \ - t_image = tf.map_fn(lambda img: tf.image.random_hue(img, max_delta=0.032), t_image)") - exit() - # print(" [Warning] crop_central_whiten_images will be deprecated due to speed, see TFRecord tutorial for more info...") - try: - batch_size = int(images._shape[0]) - except: - raise Exception('unknow batch_size of images') - central_x = tf.Variable(tf.constant(0.1, shape=[1, height, width, 3])) - i = tf.Variable(tf.constant(0)) - - c = lambda central_x, i: tf.less(i, batch_size) - - def body(central_x, i): - # 1. Crop the central [height, width] of the image. - image = tf.image.resize_image_with_crop_or_pad(tf.gather(images, i), height, width) - # 2. Subtract off the mean and divide by the variance of the pixels. - image = tf.image.per_image_whitening(image) - # 5. Append the image to a batch. - image = tf.expand_dims(image, 0) - return tf.concat(0, [central_x, image]), tf.add(i, 1) - - result = tf.while_loop(cond=c, body=body, loop_vars=(central_x, i), parallel_iterations=16) - return result - - - - - - - - - - - - -# + mask = np.ones_like(sequences) + for i, seq in enumerate(sequences): + for i_w in reversed(range(len(seq))): + if seq[i_w] == pad_val: + mask[i, i_w] = 0 + else: + break # <-- exit the for loop, prepcess next sequence + return mask diff --git a/tensorlayer/rein.py b/tensorlayer/rein.py index 9ad3de7..43882f6 100644 --- a/tensorlayer/rein.py +++ b/tensorlayer/rein.py @@ -1,25 +1,37 @@ #! 
/usr/bin/python -# -*- coding: utf8 -*- +# -*- coding: utf-8 -*- - - -import tensorflow as tf import numpy as np +import tensorflow as tf from six.moves import xrange -def discount_episode_rewards(rewards=[], gamma=0.99, mode=0): - """ Take 1D float array of rewards and compute discounted rewards for an +__all__ = [ + 'discount_episode_rewards', + 'cross_entropy_reward_loss', + 'log_weight', + 'choice_action_by_probs', +] + + +def discount_episode_rewards(rewards=None, gamma=0.99, mode=0): + """Take 1D float array of rewards and compute discounted rewards for an episode. When encount a non-zero value, consider as the end a of an episode. Parameters ---------- - rewards : numpy list - a list of rewards + rewards : list + List of rewards gamma : float - discounted factor + Discounted factor mode : int - if mode == 0, reset the discount process when encount a non-zero reward (Ping-pong game). - if mode == 1, would not reset the discount process. + Mode for computing the discount rewards. + - If mode == 0, reset the discount process when encount a non-zero reward (Ping-pong game). + - If mode == 1, would not reset the discount process. + + Returns + -------- + list of float + The discounted rewards. Examples ---------- @@ -33,7 +45,10 @@ def discount_episode_rewards(rewards=[], gamma=0.99, mode=0): >>> print(discount_rewards) ... [ 1.52110755 1.69011939 1.87791049 2.08656716 1.20729685 1.34144104 ... 1.49048996 1.65610003 0.72899997 0.81 0.89999998 1. ] + """ + if rewards is None: + raise Exception("rewards should be a list") discounted_r = np.zeros_like(rewards, dtype=np.float32) running_add = 0 for t in reversed(xrange(0, rewards.size)): @@ -46,40 +61,107 @@ def discount_episode_rewards(rewards=[], gamma=0.99, mode=0): def cross_entropy_reward_loss(logits, actions, rewards, name=None): - """ Calculate the loss for Policy Gradient Network. + """Calculate the loss for Policy Gradient Network. Parameters ---------- logits : tensor - The network outputs without softmax. 
This function implements softmax - inside. - actions : tensor/ placeholder + The network outputs without softmax. This function implements softmax inside. + actions : tensor or placeholder The agent actions. - rewards : tensor/ placeholder + rewards : tensor or placeholder The rewards. + Returns + -------- + Tensor + The TensorFlow loss function. + Examples ---------- - >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D]) # observation for training - >>> network = tl.layers.InputLayer(states_batch_pl, name='input_layer') - >>> network = tl.layers.DenseLayer(network, n_units=H, act = tf.nn.relu, name='relu1') - >>> network = tl.layers.DenseLayer(network, n_units=3, act = tl.activation.identity, name='output_layer') + >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D]) + >>> network = InputLayer(states_batch_pl, name='input') + >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1') + >>> network = DenseLayer(network, n_units=3, name='out') >>> probs = network.outputs >>> sampling_prob = tf.nn.softmax(probs) >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None]) >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None]) - >>> loss = cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl) + >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl) >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss) - """ - try: # TF 1.0 + """ + try: # TF 1.0+ cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name) - except: + except Exception: cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, targets=actions) # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, actions) - try: ## TF1.0 + try: ## TF1.0+ loss = tf.reduce_sum(tf.multiply(cross_entropy, rewards)) - except: ## TF0.12 - loss = tf.reduce_sum(tf.mul(cross_entropy, rewards)) 
# element-wise mul + except Exception: ## TF0.12 + loss = tf.reduce_sum(tf.mul(cross_entropy, rewards)) # element-wise mul return loss + + +def log_weight(probs, weights, name='log_weight'): + """Log weight. + + Parameters + ----------- + probs : tensor + If it is a network output, usually we should scale it to [0, 1] via softmax. + weights : tensor + The weights. + + Returns + -------- + Tensor + The Tensor after appling the log weighted expression. + + """ + with tf.variable_scope(name): + exp_v = tf.reduce_mean(tf.log(probs) * weights) + return exp_v + + +def choice_action_by_probs(probs=(0.5, 0.5), action_list=None): + """Choice and return an an action by given the action probability distribution. + + Parameters + ------------ + probs : list of float. + The probability distribution of all actions. + action_list : None or a list of int or others + A list of action in integer, string or others. If None, returns an integer range between 0 and len(probs)-1. + + Returns + -------- + float int or str + The chosen action. + + Examples + ---------- + >>> for _ in range(5): + >>> a = choice_action_by_probs([0.2, 0.4, 0.4]) + >>> print(a) + ... 0 + ... 1 + ... 1 + ... 2 + ... 1 + >>> for _ in range(3): + >>> a = choice_action_by_probs([0.5, 0.5], ['a', 'b']) + >>> print(a) + ... a + ... b + ... 
b + + """ + if action_list is None: + n_action = len(probs) + action_list = np.arange(n_action) + else: + if len(action_list) != len(probs): + raise Exception("number of actions should equal to number of probabilities.") + return np.random.choice(action_list, p=probs) diff --git a/tensorlayer/third_party/__init__.py b/tensorlayer/third_party/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tensorlayer/third_party/roi_pooling/.gitignore b/tensorlayer/third_party/roi_pooling/.gitignore new file mode 100644 index 0000000..08030a8 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/.gitignore @@ -0,0 +1,3 @@ +.ipynb_checkpoints/ +build/ + diff --git a/tensorlayer/third_party/roi_pooling/README.md b/tensorlayer/third_party/roi_pooling/README.md new file mode 100644 index 0000000..d597cea --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/README.md @@ -0,0 +1,56 @@ +# Hint from TensorLayer +- This implementation is from `https://github.com/deepsense-ai/roi-pooling`, date: 31 Aug 2017. +- To install this, you have to clone TensorLayer from Github instead of pip install. +- Remember to modify the `CUDA_LIB` in Makefile before running `python setup.py install` in this folder. +- Make sure `roi_pooling_example.py` and `test_roi_layer.py` is runable. + + +---- + + +## RoI pooling in TensorFlow + +This repo contains the implementation of **Region of Interest pooling** as a custom TensorFlow operation. The CUDA code responsible for the computations was largely taken from the original [Caffe implementation by Ross Girshick](https://github.com/rbgirshick/fast-rcnn). + +For more information about RoI pooling you can check out [Region of interest pooling explained](https://deepsense.io/region-of-interest-pooling-explained/) at our [deepsense.io](https://deepsense.io/) blog. 
+ +![Region of Interest Pooling animation](roi_pooling_animation.gif) + + +## Requirements + +To compile and use `roi_pooling` layer you need to have: + +* [CUDA](https://developer.nvidia.com/cuda-toolkit) (tested with 8.0) +* [https://www.tensorflow.org/](TensorFlow) (tested with 0.12.0 and 1.0.0) + +Only official TensorFlow releases are currently supported. If you're using a custom built TensorFlow compiled with a different GCC version (e.g. 5.X) you may need to modify the makefile to [enable the new ABI version](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html). + + +## Install + +Since it uses compilation + +```bash +$ git clone git@github.com:deepsense-io/roi-pooling.git +$ cd roi-pooling +$ python setup.py install +``` + +Right now we provide only GPU implementation (no CPU at this time). + + +## Usage + +After successful installation you can use the operation like this: + +```python +from roi_pooling.roi_pooling_ops import roi_pooling + +# here obtain feature map and regions of interest +rpooling = roi_pooling(feature_map, rois, 7, 7) +# continue the model +``` + +Working example in Jupyter Notebook: [examples/roi_pooling_minimal_example.ipynb](https://github.com/deepsense-io/roi-pooling/blob/master/examples/roi_pooling_minimal_example.ipynb) + diff --git a/tensorlayer/third_party/roi_pooling/examples/__init__.py b/tensorlayer/third_party/roi_pooling/examples/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tensorlayer/third_party/roi_pooling/examples/roi_pooling_minimal_example.ipynb b/tensorlayer/third_party/roi_pooling/examples/roi_pooling_minimal_example.ipynb new file mode 100644 index 0000000..c1edc35 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/examples/roi_pooling_minimal_example.ipynb @@ -0,0 +1,148 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* blog post: [Region of interest pooling explained - 
deepsense.io](https://deepsense.io/region-of-interest-pooling-explained/)\n", + "* repository: [deepsense-io/roi-pooling](https://github.com/deepsense-io/roi-pooling)" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "\n", + "import tensorflow as tf\n", + "import numpy as np\n", + "\n", + "from roi_pooling.roi_pooling_ops import roi_pooling" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# 4x4 feature map with only 1 channel\n", + "input_value = [[\n", + " [[1], [2], [4], [4]],\n", + " [[3], [4], [1], [2]],\n", + " [[6], [2], [1], [7]],\n", + " [[1], [3], [2], [8]]\n", + "]]\n", + "input_value = np.asarray(input_value, dtype='float32')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# regions of interest as lists of:\n", + "# feature map index, upper left, bottom right coordinates\n", + "rois_value = [\n", + " [0, 0, 0, 1, 3],\n", + " [0, 2, 2, 3, 3],\n", + " [0, 1, 0, 3, 2]\n", + "]\n", + "rois_value = np.asarray(rois_value, dtype='int32')\n", + "\n", + "# in this case we have 3 RoI pooling operations:\n", + "# * channel 0, rectangular region (0, 0) to (1, 3)\n", + "# xx..\n", + "# xx..\n", + "# xx..\n", + "# xx..\n", + "#\n", + "# * channel 0, rectangular region (2, 2) to (3, 3)\n", + "# ....\n", + "# ....\n", + "# ..xx\n", + "# ..xx\n", + "# * channel 0, rectangular region (1, 0) to (3, 2)\n", + "# ....\n", + "# xxx.\n", + "# xxx.\n", + "# xxx." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[[[ 3. 4.]\n", + " [ 6. 3.]]]\n", + "\n", + "\n", + " [[[ 1. 7.]\n", + " [ 2. 8.]]]\n", + "\n", + "\n", + " [[[ 4. 4.]\n", + " [ 4. 
7.]]]]\n" + ] + } + ], + "source": [ + "input_featuremap = tf.placeholder(tf.float32)\n", + "rois = tf.placeholder(tf.int32)\n", + "input_const = tf.constant(input_value, tf.float32)\n", + "rois_const = tf.constant(rois_value, tf.int32)\n", + "y = roi_pooling(input_const, rois_const, pool_height=2, pool_width=2)\n", + "\n", + "with tf.Session('') as sess:\n", + " y_output = sess.run(y, feed_dict={input_featuremap: input_value, rois: rois_value})\n", + " print(y_output)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling/Makefile b/tensorlayer/third_party/roi_pooling/roi_pooling/Makefile new file mode 100644 index 0000000..db9de78 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/roi_pooling/Makefile @@ -0,0 +1,18 @@ +TF_INC = $(shell python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())') +CUDA_LIB = /usr/local/cuda-8.0/lib64 + +all: clean build test + +build: roi_pooling.so + +roi_pooling.cu.o: roi_pooling.cu.cc + nvcc -std=c++11 -c -o $@ $? -I $(TF_INC) -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -D _GLIBCXX_USE_CXX11_ABI=0 + +roi_pooling.so: roi_pooling.cc roi_pooling.cu.o + g++ -std=c++11 -shared -o $@ $? 
-I $(TF_INC) -fPIC -lcudart -L$(CUDA_LIB) -D _GLIBCXX_USE_CXX11_ABI=0 + +test: build + python roi_pooling_test.py + +clean: + rm -f *.o *.so *.pyc *.npy diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling/__init__.py b/tensorlayer/third_party/roi_pooling/roi_pooling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cc b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cc new file mode 100644 index 0000000..d1f123d --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cc @@ -0,0 +1,162 @@ +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include +#include +#include + +using namespace tensorflow; +using namespace std; + +REGISTER_OP("RoiPooling") +.Input("input: float32") +.Input("rois: int32") +.Attr("pool_height: int") +.Attr("pool_width: int") +.Output("output: float32") +.Output("argmax_output: int32"); + + +#define Dtype float + +void RoiPoolingKernelLauncher(const float* input, const int* rois, int n_rois, int channels, int height, int width, + int pooled_height, int pooled_width, Dtype* output, int* argmax_output); + +// IMPORTANT(maciek): need info about storage of the data in memory, assumed something but need the docs confirming it + +class RoiPoolingOp : public OpKernel { + private: + int pool_height_, pool_width_; + public: + explicit RoiPoolingOp(OpKernelConstruction* context) : OpKernel(context) { + OP_REQUIRES_OK(context, + context->GetAttr("pool_height", &pool_height_)); + + OP_REQUIRES_OK(context, + context->GetAttr("pool_width", &pool_width_)); + } + + + void Compute(OpKernelContext* context) override { + // Grab the input tensor + const Tensor& input_tensor = context->input(0); + const Tensor& rois_tensor = context->input(1); + + auto input = input_tensor.flat(); + auto rois = rois_tensor.flat(); + + // Create an output tensor + Tensor* output_tensor = NULL; + Tensor* 
argmax_output_tensor = NULL; + + auto input_shape = input_tensor.shape(); + auto rois_shape = rois_tensor.shape(); + + int n_rois = rois_shape.dim_size(0); + int height = input_shape.dim_size(1); + int width = input_shape.dim_size(2); + int channels = input_shape.dim_size(3); + + TensorShape output_shape = TensorShape({static_cast(n_rois), + static_cast(channels), + static_cast(pool_height_), + static_cast(pool_width_)}); + + OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, + &output_tensor)); + + OP_REQUIRES_OK(context, context->allocate_output(1, output_shape, + &argmax_output_tensor)); + + auto output = output_tensor->template flat(); + auto argmax_output = argmax_output_tensor->template flat(); + + RoiPoolingKernelLauncher(input.data(), rois.data(), + n_rois, channels, + height, width, + pool_height_, pool_width_, + output.data(), argmax_output.data()); + } +}; + +REGISTER_KERNEL_BUILDER(Name("RoiPooling").Device(DEVICE_GPU), RoiPoolingOp); + +///////////// RoiPoolingGrad + + +REGISTER_OP("RoiPoolingGrad") +.Input("orig_input: float32") +.Input("orig_rois: int32") +.Input("orig_output: float32") +.Input("orig_argmax_output: int32") +.Input("orig_output_grad: float32") +.Attr("pool_height: int") +.Attr("pool_width: int") +.Output("output: float32") +.Doc(R"doc( + region of interest pooling grad +)doc"); + +#define Dtype float +void RoiPoolingGradKernelLauncher(const Dtype* orig_input, const int* orig_rois, + int mb_size, + int n_rois, int channels, int height, int width, + int pooled_height, int pooled_width, + const Dtype* orig_output, const int* orig_argmax_output, + const Dtype* orig_output_grad, + Dtype* output); + +// IMPORTANT(maciek): need info about storage of the data in memory, assumed something but need the docs confirming it + +class RoiPoolingGradOp : public OpKernel { + private: + int pool_height_, pool_width_; + public: + explicit RoiPoolingGradOp(OpKernelConstruction* context) : OpKernel(context) { + OP_REQUIRES_OK(context, + 
context->GetAttr("pool_height", &pool_height_)); + + OP_REQUIRES_OK(context, + context->GetAttr("pool_width", &pool_width_)); + } + + + void Compute(OpKernelContext* context) override { + // Grab the input tensor + const Tensor& orig_input_tensor = context->input(0); + const Tensor& orig_rois_tensor = context->input(1); + const Tensor& orig_output_tensor = context->input(2); + const Tensor& orig_argmax_output_tensor = context->input(3); + const Tensor& orig_output_grad_tensor = context->input(4); + + auto orig_input = orig_input_tensor.flat(); + auto orig_rois = orig_rois_tensor.flat(); + auto orig_output = orig_output_tensor.flat(); + auto orig_argmax_output = orig_argmax_output_tensor.flat(); + auto orig_output_grad = orig_output_grad_tensor.flat(); + + // Create an output tensor + Tensor* output_tensor = NULL; + auto orig_input_shape = orig_input_tensor.shape(); + auto orig_rois_shape = orig_rois_tensor.shape(); + auto grads_shape = orig_input_shape; + + int mb_size = orig_input_shape.dim_size(0); + int n_rois = orig_rois_shape.dim_size(0); + int height = orig_input_shape.dim_size(1); + int width = orig_input_shape.dim_size(2); + int channels = orig_input_shape.dim_size(3); + + OP_REQUIRES_OK(context, context->allocate_output(0, grads_shape, + &output_tensor)); + + auto output = output_tensor->template flat(); + + // Call the cuda kernel launcher + RoiPoolingGradKernelLauncher(orig_input.data(), orig_rois.data(), + mb_size, n_rois, channels, height, width, pool_height_, pool_width_, + orig_output.data(), orig_argmax_output.data(), orig_output_grad.data(), output.data()); + } +}; + + +REGISTER_KERNEL_BUILDER(Name("RoiPoolingGrad").Device(DEVICE_GPU), RoiPoolingGradOp); diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cu.cc b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cu.cc new file mode 100644 index 0000000..bbacb55 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling.cu.cc @@ -0,0 +1,214 @@ +#if 
GOOGLE_CUDA + +#include +#include +#define EIGEN_USE_GPU +#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" + +// CUDA: index helpers +#define idx4_4(index, d1, d2, d3, d4) (index % d4) +#define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3) +#define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2) +#define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) %d1) + +// CUDA: various checks for different function calls. +#define CUDA_CHECK(condition) \ + /* Code block avoids redefinition of cudaError_t error */ \ + do { \ + cudaError_t error = condition; \ + if (error != cudaSuccess) { \ + return 1; \ + } \ + } while (0) + +// CUDA: grid stride looping +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +// CUDA: use 512 threads per block +const int CAFFE_CUDA_NUM_THREADS = 512; + +// CUDA: number of blocks for threads. +inline int CAFFE_GET_BLOCKS(const int N) { + // TODO rewrite this part to be consistent with tf conventions + int optimal_number_of_blocks = (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; + int max_number_of_blocks = 65000; + return std::min(optimal_number_of_blocks, max_number_of_blocks); +} + + +#define Dtype float + +__global__ void RoiPoolingKernel(const Dtype* input, const int* rois, + int n_rois, int channels, int height, int width, + int pooled_height, int pooled_width, + Dtype* output, int* argmax_output) { + int output_size = n_rois * channels * pooled_height * pooled_width; + + CUDA_KERNEL_LOOP(index, output_size) { + // (n, c, ph, pw) is an element in the pooled output + int pw = idx4_4(index, n_rois, channels, pooled_height, pooled_width); + int ph = idx4_3(index, n_rois, channels, pooled_height, pooled_width); + int c = idx4_2(index, n_rois, channels, pooled_height, pooled_width); + int n = idx4_1(index, n_rois, channels, pooled_height, pooled_width); + + auto bottom_rois_act = rois + n * 5; + + int roi_batch_ind = 
bottom_rois_act[0]; + int roi_start_w = bottom_rois_act[1]; + int roi_start_h = bottom_rois_act[2]; + int roi_end_w = bottom_rois_act[3]; + int roi_end_h = bottom_rois_act[4]; + + // Force malformed ROIs to be 1x1 + // NOTE(maciek): roi_start, roi_end seems to be inclusive + int roi_width = max(roi_end_w - roi_start_w + 1, 1); + int roi_height = max(roi_end_h - roi_start_h + 1, 1); + + // divide the ROIs into smaller regions for max pooling + Dtype bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + Dtype bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // compute the precise coordinates of each pooling subregion of the ROIs + int hstart = static_cast(floor(static_cast(ph) * bin_size_h)); + int wstart = static_cast(floor(static_cast(pw) * bin_size_w)); + int hend = static_cast(ceil(static_cast(ph + 1) * bin_size_h)); + int wend = static_cast(ceil(static_cast(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = min(max(hstart + roi_start_h, 0), height); + hend = min(max(hend + roi_start_h, 0), height); + wstart = min(max(wstart + roi_start_w, 0), width); + wend = min(max(wend + roi_start_w, 0), width); + + //printf("%d %d %d %d %d %d %d %d\n", n, c, pw, ph, hstart, hend, wstart, wend); + + bool is_empty = (hend <= hstart) || (wend <= wstart); + + // Define an empty pooling region to be zero + + Dtype maxval = is_empty ? 0 : -999999999.0; + //Dtype maxval = is_empty ? 
0 : -FLT_MAX; + // If nothing is pooled, argmax = -1 causes nothing to be backprop'd + + int maxidx = -1; + auto input_act = input + (roi_batch_ind * height * width * channels); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int bottom_index = (h * width + w) * channels + c; + + // bottom index is relative to 2d image only + if (input_act[bottom_index] > maxval) { + maxval = input_act[bottom_index]; + maxidx = bottom_index; + } + } + } + output[index] = maxval; + argmax_output[index] = maxidx; + } +} + + +void RoiPoolingKernelLauncher(const float* input, const int* rois, int n_rois, int channels, int height, int width, + int pooled_height, int pooled_width, Dtype* output, int* argmax_output) { + int out_size = n_rois * channels * pooled_height * pooled_width; + + RoiPoolingKernel<<>>(input, rois, n_rois, channels, height, width, + pooled_height, pooled_width, output, argmax_output); +} + + +/////////////// Grad +__global__ void RoiPoolingGradKernel(const Dtype* orig_input, const int* orig_rois, + int mb_size, + int n_rois, int channels, int height, int width, + int pooled_height, int pooled_width, + const Dtype* orig_output, const int* orig_argmax_output, + const Dtype* orig_output_grad, + Dtype* output) { + + int orig_input_size = mb_size * height * width * channels; + + CUDA_KERNEL_LOOP(index, orig_input_size) { + // (n, h, w, c) coords in bottom data + int c = idx4_4(index, mb_size, height, width, channels); + int w = idx4_3(index, mb_size, height, width, channels); + int h = idx4_2(index, mb_size, height, width, channels); + int n = idx4_1(index, mb_size, height, width, channels); + + Dtype gradient = 0; + // Accumulate gradient over all ROIs that pooled this element + for (int roi_n = 0; roi_n < n_rois; ++roi_n) { + const int* offset_bottom_rois = orig_rois + roi_n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + // Skip if ROI's batch index doesn't match n + if (n != roi_batch_ind) { + continue; + } + + int roi_start_w = 
offset_bottom_rois[1]; + int roi_start_h = offset_bottom_rois[2]; + int roi_end_w = offset_bottom_rois[3]; + int roi_end_h = offset_bottom_rois[4]; + + // Skip if ROI doesn't include (h, w) + const bool in_roi = (w >= roi_start_w && w <= roi_end_w && + h >= roi_start_h && h <= roi_end_h); + if (!in_roi) { + continue; + } + + int offset = (roi_n * channels + c) * pooled_height * pooled_width; + const Dtype* offset_top_diff = orig_output_grad + offset; + const int* offset_argmax_data = orig_argmax_output + offset; + + // Compute feasible set of pooled units that could have pooled + // this bottom unit + + // Force malformed ROIs to be 1x1 + int roi_width = max(roi_end_w - roi_start_w + 1, 1); + int roi_height = max(roi_end_h - roi_start_h + 1, 1); + + Dtype bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + Dtype bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + int phstart = floor(static_cast(h - roi_start_h) / bin_size_h); + int phend = ceil(static_cast(h - roi_start_h + 1) / bin_size_h); + int pwstart = floor(static_cast(w - roi_start_w) / bin_size_w); + int pwend = ceil(static_cast(w - roi_start_w + 1) / bin_size_w); + + phstart = min(max(phstart, 0), pooled_height); + phend = min(max(phend, 0), pooled_height); + pwstart = min(max(pwstart, 0), pooled_width); + pwend = min(max(pwend, 0), pooled_width); + + for (int ph = phstart; ph < phend; ++ph) { + for (int pw = pwstart; pw < pwend; ++pw) { + if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) { + gradient += offset_top_diff[ph * pooled_width + pw]; + } + } + } + } + output[index] = gradient; + } + +} + +void RoiPoolingGradKernelLauncher(const Dtype* orig_input, const int* orig_rois, + int mb_size, + int n_rois, int channels, int height, int width, + int pooled_height, int pooled_width, + const Dtype* orig_output, const int* orig_argmax_output, + const Dtype* orig_output_grad, + Dtype* output) { + int out_size = mb_size * height * width * channels; + 
RoiPoolingGradKernel<<>>(orig_input, orig_rois, + mb_size, n_rois, channels, height, width, pooled_height, pooled_width, + orig_output, orig_argmax_output, orig_output_grad, output); +} + +#endif diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_ops.py b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_ops.py new file mode 100644 index 0000000..be971c3 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_ops.py @@ -0,0 +1,50 @@ +import os + +import tensorflow as tf +from tensorflow.python.framework import ops + +module_path = os.path.realpath(__file__) +module_dir = os.path.dirname(module_path) +lib_path = os.path.join(module_dir, 'roi_pooling.so') +roi_pooling_module = tf.load_op_library(lib_path) + + +def roi_pooling(input, rois, pool_height, pool_width): + """ + returns a tensorflow operation for computing the Region of Interest Pooling + + @arg input: feature maps on which to perform the pooling operation + @arg rois: list of regions of interest in the format (feature map index, upper left, bottom right) + @arg pool_width: size of the pooling sections + """ + # TODO(maciek): ops scope + out = roi_pooling_module.roi_pooling(input, rois, pool_height=pool_height, pool_width=pool_width) + output, argmax_output = out[0], out[1] + return output + + +@ops.RegisterGradient("RoiPooling") +def _RoiPoolingGrad(op, *grads): + orig_inputs = op.inputs[0] + orig_rois = op.inputs[1] + orig_output = op.outputs[0] + orig_argmax_output = op.outputs[1] + + orig_output_grad = grads[0] + output_grad = roi_pooling_module.roi_pooling_grad( + orig_inputs, orig_rois, orig_output, orig_argmax_output, orig_output_grad, pool_height=op.get_attr('pool_height'), pool_width=op.get_attr('pool_width')) + return [output_grad, None] + + +@ops.RegisterShape("RoiPooling") +def _RoiPoolingShape(op): + input = op.inputs[0] + rois = op.inputs[1] + + n_rois = rois.get_shape()[0] + n_channels = input.get_shape()[3] + pool_height = 
op.get_attr('pool_height') + pool_width = op.get_attr('pool_width') + + #TODO: check the width/hegiht order + return [tf.TensorShape([n_rois, n_channels, pool_width, pool_height]), tf.TensorShape(None)] diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_test.py b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_test.py new file mode 100644 index 0000000..a21bd6a --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/roi_pooling/roi_pooling_test.py @@ -0,0 +1,99 @@ +import numpy as np +import tensorflow as tf + +from roi_pooling_ops import roi_pooling + + +class RoiPoolingTest(tf.test.TestCase): + # TODO(maciek): add python, implementation and test outputs + # TODO(maciek): test pool_height != pool_width, height != width + + def test_roi_pooling_grad(self): + # TODO(maciek): corner cases + input_value = [[[[1], [2], [4], [4]], [[3], [4], [1], [2]], [[6], [2], [1], [7.0]], [[1], [3], [2], [8]]]] + input_value = np.asarray(input_value, dtype='float32') + + rois_value = [[0, 0, 0, 1, 1], [0, 1, 1, 2, 2], [0, 2, 2, 3, 3], [0, 0, 0, 2, 2], [0, 0, 0, 3, 3]] + rois_value = np.asarray(rois_value, dtype='int32') + + with tf.Session(''): + # NOTE(maciek): looks like we have to use consts here, based on tensorflow/python/ops/nn_test.py + input_const = tf.constant(input_value, tf.float32) + rois_const = tf.constant(rois_value, tf.int32) + y = roi_pooling(input_const, rois_const, pool_height=2, pool_width=2) + mean = tf.reduce_mean(y) + + numerical_grad_error_1 = tf.test.compute_gradient_error([input_const], [input_value.shape], y, [5, 2, 2, 1]) + + numerical_grad_error_2 = tf.test.compute_gradient_error([input_const], [input_value.shape], mean, []) + + self.assertLess(numerical_grad_error_1, 1e-4) + self.assertLess(numerical_grad_error_2, 1e-4) + + def test_shape_inference_1(self): + pooled_w, pooled_h = 2, 2 + input_w, input_h = 200, 200 + n_channels = 3 + n_batches = None + input = tf.placeholder(tf.float32, shape=[n_batches, input_w, 
input_h, n_channels]) + + n_rois = None + single_roi_dimension = 5 + rois = tf.placeholder(tf.int32, shape=[n_rois, single_roi_dimension]) + + y = roi_pooling(input, rois, pool_height=pooled_w, pool_width=pooled_h) + + self.assertEqual(y.get_shape().ndims, 4) + self.assertIs(y.get_shape()[0].value, n_rois) + self.assertIs(y.get_shape()[1].value, n_channels) + self.assertIs(y.get_shape()[2].value, pooled_h) + self.assertIs(y.get_shape()[3].value, pooled_w) + + def test_shape_inference_2(self): + pooled_w, pooled_h = 3, 4 + input_w, input_h = 200, 300 + n_channels = 3 + n_batches = None + input = tf.placeholder(tf.float32, shape=[n_batches, input_w, input_h, n_channels]) + + n_rois = None + single_roi_dimension = 5 + rois = tf.placeholder(tf.int32, shape=[n_rois, single_roi_dimension]) + + y = roi_pooling(input, rois, pool_height=pooled_w, pool_width=pooled_h) + + self.assertEqual(y.get_shape().ndims, 4) + self.assertIs(y.get_shape()[0].value, n_rois) + self.assertIs(y.get_shape()[1].value, n_channels) + self.assertIs(y.get_shape()[2].value, pooled_h) + self.assertIs(y.get_shape()[3].value, pooled_w) + + def test_very_big_output(self): + """ + This test checks whether the layer can handle a corner case + where the number of output pixels is very large, possibly larger + than the number of available GPU threads + """ + + pooled_w, pooled_h = 7, 7 + input_w, input_h = 72, 240 + n_channels = 512 + n_batches = 2 + x_input = np.ones(shape=(n_batches, input_w, input_h, n_channels)) + n_rois = 5000 + rois_input = np.ones(shape=(n_rois, 5)) + + input = tf.placeholder(tf.float32, shape=[n_batches, input_w, input_h, n_channels]) + single_roi_dimension = 5 + rois = tf.placeholder(tf.int32, shape=[n_rois, single_roi_dimension]) + + y = roi_pooling(input, rois, pool_height=pooled_w, pool_width=pooled_h) + + with tf.Session('') as sess: + y_output = sess.run(y, feed_dict={input: x_input, rois: rois_input}) + + self.assertTrue(np.all(y_output == 1)) + + +if __name__ == '__main__': 
+ tf.test.main() diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling_animation.gif b/tensorlayer/third_party/roi_pooling/roi_pooling_animation.gif new file mode 100644 index 0000000000000000000000000000000000000000..9d35d21a901061f8b0fd3995306e0a0e03dabdcc GIT binary patch literal 578933 zcmWhzWn2?p8(p(8y1Qc_AYF>mor+33LP}ahP|*!G7(G&&(IcgVQ942z94JVO0!pbM zsG$Dm>-*t;dA{BAd!FZ>d(JVlFxA%a6azbfegXghBoF{$g1{jFE&xCf0ODo@fH(nQ zApnvOz|G6U2?5E17}XdR)B#|aG>8|4ez8w+nu+$&U4#dE%LgYxxc!Z z@44#%YEA*VH}0#O-;?vZEgKVnx^++6|A9=@J&lMEK`%cGb1S1TOCyu}hS#EPZ%0~W zYDZ9XLsBKxQ=Ju3B6PchWSU~-n;*$kKRDMKi|Tlw(RELw`|icwNR7_>x;^)`yJFPq zqEN-bc3D@ib=ieAT3O}-IO?+q~QyJJqdcfISrebe3G?uekGD;EbMoW?w^PTq)~ zvk98H<~!vYIOXa);S@RJcyHPzaQ1p6%`tk`A!^noYTD9d@b<(YUHoxvX2asN21w**vV#zMk4h&dTZ^*^v;zx8-7FmVkYVs6P)Jf49--%m96HLsS=cV(Kk!sPNvMGO!X>th!^~3~;V!U{6s(4W{DmM{@&r-=w7bf9_ zNJ+wV@thSoQZ?~Xb@B2|akABT`L;Nn27+5}qD^y(Pbc2|Npk22?#5uk)Xdc6^vKlA z>@;oi@87>?zkht({`hX=ePv@&e5{oC83d5`Zw$o$B5h3^uNoQHo2V)&gMt4_3jYq^ z|6>Ax(*fTRLgp=`fp|ElyxUky`A{O8q-C*rYsJeHKDEHrvDV7b3^Ai*A&a)E@od!9 zdbjbm>d7a{-V?R#8{S6Y@>KC7Q^ zaB2@+n|Rjn_9=QOS=g$xajD(^b-nv!XVXeo*ycouRaf&`Z}jor+GN+$cP}ttq=+@8 z<^2$W^PI;NrFCFYlJM60wav+eKszQ5Rh``t``@6WFr zL-Z8Uj2O|g#7T`?VO3hTNg=JI)Y<$)wAI2#6l z<~iHUKmJ~cVK!n!8v%5U>C3x2hhZAWtAetxL^ zI{HU~Td^N1Iz=28|IP5NEqd?R@ABbpw@3|ST%PR`&WXOOc|+Z+X`vJD4S~6Jj7Vxr%t$)HryArguV2L)SfGO7Uhxm2kDwng&o3x2 z7k?Z0&ZrbMqvV%%<%13|ayy1>=4&-{7n*F49OHv49>)};^o8L$87eue&A(KXi|{X#Mn`DE?+>b8eR zBjC8PRyyZOjmu%Ci;_<5(oeD7>JyFLm5ZOR4Hcd4eSGq__nl<#Bk#uzysSSCV0#|l zUPCHh{r*c$Sv$J_q<5(QyzeJLzJ-IA)aUGtL2{nXfV0ZJzpIV6V=DyGj60sU?Pf^+ zF}j)2X7VxB!~Nds zmrU2iSWk#oqUPQ97sGc zdzty38JC@6I3Y3egIIWn*>8kHYC>^^>>Wo#(e$=3{>huaqm3;sn!ja)TT1`u6F02z zA@g#hl*F6mx0l<-Z~G$MR_VWXxDg`}QU)hCpu)9Qipg^+-%@G@ICS*SeY48wQkZ;e z%cJS>+#!}D$QO*bp|1>UFTDAO#5cP*Y4t0j#B#v*vy|*BtIpO`8M?Kvrk+>TgVW=( z9~3kFzq_vX$1$XWik>yVqLUa8S9XM5GgdD%29klSc$Ua=HjUf*#|GEibt9XU zP4=B-@7t3XG#b_W&*?KPFFZ~DUMZp2VE1@=HrJH9g z2>?8SVtf^QohK74+8GxW^7YTT0EySzE1~-S4!2#VA{MO3DfHl5Kuiw^LIMD&pM!%_ 
zT-TO0yE^(6_h9y}Z?>Z-o!no-kwmvQ{9*uD^p`u{=iGN}Z}iNjHQz0{y><|CqjzRr zDZ2IC`qwKzdlvQ1$COX5?fJg#--MpuV>?{g7+L6S$qfd6b9MW@{^Z4UaC4A%%F_Am zg%0xP0|1QV3cZK$eR^9? z?E!*|wgFYf3?MziZYhK=O*$=n%X`V>tLV2h>7{vxUikLnN7WZpi2Nz0_!`?M*5U4% zs2R1A?I4iee)ml5^Y#*G$tSKqF*A|29TmLOJEj+Y&1Q(4R9vNkcpk<;Z_aeoXie|h zJ}7*hM^|NR)8D(2Ex=e~`>g)P^xpMeU8GfKe50SuzRSw51`3{H1yC8=3`ffK0JJ0rp$}{or=1iP@h?WiqSbe% zUEQP8Un096tbIxE5@YdX6#YXL+~hOUnwVvN_@Gko>A3`qwzY zz76=ht|8XvVG+biAr%r-!`~srWvXKHnLl=vep%$3%WB+~glx>X-TF~Z{OYzML(EjD z_?@(?l|0=$6eS;zl#)%iPnQ|)(+ozk5`x=Av0xzu$o${~Xxt=X&s>_Z{$j^a$_zQ3TvX)Nue|6XeBNJa$o5vm7xUDsr6v?%_wH z$1$(6ADxt&{r5@3E{S$EjwFsfB_QL$`o71{eg@OS$LIg7{~Y8~ zRmK7na)WQL$FDr0g2j}8ggvk!TU^WYA3!=gVQ;hiH|CW7V~F+9=ak34KfHSW-Q<0I zls{PT>!_)2hr#0@+z|c8i_4ln9{&x)843OQ^!QiTV+i3fbnG!8|1pSL<*Q6jR_VFBxaa|j6Qj~a&vAAyixbtIi=LpE>0OlH)VS$TKcY9a}Il-5faEmQ5{4^l|l^DR67z9s@ zFi(t)P7KLU3?{&0iHJ-9f}W+1Ahr;4(TLnbc;+EI$s9o>5Hj_XGBAW3OcId|k#q_t zQIhiE$+-YT`C?LreR3fQk*AMn&RWL9{-e{)0BR`)MsN% zT~4Xb{8KuknMO%W9gE24Xr^;)fYU?6=|rRaq2NUQ*gdUB%Qqgao~CW^rO!Wsm!m@= ze2<}v%rI>Xgq%*lhk;U<|Lc0Zm!H0@i~*ao^r4Y*@Hm0fI8h3cu1)--jg;QO3gi=I zZxFfiiS1%Ux;FODPMnfIQa>MQ9*v|N#%sWFg3j^kV{xK&aq8xo+KZV&<8kr?`t( z&j}U~$!p5Y+#g@?D{zL{u9H@ZEQiqkj zS0rnWmC`2i`D1&Rup$(!m<~=pKhF3xeq0W#K+ZZxh?MWnG9e)UoEFbGmzdL|u@p>Z z7YP~s39q74dI6E3V}aL`%hr;^@9N;~$4W0NGx)<3ZTWK@oXaBp%g%Ii9SU-;%Ei=g zz@55tUAl8K{YhD95@UTXw4Rh`PJ%5bWur+%G9nX=$k@m$V0)k%mNFXlq?fN^)G6iB zSqd(oqMMNVB0v9meyUu3Y8RX-B`G+~*>qzlI0+qFBA0$~Ijvy45-SF;-2iW}v9!db zF9M2w-K+ZURM^U2)D-a8j4{JTH)D~MA-G5s=gWL4SL|F*WN|J67gT#sWN1(_DJ}6b z1(}LYIP*_6^5iU~#gZ|LOrE6p$cc>0C|oxN*@q7ISj_(KdMW**bLkol|7kZnAvt@u z4*%Z*ez%)h-x*fU79EI!nbeip?Uvau)fEQjgrH&);7P1KBr=(l8%<)AN01ju$e!Gw z@w^%`BK0t@0Rs|VP7$AYB3EBAou4nVoO(AYMM6GbQXqfAJVjLikm_71eDts+3_fpG zu<93^-;!3^9b1xAu-R2v$`7yF0E754HV@MsEUR}Yg-rof4uMrClnfRfVxMy{C4l(6 z0Q*di$ah-w)`AENc*+TnpRUV%yHqQu9Z&1dTrP++*@^d+$1*2rZx%TSW z7?bj-jO2-2mc68^I2sBUcQKRlKA; z>t|z{)lQoUOj`go%}vCvyEHBLbOyOLtt>Yg1y-+xWk65Uo5u_5bqWu(3(0kl|6?fH z#1#HAe=4y&@jIq(zi6A^{_^#zpn 
zR>pD`%X*`Rc80*d694i%0AddLghHRFKwrooB2-A!S5RrGcg9nw`V}6oDKC0bKlFkC zw8mQxF6gxdM*$v{Ev2oRH-%cgSPG0K=*O=8N=ssEUbgS552*U)kAbdML76hhbyXJk zyFT?i-fqeGk3iJedCI60=W5ygbD1bY9^j@BISPvBbXu8v;{?Ww`Oe~mk|Kr1i)9o_ z5`PZ@FT`^YkY3UFv_0l_WOh<=_SMDKW>>sK5k9=D?Lk+W((Rm6i!znoI*TBfV;y7t zME&Fn3F?x^URci>m~6100uS(;bX)>)z!juCzm2=GKJ@2 z96X6QxH~YAR3x1G6qu699aB?yVURl@{&N1{deVd{QxI-i-4(cr`#%e1it{HkouCrh2*@{$F^5LW!2Fl zF#X!9p>v|6UAv;c{>e&Fs&w+mKyvB}mwdOPilp%rugS&`+-R6O+Ye>HtZvhPztb-C zzzJ;W1$70>mT;qA;LL|z_kNGT2#=596hh+|bd0$#jMD8q-p!wxl|mHxlhJR{%>&Q; zBQSW*KT`*eL)@mvzpWeOJe_!THZf(>3(+#wsKxQl)e&e;hoIiA#IOSVeyryO{VEyV|)OJ+>=fZ6`2GaO*nYtIjCS zXm@@pvWES;Je$unD9f*C^|*;xF1U2%xzko?0zxyH;(sG(XnoB)5R z^nR;A!)txLwu?*4m#wGV!-hwv5^b0g3wnCLk55e~)Pr=(s|!h9Nz>JZ(_G1`lUDWZ z_sdhm8oKH76&}4aH;Wd;_ZD!7x5GXCgN*s%Hm}rm-+EO_5akLk7ruyXYFfF62oPd#*(#+KqqE49*} zODc+s^9AGCJ$h$&yj2a6t&~2wbqGeKz^s`2 z%z`(m4Q*Ptb6s^vT+8#h(S2l>`SIA7iTR{VzJ6ZIeoMh8N2w#xbu$w3Oq+uFBd1TO zdkxDMp4pY=(@^VMC8JPm=bYAyhl(`4iOwQK^CdABaP-)3Ef#FayL#s?o4u;^hmR9` zs_2ZA$4>RLLSscGK~H7LPdhETvu$cz?#E>dx5xy)pO!0WE}oEOde5=gB4IsL9b3ZK z8^5N5?E^3mSodB!3l+JrQpNJ7cdDo6OQ{||^U5LE#69PZAIwWRrz?H4du`ZaIwxCp z-h6#}GI(oBao+V}2g#|gz^Q+ZdDQ>U0cEG$d1j&Ce*5M9?U&>=hYPQ!ZrEs8O00Cg z3T#-vJkfNR_EAsw-FDC8J2O;AUFt5G_Q$X5tn0D0{6N1PF>Yp2pm?@$^{IAIx{g(cz1k<6@FPnzSknm%+iWows^^{v}4A8Ga|h{Au(y z^Ml@=^fwwS&>3seb%m`N!H(+(q?G8bSH$HCTb|17NhmAa#=jKM+H``;LuzlZduhtfma(2I~rWms2JZ*Omo#>^S@cN^6zw?K@f^LukVc>byG@x|WUfzkt? 
z-8`$6@}=I9#DbA8K?|eA8L7Zj*Oifv4}R~SOWT}28N9Q8ujJ!FQ{`91bS$0p<%_^L zRHz!1_^w$8vqGfN-6_)UJKSSM>7O&Yx_5pv#^s!wOLs4xsjKBWizitv3tJ3vS+ywi zEh!4UX1J_ZH;Cs808`6&2nqZG9~n~d|rR7EX));dvkX6lYNn4;w9DlIkGlZ z+3F^3IRW#`0{x9YyrQP+TRa9SjNVbt_UE5EJrg;;Z~ggto9l~9`_DRgTeyJ-yF*1c zzIr^5{f`b6aymp0;+aKFi=B?VM$&}A>!WW=F}6tnAP3ius@QaqUMT&`fzHXN*)oG{ z9^(lHO4$3B{*lhwE3;28POLt-F6Fk_RY1+SMMf$HRjt+ zXR&12rOC7K?de?CzhB(?er!#LZudN!)cxg7iP~+71t!D zF2g#c-Xg2&zQhE%@KGF2wjy#aoLn5WH=boy+Gd=6&7O$mZ(bgpkZ)>aygiN@^|ESc zE1XJhXj$YiRqTkXLzaz>NVQw5y)ss_(Riz_pc=c{=ZCRn>84w=y|x`Xww*cYYgeCz z=)6n}?^t5iHoC(Zq8-%X(9vmnOVHNPqdp}>_fOuYQt$S}gXUhF%GXMc`yIVs^1dza z+v)va>R>f+T|PeFPv^jW@B8CpLc4xSdob_l^m_;@86h=HFi4RRG8gj^HYAjXNFCHj zBr+AkD@8w|EQ~X4KXg_qJ!fWj;~o;ul;jNnMWD|g+%mC} z(7BQz`&u=nQF?Q)-s(K!Bcq}$qVq~44iPdZ&jX>P*rc2v(e_T$`p9=)>$ryjW2-y> zR-3cF3LzRp1|XRxhj6Kn`9J%ZSc@)CR@q*#+gHfET1a0^m|Y6IljdOjEFjn+m~R?( zWs%S1m;E~rEA_4qh4C}G&X(1-7n~_Y_$SS`4Zcw9+ne!E_J2u!@3qykd8QM%QE8_K z-qqCL{jv9mo7dGT?b#RS;M}izJ)eJs@_KO1weW?K@ec))l5=;&{E-3)Bqrgl8mH;9 zG))(L-z*_D)cF8poawe#ks4~G0%j&o=xo;Qe0+HRJfu5FtR|hChG8&9gXrgfw2t9|Y>UUIZmEjH4TL1r3J^F6 zrEr6b_J?fwWVAmh9FBxZ#9>fNau= zt8>2ruXK8+;V)S}?J%4*mUb1X%V8e+aW>=W$96O5U0Wl@r0XiWH&Vyhr37ZhMzxex zQmdzp;s#&=9}a}&AzZd^6(X%oAF#6AAUw(3z$xn-IL99{s#SiGk2`H0bq})-?+CK$9*JGa;m4L7DEEfgk z8U+)E1mB$krAEV1K~>;HnA|8U<6UxC@rj^XwTo>GxTwL|GH5y6fW?Id(lunzdB-oW z5!N!iCsoVyCJei1`dzcjll|hv(FeuABavS;6$Tv|^!=ARrB$8!ygh`?2c`0F%D+|c z{3vHtDjaI~n?J$3btoj(uR@OxFKE zP$P+R`JI~Y`N&PKVz+XtbK({BT9?$u-n&+;H46C$Ou9yuvnEjxYD@TJ$2UBjIj9;n zQLL-&8%^%XH^$!|&bcnvNaULU88b@TlqC2OQ1uO8ou-OZG{gutumkEWEk0Cb{JjFido9OfoAA=BX504h0>WV!u-FEWH(ya_JRArY z3j;7v0Jlq2bCE(}7*>1P3rjKc+!=ww?mNLZp6Rz`U9~s8%I=@6wDZqex8HW@v()Rs z9D}nIu)3qqZbVL2y1idqL}q%!q@(pgQugtp_O=mgWGsXOgJCcSz+ToHRNPGk*gnDV zty87iMcy#m&ZV1(AYSsRZlGZYBrab}iQje$2FOMNfcdC+m0mFDq!0ky`Oac*oRzV| z!PyE;meFp-!cZ9M_`$Yv(l{E3(g(k)g4mVTVSTmG;37)W&*&;naqKIO;{0T$2|1=8 zbOR%pu=xr2y4WiZ>aBBfL@F4yhUNQX$p3-l@kUsGbqL08Yfu}(By8=ZLJfFtz 
z=6SJ*ZD3fU(FdY5Xk@w(l2v;H$f}LWP18l{P83Yu?Zk6EJuZ<-I0_9^Z&w;&0kvNc zgWj5Xb#ikX=V#9qm?i+50Ac3tg9+On@T^2sN-al`mC)pvM;|&!eqaV(sZ^ND#oh~*w0oS`&V5#GT0q{|Ee2Q7LNkkEBb4?*fcn^XDB zu?xL=vGMv($+g~#S*J^)o@}Itg!3uh13Zxi;ED6`^4+>XoY4*huz|UTCd2$O4u>u5 zZJ44Vh@&zd)`D?xQWgqAUERR~De-jOmTXRO{rnc=FnW2eSI){#0xmVd(-tE&b^ zXUMtM3ytAx>oZ;qur~iH0`Fe*Bl09~404dLp+qcK5fG1-iV#Z(g^!5RfF>(?USf^> zBrI1Yu&+>R+=B3Kh=7gGV`1lA8V7+kFw-soSXj0F4q2gYXu1&CPv6X<^i$eXAHt`?N z08v8BdG3b;r6=QM#@TUm3=jAK^fcka9m3z-uxOmtjYbC)(=;fhFuo8OBhbzQyV_Eb8ywUYzjZlVsz^EgZhf=ZI!nNCy z^Sq8#)lij}Rzx?w2(TAo>oCk1H^y8pKIcxemi%oLNX7@~7en6X4jQoCq?&}#q`XmB z07eRn0s`}a1`>I=!vrsDQ^YCk{ghN_G!}%y^7HX9>%c?(Vef|wt~(&{KE z)4wzYp~QX>08o}*&_SF;w%oYTw9Fqb$OmA-NI%&T&IoJb-Iexm(S+(lZHW-u4FD94 zw;>_SH*k;sBsx-?9K$-_c|lLC@E-(ZeB&~U*_6JjXn~nh8Ue5gTz^c3H9wyP$5wLK z9`smM`#`nUi$vwt#&Dn=9yG|}y|tPEwLN6MRPcm^#+>AT$kfS1{#5=v#GnHO$FfI< zfOfDBwAB0o+|N~jaSND*bj4a5V$24BQNRv65Rr~K7FTgONLp#7z*{~Bi(^>OSVHYN zjKGe{;$ni|MJ!6+h{v0&JOzWIU?5^ub>(%dn4F=Df`J$Sgo^Dd#CINJ%+&?k%V94g zfmdc()bC(~Hl~Cy75x|F6_B}_oW;jsZx~LE+(`*s$^h0KR?tz=jR0eC%haDkj8iT~ zDjqP|D0uy-=s$m!pXMwcR1;)t59AaUs15{>fqpv)UYW%HLM$(p%E=~0%Oq+(1Tw}D zuzY#U{5+!h;48_(YW@HipxSLB4j2Z~!ANr-RjFV=04nARtz?nD8+S$d`9An^E}Z7` z8fU|1UPZ8nQsRBCl;ZlqjMWh2HOz9>3$<$izg`@+@kwADB-WrykO1_=q(C3`1`>gu zL?EX*h6@GbsLE1chw+R#2*kqJ4=~_-tF|)Xg+o(PATr?a%q>dKoY;3xFZ?^ z(}x;R!Gb~4BdnZCIROX}bEPCI z-Je-YN3d}37%e&i<@PEp8|CY>xPOd61L4N>KpIqa(2W({u42jRwqdQYjF&_Lzyts* z#hC$(ZTf(3Nq%`^E(9@H7;QE6CSc&F=R!_l0&olm_1wGPSU4O2fn$)T4R%AO-hh$O zC0O5_l>YMiyG^2wy|9x!KH9B%0UP&UPJ9rM*BPW^HNBqsBl>`+SHr)CzGuK*a-a&-AC zfCoVfDhCO*7y3g_6F@}Uw=F$Lw#F_61|0~CZY0;La)pcpH!s|nmj-PcSY<)W)8X7U zpCv#*ECcGL`!#IKo|`2*tCb4(wSPo19wU@5=#KKy_*K+(rPz()%zK()n!3(&D$lKq zVbMP~WyP(N)i7nn=69Ig=vH>y;Vcqgtfdoo@FHxO3t`JR0oM!VVr;+54cQw%Ff(uz0T23yB1l-c zxlxYcX4olK00j^@#fDQbAmxOJ#hz8Cp0{jm`!p7t5bQ@XbQEghx&c*+V;NO2`JV3x zIRw~VddZraluo!~=C5IQyPP-UTC z6S{Ur<90jk3w!C&DV^|n&BUAV5l=LYfRA&872{4}OsOM&yFw9c>+d*c5>YpfzALDq 
zpfIA@l1_Zc7DnZiXR@&5O7z=@(|OBsAt2gY&hDE;0^%Bt-Hc7LkRfp9Tr=`e8&AiYAVh#pP5=Fz`bDToa=1&hKYPt0R z2>UJyr3_7_@@Ut3_%F#0BtJFJ_daYU1@E%_48w3tNIwHlDvOKx0HlP<1rM-wO=RdI z#=#zP(Fw{d21vMXCk262ieQ-YU0U^+8S6=zg=x70i60ab7x)tQ8|lC>`yRn_*w>Xn zzJIZ_GHlKjb>VyI1meg{D8ffG?v2oenB zJl~DI^(!ChA#U?`0~+{w=97kpeY-7i$JO@;=108#Y`P0Am3Q;aE!t*+FGfM|4i(U$0^i?8z!f{cPSq9HN2m-`6^5gL5K3L4?moX$ zT@Vcx5&aDW$3XZ102JW4#t?w;3xooYGcY>W92fKDmc$rdgjWQhwm*%%0oxBBVI%@K zZns%TGAIM&FZ=@@ATKy?6<$==Q~#rdWwm%}Uj3ZT(>6E#h8J}iE|@LIz{TsO0ABM1 z!&>!gRWJ4Cif+8mwLcGMW`|p(^n0Bq&^2+`^>p99!CTX#rlD-70%4y6-3gO1Y~$T| zY8c_S4pIBprJ2w_oZ^L5(A8H+H)HQcopi*aoSF`_IqZg1U!+-@$dBvn8w;$Y`U za*W8%tJ}vwoe|fU2YT*7FkpFyN!kAQ6-nU@?BK69F)mEZCRPbFzcWPIML=d zw4~3BFdQS#V;``2x^?j@k9Zb2Y#OHtprzlOs(cA{ zCl5q5+z7$~y#;xdgmm0GgQmSMrYt9dMFQs|jsS)%E=q1w_pO5kG*}rN?Tbv#;u%=U zvJgPI*Fy75m+!Ri0VUkewJT`*!&)ys87v&Jg%0n=H&6FOtks(+nlXUoBi{VWyuJiO zbchd>H?Ygi?h7x~M(#(6W?odI)(FiW1G!8z zveg4-Hd>cYmCg9w^F2-Z%Y6(0u+p3>6CkD>RT!5&!4oFn zpV|rrXyt&s66t=a4vb)APTWYMZ#17&rXg&}Br%rEF)mrC?S+tyLbVvk7nA1n!PnXb zCuDMr2gha55fxZ~TSy$jt}ZBR;xgmv075FmcyOyYL*1fQrqJozn7r1Qn6>i6322HD z9%*l*ta|ZKUwLqtEnX#l0s{c4jDw*MU3 z&;cCL?bF)pIh)hk@7&m^OzV-H=RLXZNb~9Ry}3T=)xHH70CGkGzA=pqd#Siy3T%jKUbvqTziulzsBV~<-xHkNJVaK2h4Mlq8nJohJxI{#W78tN-)(9L}I9~%L-{J=gR`?cP3>b z(T0fYY)sdmQ3Eo+LDeiZ*c=g+j!cf+r0W~?ZVoMRJbhv?HSz$CSS2pA3y~pcDCoOY zpcpqS+F7s&Bw&LLiz;vXNA!^=8*FhXaS3%m6VxBTE(gS2eTi=BpWbNWo!~3-EUA`9>~Ir& zML(@#H$3~zj-(iP7sK#Sk+=I00Abqzuyt(QHWXa;B1EaLoqXgSArK!(!OBB(#DE|I zmGKdv+|HnxkJpLqgVbRhRoFijN;cTo`B54YB>7~aaaHq?a-tfb?v6WOysYM-BFyK# zsW%z3hjWD4DUd)BJN92L@%AX-4Da;{W_{63gz5RjSqxP|qziwtZfjs-fY;WKv+fPT z`YbpyFpzvM-Rh4q0S`a6a~>%GlP((6*{Mg1f)EmyhxO1Zv$wgLe&p?a ztz=;LmPioas(py-1FLR^`6fg9Y%w2(OG*Xj#T21E`}tCbq(oYI?|a3$=x4X99`Nik z8SV{DXHDAfKz85bw#9%YhSPPY|ucOz7Jx9`b=abM3S1KXA8GUMgLO8%0-VJb+VKlI)GI zjpz>7dUz48&xU6tJ=+xjtU1vZjL)EU8wQSgt&NWH59!Zj^Hh`2@ds`EUwJ-^Z#Jr662rJaX~Fn%d9;;#{WTw z{wTA+f?lLX?1P<2&o2?HMBhq5MHR-FN<`#FGk-{80x!Eo|GqRw?Y*vERl+ZGQAYO5 
z;|vJ%)UVX2`m0!Es-ERy()?3;9_NvIYha40;ltLM9n<()M22c*Z3#%_kE1}H!GIm# z)FM}h#k^;3RN&SA=pWCpjE=T?(P9Hcu}dBbU&6C{S6f`_FCB;j&<-Y}`a*5JYc@Wj zuUy%jus5>%vrPL9W74qvsV{fx)6O#{Gy}|-&MQxvlY%w10z{7U?p*al*?oAY0 zIUK0I`zb&y=KejC^BQxXm+)#hGV6cpq74kzvL4wPdy!dJ-;6%_e&eqeN5cX{Qb8r? zhXdv6EwlM}yWA+2;~cCX13kQp!KaNd5p>1Y43`&VJ9aXlFIOYO|>wF`Oo z-f!z&Z~uiEcU>Bx);7M4y?pjGK!{=ce2sDPEqdze#>>3P)i75Do~+KLgOIT7`zdB^ zExu8w!}pGt)>9LMgT2$GOw#>UYkqWEWcP!7j@ah3u)p7BQPao;wC&zbL z{xdiG#c+agsOfSIR9WQQt(fs6n!8u2k_L7&^FyCDgw&>t=}MCOliK%NGWuC#zKPF?<(>n35pP`A?ZddWiGyxkcXV z&0qE3V3RgP39;uM6`|>lOE2o!1$6k)RSsF1XiqnDoxaSx_omU>LJDWrlE5@?r|Y!E z1eW^sA!nhYy2O&RkwV3UyE`MKGZFu3jB(N7)YzgwHPKEKG|w+N_E6h?e-QNwvHy zK0uh&Wt2(TgPJY73f#d!qb{8aVBs1_oCH#3TjNL9U36-|tw5qGWFJ*D9WY*PS`mlO z_K0~n2csQHUh)F#j;U;H-AI?P(i?9+Y&nZC{&3yCX8rM6!HZj|!vm$Bc#(th_2B~N zQJnK$?vj^}Go7YBQns{In#Q0ZUoJPEd6$}2RFD;dA1ygbC}a23%o`0>#X&soJoK_y9&L`AN+|G8H3=cSA2 zy61nlGTyFPn4KZ)m~nr*OFHGy?G_Sdfv(`R35hFCt?q*O*cHeCAF8Ixvf0Okw33=x z(c?pc?eW>LI1@cLu1dxAVvMry;?IASKzERkA6M)-V1wIY-rC;7CCE8N@wJdiASrxp`aMNKQ8N*lS zCL<6D@#;_eInPgF`~!_T7zl*YK;Div5$6IdZ-u{C6&&F$FX)yrSXuplEQ1`4(` zX)T_=o*M5GY@*e%fskjXwxT)Q;bEn-n?SdJb~qK|A$gWs^q=)mx5!AV$NqPcq)qjn zbKb~ZH?0Vv;C_CNFZ|ahmi**wZhV0qwWi6{nT@_ob2Syc+h2Jydu!^SbJ4kZ5>w7O zjm<26dy~cga@0+Jq5-OE+2qWnWccPAJ+k4Yn)&%clQo1X!nLd;!>?Z0cW&)(xQsnr zZ^c{$oOsP^Ie4YjM8ZIuWYt`g^4!0>e1*XiB*Hc+H|QF&{1LWLW2;%3Q@3H)LmH?< zGv=Yuv}+zf$sqgfV4o!x9-jS}+9(O{d#0 zvWp%afwuJD4IIjzgull*z791hd#qkqyj;nDpMZfOZh~s`VLo*+=DfJL*TRbr^$f+X zlSL*(-=4j%Sa$hkQ-Y0gk8_TbRCWT^_$M2(CC$4h-SS;Qpx0vkmxPU$HPC=CiPUEK zhV+$p(~D34*ay`}e0aHHDK=;`w_+_~2&dOX*p@1mI(*`m^E|zVu0yLIg6KKR2~VKY zs#j`ANx!tH_SCtp7U9j|vHdm8Ec#o*Y@>5vzfEA>s-KDX4GD2B8Rqk1qP$-| z@mfBa9uK%UD0yK8^?;@#=OUbR&vKps+Edi^a^uX;k`s^@{7 zr%c_WA^=%_GF@rcg@_uIw|ql(8b7e+;17w|u>Rps%Oge9h_!0^z4&|(!SdO`bbt2J zZcy}6p$*SV_R741Qf1rSm=kA?tZnVNf(LPx6Bb#sII{_9&peOn8#QL3EoPx61m2!H z9?N&%K7T5g`LJuFPpZC z^v%iArz%YrLAl|rM5!4n#-N-5hRsPHfDn?-Bi=%w!m&ubguqzZX@nnE4kRvpRS zwGgPBXZ|>B#`n08djIB&idOwS@6)uKXLWH8x$^<6A>K8|e{|{rm*a*leOh*p#2;<_ 
zm8OAYAZ59r>S{oRzJ7;DM))3a~}uiR=e{M z{h9kpmbQ=)aw^ddbkb4~xKt_Q8Bg@H zk@aWa(8{He3W{>Y?$z9LdC^&N%JI^3`MduD?b4qIIu!7EYH#*&8&`0rLb1Pil71%= zR({}co^Gb*{H4H4F~?yK%2Ho>UJiaEwG~}L60J6MA8ZzlgL;X9Vv0%s19d=(zq5{K zk3-4HB%rk=EEq+$HL4VYU9-xm*tN*kHC<;pst81>JcHT!0;=ZSd5`%)n?&jo(Yy5+ zx~RF0w%aQH7@X6px=gYN3Bo4(02pBd(CRK_C(&f%cXEa_In*#M&;saHrJ6P6bV4PT zw(?5fAWLs&W_9>Bf+Oy=jc0+Q$kA4M_(t!YD>)L)?rG&N$W5Ih+H_G!e25`4w|ZS} z!%|yFTtWyr41?z2qwL7IpiXtt?Q5yYAz?;>{v({3EK#pN{+5d>#2Dh>s-j zc>Lvfo_yUTSi&YSSTSrYwNc{8ZaZCjd$lp}W_F5~XK>1rxw?<8OGYvt1Oit<#Zwdu zR0smS+j~{Wd%eqhj^&sQ;kygv`&0BgybA&*B!Vg6IX0xj8ZA+u^II(j>BH}$V3dO| zKqr;T2Alcd#6vtsMPXGm3h;nyt+V z_4B#oe4aY_uK3Kk@Y=8S{LsC~#85bBbkPu1~w07t0RU%h^nPbQ$> z)?4Z-(7;I@8%TO=WxGEJtV&gD&+vZeOu&R|q|RFYYIwh4>uMbjp0bwS^117lRI0gl<+4R}*H)ulsaC~ml&meR zW6h@RnoDa}Tv>175^D^>PyonD5}geFu}nd06q{y@I3J-PR;d z7c`;DST@szb7#+;Lv=>;`EzK{q)~?+-6zu|OOqa1dNkX$Y)Ft`%bv8G_v}ZwT~C7a zXt!_KuXhix6kIqYN|Ii$(jnv7F=fZFD`R$SJN9G9xGPhJ?D}=`*R7*ZuRgo@^WuSd zzrMnNJ$?7_=hwfVfBt{}{{ak8zyS#?(7*!`Oi;lE8Eo*seei=&pL_u5UV3|B#@~A51-Rc{iuo59V2YWcm|}eCMF#rt zxlcj|G0s@yjXCbv86!_T56}Mj=Jipp@!NQUwElu>!*2fK`uAjV$&?J zy7EdYvCfj}OS#B$8!ooMyy{K1&W?c`Y<_5>n;))eg2x~FCR}j7hZy`C!nF}xn;#PY z;c>zKAtzk&%Q25Uacr94`v-dHnFI7WNC$oN&`m!*b<;_2UG&!JDFyY=ODUao(Ltx9 zlv5JPL>1o?QRR2xRYB!<-;ozSdF6!961Z`}y04WE)Jl0R(((r{m+@TETK|>e*For>-;S6Oc#23zkiCp`WS=>~Wv_(-)W-I=i zSF(Z?u(VApb)(bT+(ai_;DQ(Nc*hyhh(>p$@f~YCqZ{9tMmVyujcSae9O?LuANT+U zK0pB<|L6lh`jG~L{9_;wiAX*!(vN+F0SbQ5$3sT)kb{)uBX@uWH1J@PXy^kEK>0~f za&nZL93?3~`AJWn5|o`JB@aZ|N>sM;3~{IjJmi5(cQz1-rC~Rs)*%IWgE**r6?y`=pqJYR00x~fCM8bK?ze;!V<1()v9ils#HBf zR;jAhBP?O8VWp}&yt-8*AOWgTUBWw}Q4D4j!x-_37s7-=*J9)~ug1{pUhDd>zy>z2 zd!=h(`^wjc9oDcj_yIh&zy&s}VGCbaLmak{SvG884Qu#9W--eJGO&RSpAEwnHf!0+ zR@SqbrR)~sP}98{F`QHUw9=!fk_a{@pO#;~KZP%5?)A zjN9Ab`t}XL1@0QGYlSEbARiX1=6AsxUh#(an)$(|Hnq5mWaM-zK3VU2cYzG}`s6Lh zK!#7AWMB181sChRZ+qVo-}d^%7kdlG2ahBUY#`L2HDjTI6)Gf&F7z7L_$WGx;Eo~$ zAqYkAa1hv`Qg;Ml#6-|=5ine04XYHz96s?nNE`$qj9A1P=J19)?9q6{pr>9OYFwgd 
zrnsnuDr<@ho80y$DIS?kY{^QM#@N)S0^ta3U2B!A%vK~EANli1vzb*jtGBRf zE}S~ItX?&$W6gzDr&`yzzV)efy9PF>0lQ9M=6I2vY-KN7W98KkHUppnbRuJn)wcE+ z(OGR@j1$}0PG>rIQPXdaQQX@Ww>X_-ZF8D1g$@oOL{aF_lDIqG3k@MQ5bW-EtOFe* z?l2L8unr+~R0!)Rf)EhSj)cn?;SJYl!xdiegC~5WB1JfkgV1kwl*1QFo+6*j^e3V2 zlq-MI=}>d()3lsP$zrie+N5j*Ft2sXMp%LoaQ^<(n}2nxU;XONuS!;%U$v@BxVe|V zdexuHysI}~LOfs~*LVqR>U$*yG3;trsb5_f!=UdhvX&M0WbZEQS(CQ2e5Wkl!P3eJSQ*4$$N1s{};fgC+`DFPhH4B_vBzO3$Za#h2GmG2;M&T zQCQHDmRJZx0QkV)vC)A)oE=0^I7iq2{{Ddv-2L$%IQ-qw@F1Aj9qXWB4CKbilx%FM z!YGbxEto8dwj!rIZY|aV!b}$Bo!R_cF?&2T} z=#H}lZ?ib-vxM!lS`7-JaMyOtv;MBMR_zO%@U*@l3@&R5lIysVE4g-Sy3DY;+JMx8 zE4i>s4KWY7eoGD05cGJ9@CeThR4?{muMhq352r~&>>=9ZM4Yaz+PqD@c;OHkQQgEX z85Z%{yiMH-Y@xCNCA2SvGLhcC{ttvQvEGh^qXe$uNRbp>EXEM7!~BimI1J!8EEQvn z9Y8GN5RM2o%oW#Y5r@pDx`@7{Xyw{MPmJO$$^tBgs!bL!iu#1Uc%iALs^^$4%w(?S zW=_mxZmq5n%(QW;#BAoIQL4mj%e0`4W`G##jOyep>#~mQ#%>+sg&oxq9^cU&=dlQj z0S4;94Y&ZfFzfC}>mP+J@(ypZ2=X5}Px2zK?+9|Y4AKoq%OU%&?@|p9@sPAC5)ToN z*n|!8hOHw{%_4{GB8P1aDiYY-fFy@a50A~+0I?-q@+CRuVrb6<847Blc;*rf%KH|I6TxqVT4II3K^8|0;h3QO z_RlMYpa|HZ2n>$H%reGGEZ|6tEQ(Re8_uMAn+U#wSbKclI!rIlh{OWILTAEMv^3h4N<|f^c*re*#J@r@6;BL zvedv0w*Cu#=yOv!wNssG_U@Ar1GGP{jX+8DR4I`h!hwSpv=dwPp(K<-8w?vRbjCPp z7Gcc8VC+^^s-zJx4%YA=xtZgT32 zplGQYljy#zs*sMYR4_`#HA#`K%BU0^pK(do$^xGhT{F0fh9#-u*QERiB5brk9VDrvS>vSVyEAAFn*akMUu1h1UtFx4|4M@@r*f1p>^(5C& zV*@p~NDcEKPh&4tQ8Us~FVzVKpkg}pWnmU3CFH!MZ9flj67lm8Ytq`{iJrzOpv28o z{;kaybmT}N)DyFBK{@dfH?hGcEQBmfqv{Xg`me)Mal~j57G+WX>XYU8U-)knX9lQ3avO1?7zFz%Dg$5FYhx2*I=mr%vqj^%=ah>fWvmB+Ct4OC#|P zVGq{t0&-3z&mm6|@Cc6y^K|hNwX-}HA&G4}Nme5j6(vb;IHyzCCN=SlH#>#GV#wIbrl5!`sK|?a z!fl@{Z|Tb`+7_qg_D!0~1lKC6E>IgkIO(V~>6#P>un{!jwQ#{~TszYUfZ(kh#&d~K z?Anp*_-Y<$xQ5&DONSv|KsR2Bfrim8?B?pRf(uYND-KR_3t~5Q{W0qZl<_(XW6e-ecNZY*G$ct6WKot-u}fnWRg6OtwTkW7p!bd8SlRYr5LIAjZSrPu z_KxqkCrOn+Yibb(6elb3-eNUEdu9_Iv=ccoYPS;N-WL{QEXK~S{L20>{}ir&vzA!j zSAGXh#sW^G?s6F2!cJ@|jCP7GdWyXYu#~?9PUZqG-Xs}o^vO23s;IPG!*NNwEa*Hl zGDn!poGt`USk0Og&H5Gz;=y64F6@BVOF!4@`1OYOHHW2nhwl+!nfN5_&L8I#PZ4j} z0hDMoc 
zZ^5;L#dUGp)#tb|s=UksFH_6{7X-D8%#yT9mF}&6K(2Clh?&`kbr|fZ8Hiu=9fNr6 zpgFAV%FeEN)qsmQyEAsXxlTC???8+6XqU87&0u|3Vb9Z1DfRSpOJjj8o*^%u?-003 zPq@7KPd7{QfcLWYP@paQvg?PAc{ZMcmU@dXXOT~#9hwn)68ZFoh2D*Q!7phw+7lzx zePxlvN|E7Qx@+~%Ed7mGHR=@K?|m&9fNixM=HZ-jp%x4fE6~KIyXb84R>|;IN8vVs z_tH#$>MC;7Nt%jp;gzkhYUUglNq-Kj8dqI$u1QBQ{;Hl4m$gypLQ|RNkq76I81zie zoZ;(eGd0<|tu0sUsG01Jp%;w7h+|D4CyxmM^}yL6oEh&Q`}(pBlJHWOA)^?u54N%I zaPl;AP!D^|Z{o3KzP zZ^IoMP&Y50P5n~M__#7}4d)pTf0wY^_@7sN)@l7g@+X^AfO-#l62r-!#%SBF4cI#y z+d5ml(kTYmt!D~ZRwq=o(|1BGOe@ci;FA3QsP+^~F+@wTx2b&L0Pa>v%td2B7lx5f ztm4OX+D+glF~_KG!U!pa3`~@9D$bo4o66@xaI7}dt+r8@J5zDl+{$P!N>BLT@!bgC z!M)otbi>*l)A3%_IyTo4ntzxb_3RzpPE7$7P7lw(-TAUWp6~n)wO}pq?i#x~-HPj! z)G&NJeV0&E^6?;&(&JRONUb1C59a>Im?5jHvTOb4fxbToTAK`U61BHfUwl6oQ5Wz@ z`Vu{GRewG2|rx> z_8GlQa(DRh;Zbu99jq-LOn0EcW(=Yr<n?L=#BsQ-?N|vG{=M1Nsh9ncJ}%0iH{v|w6P5l ztIyud7ouX7X%8&>vTvfiU;g`!R-vL@{jL_n4zAlvdi`E8>}jknY0TjIFaFz4eoagk z-EX5_6v|pIy#QjDEnBy8?Ygzg;4WOaa@oQq(BQ3y4i^^0rOTEsj1FxbymbrW#2E3C zEHPq4$q^+gkGNE_WC@ZbE0HKcas&wyAW4km1adN`N-Zu|7DZxYCeo!xmfS@0?ng0Z z&xBdEO6)4JV#b6OE2b4})~i^bJtLNNtT44#i=Fja)vB={R@Q8Bx3zt^i zxpUvjem#?3B#r_wr4;E!|e=t z{o>9mS+a$=cr}Dr{yX>e<-y}p|K0m`^T^7DyN@3~Hf{OCVe6j_Hvj$SpEdys7~p}~ z=J3>q-ggZg7GgpH0un|fIhE59 zJ|RJrmpSRA(@Qwvv;+`Kk;!IJZZ@S95Gp0rluJpxGs9F^S@l&_S$*}DS75ERCt1t{ z+SXOixCIyfS%uE<13YnT(S{p{3HBFmj~Ob=*Udfsm5i+%T{BOrm}jN@Q9=`eU-H}VolV7>zT z*Pk}+u#?1u?6kO!h3+JrqQVLjp>T*3#wbKPDzd}Gg(R9N1Q8epLBxeIx>$}c6%B+g zksTq#&_D?xl;q4*E_6^rSYk=#Km;vG(2yYILQ)bs(Ftc$PQ_$|Of~7GrcNv&Wdsl| z#Wem@OIj$A6ih0qbmmP<#FHm8V-YiqFw8JR)}L3gy|$x#dgWDDZPP6`S&PB|kEL$J zG3l#Y#8IkWml~Fu8+5qw1{|rX5yupHO~zTWkD2PKr;uf)X=H^VW>{yY1{T{IZM=aO zUVAmJ8enmJrk8A=0ahBOxanrkwYu-ld+)yg4m@%81fVT@xCHY{FzcDOU6kd1CndV< zZsh!S;&QhxFTQ9`j(*r=voF8i-xr`Z-Gn1=e*fw>@PPPbqs}T82%)N zU}f=rMyC!M^|B{*q`m#DzPH!VdbT|)^>auO9{u+34uP@560 z!VG5^u_|%#%Aux0mADOxQCbm3Rj8Piy}<)8c)8SJkoB0>5XMwbz)KuZ2f5KT#xJgW z)nzsp88~hM3!XF9X4-&9z{qMcouSQVHnY0Ug<%Swn#~kURhZay=61UYo+BR#$w*3) 
zch>REKJr04?(AY*!HAM4!v(JN{9-)Fz=e32x2|*jLVBWH&qBWQ3tyZsU+~k0fwHk6 z0EGj5{z=Dz=taI=`ZAaI37GhH3BPqj2!IOw5ITy04hz}O9SX$Q#h?kz{umLEMd%1Z ziyWrS1a6Fg?0CmJbc7`d4lNh8s0Bz4qKkG)DQ6#HXOt?E5NcQ{Nc{BRK?Z^lUKEXM zD*-~&aB_qqa1CoX@d-+jP!t;~bR|Vd2^D5SLoXflXkVI1OxPh6vt1=^!$1oX*G3h! zIinfTc!o2cF+{eR#Vjvv;-cn8H@29i7?JW@WwyYDUh$=KTUZQG2_u-G3QjLU{Z&!9 zz=b`sDm0-v++t$G8aTk=HgO16XQE0NsQ!gZbCygrG$n|Qv#l*(nLXV1uQUyTN2$;jIqSj8B}?x5QX83v?xk$ zjM^y->fuwuq~Uapd0noG>N>zh#tl!P1MU8%m)3|;GP`=CX3p9d*EFuIm0|-Siz*w) zxPgw4lU(CSbGk~!=5(R^%drrr20G|Qu6Ql7k&ldI=v4mlH~ZMf@QSw`bScl1x=*wZ+v4M>D$j_@#T$j%$Yyq zkn=a@QH^H|JJ>OgD@1__?4VfvV#EX{%p(f%kn}th_6|-* zVIGm@^u-uRgp#eOwIE6vFQBxGS!nPUwcteyT)<^r1gV0=rA7{F!DV+&P@mN3atnw{ zT!a`GN(u=HmIgwSE}_IHMW{y=u7GVOv}6fPAX9p)P=!ugAtp<>_7$vvZ6I9XBo5^+ zO5QXzB*Y`6Wcf-kWMGV7*zKiP0mi*$AOjk7W&Ul@AVx0$4sc@-0~(3iilPFnC}Z%= zE~Yz{;uMDkEYzh{cv(ZnGcJ#h)8UM8h{MOR;Nj1l4vr?y+~!utnI~+44j(twX*?4L zE6^MZAewFuB477(jppFyI7NsDe$nwiK}0Bq=AE z$$kCACmD1g0SJ&l34qZRgJKLaPy}>PPy4 zgdz-c0S|9*7jAG!@*sX0z=QtkiUSY@KkyFm5FtZgBBSRHp{9vf@F5381S#So9^x?$ z^bif<5!9dp2G9XHAejyj0s4eb?1Y#TfB?jIB()#`4xj=#-~#S6LPSC{A(0T+)m&Rr ze=A`GKQI6$a0Q*Fe)Z^4HsJ#SifvM0kU9ZwS1<+G#)?t!Ze!vTPqP$0R)5I$(whI4D)b7E?3>G06pZ z@daV9EKitm>W2kxH5h+01t{kR7%%`%um)B@lUU#h3s3<`I4XicEVN@8nF2;!#Fc;H z1O#BD{#c-XR{lI8f>YbkL+&x^v=3W3+QDNL4$FgoQ+QmwyVVfw~;ovMn1i zb_XDtZP)>bx-Dr25z9ag9q@HxxC>+F0A+SO9&i9=hY@VpEve)SZYB=;F%BhA06(xF zr5XUnFb?oDFX8YFawh`?Z~@;is+lMOQ9uLV;0+D3cmQHBG*UlIPy(X&4nbgx1MmaO z#34+O0mj*z=5%V7#ttxmizw0|xQUFb1|si}BSABJk$M2XwhNJJ04^|V29ZwGpsCvO zY`ky)6F_Et)dFq!Bs!z6&_@sjQ8W$Z64zG%QQ$&V@T^VXeo4Uu@gRQkfNj_&Lr$Uo z1WbT#C;$Lauw6*uCDlhz8M`%6aiY)A3ppAL!VnCK83R+HM0CS&T%iMa7z{I@m^wfV z(cq#7Py)Tsl2%kGI$I1dAP;Q77;Ufxa`3ELx&|0P09P<6Qbl4xXewsJbF@kaR9GsF z5l61#a$dR`bEJxs|D!+{;y)PjTR(CTEdT)! 
zKn)&Y3*Y$x4p9)fph3G(3zEv7&84{Fn0yc50>8F10s#>oK|(-6G$rvhEAb8$zyK~( zZZtpuDBylJu>@DJ01|)!l_$LNWdv8?1Hroh>&9IT#X|^HeMj(|Ge8W-APms(494Jq zBU=o;umTTYkuG&oYf}|05TdeX67UY~kZAy_dMUyV?J$u12mlL!oD?zyATdfQ@SC zP7tvWKf^&iauM1Ee<*PU3{U}AAVX1ri&UTmNKpmLQiQw6{s05Oen6lmrT2NvJfKHF z6f%*)2xSy2K{h-3k{f9^#Q?)1Y8GsRC@&e2F`#dXX#&Pz3^>|k#+H&ekCB5qIVqvyaYh*iT!1;5qZv$HM|5g@UQz{2V1zgD-t*9eSD*w3%>)yC0TJK?^=KwCwB8rW zLiY6oMgQoF))`@a0U5^yVI&z)IfJg^7i=(Lj)SB!9t8^Ugkwz^j3dWM z*@Ilr26mbkR31Q8IN6T$?<+%ZL9F+%Xn>!9BZ@MzZ?1q=|d z+-gAJlxhiNArX=?60t#s49-CEogToQ_=FK2aIU0zm_x?RDBT#@<69x#0HhI#aiCuahYQ@2hdXhw0u1BdbAguksD@82QnE4 z8BhRl9Hdn~8h5-%i-a3}e79iE^iA(O*70P`6D1Jv0MxJy?_mOIZXOP?9WtP}m1$VK z5Kk?z0x6IK;H`D0WIXH93*FOMt~(B-jxVkYx+h@h-$M>35CAU#12jMb0U!Z1@B^fH z+Tq|0HGsOd+aI^t+p5k4y&DA)!UQl7+!cZjrPWOCFzT>=F+!la7w``D!y%^?BNBo@ zHsT@W^gugP5YOlU%cZ{L{#!wl%reN1u0Wy-nhF9FqzhC}j?}eWBh(}Myub73H1Tlp z>!t-u(8oUT-d__BCMXYDPz3R?n?67jMKLx$P{8u{eqqujFL4CslqVq!7S#{|4`2*z zgTeuj0>;2_GRzDzyaEXDkqu5L`St=PAOksYhP?m~#hwWxC~ROsg9MKi`vEV`mNjnN z&||;=%fmQM2snwuW(ymTYTjVUFu)0&HEL9ALYW0D^cLbxN+LB zbt_%~Kmq{*02DZ(P1|m4*uE8qF)llA?AXBb#>S4F5FzK;F}ZM+T@WTG2>f^_B#3$; z?t&B%GA}`edDwAhy%GTUfr)pD2x7qj^Fxn8A_g??ofvMxWfxm;v89<Bv0g4i9jl zmfCFDMZ*?6aKMFIcG*Q27YvY~1zc(g#sLVZ!7P_sdc?5DTXM0b#av>DCy6DFFhYqX zuDF1JD~|qr=Y#@$sNzZ{sPfd^y|#ue*4`^F}v<@OtQ#oF~I>3a;QZHNLzq$gBiI*$Bg+^UUwk14ymYiF=Y<4H_-5t=A`lLSAHucwq)? 
znkeU+afZ+#f&{)9kGyoA4o91J^g|E5c4#1=ge%@*A^`@jXy=C}%wwVnuZ6h6gaJ-q zVnGBeVuFbw@^fbhchGxJ2qMf|a2WM*%Xs23{(=E~pfgS+mN3FjI+37L2_^D;=Z9yW5w#d&RzRSH{$ONa zID$lDh@lwxV@Tiu7Gy96QG=K2Ix4spH zC&YnWBk{oiZot4TNW==s%au`}MU^cCv4JZ2)mn5ChbX+oUsI%F6|HzhEM^fe_83^n zbRhu;bif0lQvqirZ~$F!rUhbrfB--s#@awYjH5vS)?-l)Fy$g zm4E>RfWh1tumJSL<0U0H0V)|Vgmw(*doOuM+Vm5IzTL+@K^Rcp+@Z?~!Olgt$N>lr zFaZxh-~f*Soda6XBMx-{0s)wS7(G*q4;UbfDtbT;D#sZYxkyNe!v&CvbR;^}2~F+E zfC*MO0WNTY1yqR%NC4|PjF}|}MX zfe!uT&6Mq6&jP%Kf_Jpz9T9-#+%B+6@0I{T>?lIsp7aA;4&gn*B*J|L)VHf0^Bv`g z!H``jy`Z1%pQOm)Tjk#9S{Hn9DuU5@cv8-R1iAUvVaVsAbv823_9=t14Q6c#4tujdl9J9Lin^E1qLvR@nq;qKn4ID;6ZX)5P|Hp7&EBH zP;H_?k}TlHPKfIi^cmGHJfT}9WMB%{picl0pa59i!9zX*5uE%g(Jj;p7j~i&7{+B7ET95b$QDuEu)$Ym)hjqr$wyYTl9c>PUI4&{VqN`eSjSq{cX6x=ZX+4g zv|wYX5!vW+k+EEiPS`Erg~)W#0vSjQkC?&5{x2j}GJ@fz9JLisI>htHNFIkbxz*Wj zvXKqsOgkLlKHAKX#>wh-@*3+1!qwIz1o^&WJxnkUdgfLi@pg>~^GFYRiV$xm-3&vc>DD0Vvd&ol|=g~S(RJb4+ zu1CC(or&*+Cp{VOOGBnY72mHtFd_5u;IY~yi=uQhr z5Mvl47*dDd0I4nvU^F@EO)dsbqfS1lPaRaHLbEVL#Z!f>xgKl?gG2%w)r*K^DsiAg z9L)8FT+v|)Y5`&1YY3|;eBsb7T;Wdski`~=*d!{jY6(K1DDOJL+qmcmRzw>JgBmu!{q+)(H~M zz=aL7IL|1#AZZZ`fsAyDKnw|u)-e$wX&qcJ5++$Fn8TBZf+#eJr#ykEGf@(yh5yTTBOM*O65+=zLb^!jLfyk(WP=*0&6j31tMqvg_2^3#2DPdp+jVcsqIEG9i zyHhy`GZTn{V1^lJ2&(9nkLZ=MN(q&qi7Du+p6Dv6GQ6AEf-s~jTR9d#TQspEHDzIy zH(&!8f+00PyCq3A0$m;!BSptV3lU*UoV!ihEjtYpa+7aG0PYeYwUL`P&Sx?nxY zNQM)c4B0ao4Adjga5g-eyWe<>3x|~%4&^|;>mUzS1P|qs zKIFp=T^x_^c$($IhHEencc`!VJCA-_Km9n5yy-vt5T#?X4|LF*1tF&TYd7*kj|kB} zVqypWdcYPzoY&C>*8Uk1|MD=@IS~wLI2Wln40#>IS;q`%CmuPu9Vr}JFp@mco|jva zI>{4;VuFV{0wBN>h`JsoffFH{6Eukv7#zszX+fiFs3p*XBX|cGC7z>_`n@X z9`32Z>nSni)Pf?@o+U_vm$Q@E!IOCS0jH}JVki}6aE3eqFfvYRG}NGr9Vu*wE{7f3q`p!kTV^h&o7OS3Fd6Ft#>skKkMql}?Fy>wA8x}(^V z43bIy#o;Ty>C?sLTfR?HOsHX*m7vPPH|rIU z7)mxgmN{gWqD-^|T^3`x31ty0Q_YD(e98z7(X~h+3T2>Ior$T6ij^3ORUtL61U*|# zwTN(n!9r1C9adr$41ig^E~-nte63~GL}qnXz5K-3pe=9XjUELg@AFYx4ANcHR-OK_ zwqUHcX%t4#{7k*Uzx(PNs}Uw?>^ES7($MU`ZS=PZ>8~=~%?`s?#32&PDV*Ho(s|?& 
zeic*QY@H0;QqPc`;N;UKp`8;f6NW;%d^E^At)7Nb5~PdRip9C#yg`fl0UiuOMHL1> z;Ycc!PmUbfVh}=+Ei*J5pZF}49c+j<5Q;ZD7M}Qt!&{+6(^;MkD;DZm1m()GD#}9K ziFT<H(I z+4zMM!3~s2%pesHZuM4e%~ovn#pb&P$9#}s?7wog#;eJ{`f?Csv=6KK%>K+wQes*V zgKIa0dk0@QCw`sBa_m=f{8A7DF?oE)4FpFZfkz*)kPvG*gS1oZkyCvL>#EY`!LvK_t|ZB~M5Up_M1 z*Wv}-ctz}pn(UZcA*EYh+}7$+u6j$4Y5XO=2~GWzQiL<5z%@p8%^P64#`E~U(3BAV zG7$?|Ck)o$k~3J=8OIP?CkcF=5$;mgRMQN`;5QvIg2XwOvy;usQGH$ol{{rx zQ10d4LsSbdl!~ZqqND_@?OlskecDw8to~%sc|l(|j$=q-mUBkqL@Z~dh0v}tRYsI!e9q^?lEk@KUmKmh-_yhv?PIeQTY}D` z3z3ZbE#&Xx#pTP^Ts-7%_1|wzWJs3Af@9afCE$cpk7Xp_`ue59wMM@&xRB0LU(i6$ z{ak_--8KEvHQh}g0XfTAoqZh=#9_7|(FM{CvL%sWm}4l0>c@!!&W+V!8;sZ?E0gQw zp6aPxM9nGjA>y1mS*kW-va{+Sj^-wYhdC6`#6!xWW!j(E!Yo!$a`p+Nd_#9p3wZA0 zkicH2O(9-Y#6xT#yH3@R@WQ^zS`yV~zz%FH+6VqRhOD}jUxJ<(O?=~hy(T=u8|PD<+Mf+t5dGe}PGiHf)ol4{ zrezCKd)1|tYrcb*WJ$a?7Hk7Qa4Mq2l9@|fqu*xDnD&kEJVpjzNX-1DXv*FVyQS#; zEsxQzWC_`12EGq;)jxF2WWG@kgd1FibN*ySCRYm4KyqBi)oGE?;M~J;MAK%8!;(6Dh5W9ooBF`}E{hq~btMDBj<9@xFu(w!(F`(b1rVwT+M_(bBT`_8f( zVrNz}C1!}VAqOlm2wjkYKat`c{o(xAII-3h8Yus!PurTwy}?WO=j#nV@XJ3U$bQ?sm74( z*_V{Y*w>JVQezh_NYSRTh9pUw`s(NR&wZZz-@VU0&pDs-zUT9L9qX9YiV!@oEs&WP zz`6N`j=tZDKPkDln~X`1I?DK}cBfn>(r(1T*~C?W=i^WFsxI#Z1=zIy7sY?Cyt#AU zs78F+aQ)aYvT*r?V7t<3)8pq-|8aM<_;tnY6f`=GX|D(L4`*k9xH+Xyca7M;iE_ZN zh_4xR`4BCS;eUsTTUr?$hJSHB)fI!gxteV6hS-dM-xN z+vOLsxJ_k&F#4S0TH+jVL08W3rmNd+prXhff;k=xZxAyKXS;>X@pJt@} z_t(rbmm_&TYCXEpv47#f+ja27@}~fNI7sR8ZO2Dfg=y3w^PKC+VefQ7c@9U1(s+)N zvkB4okxU^~-zN>xQe(N;<8-9_?vhd3pDhu`6BRXQ_&y!(;ZHQvN> zy(WCT?ZLc;?`RhM>@%#^6YMfi{w6qnngr+B!_gr*J);LHT=JR)X<{++ zRacMFG0b%F;$f7v!g?nq^U+R0VV3UuJ>P8A{h1>!4tg@hxr;0Js;?QYak*;Gdo34x zk;-a?JWaj6mw5W-Tv_ony&)LjmT*(X%`K2)K-wUtb=lQ$T-xpA=^lp2Yn6~6#aYxq z=1F(I)YkKL)TFpTo1nmlujhkol%hS)w3f@h<+IKf5{M<5yun_Y9c;2PPmb9ob>`Gyxh?-avn00f9t}h*K(*!yBnNG z+xypgu3Y*fL!i=x5POW|Cl8}>olnghS~|nRo$}6yhDST1MRG9*ZTFT7JllHLN`6gQ z$#<;Cs%Weg*=DKs>8fU{{HELF9On>xdF}7+_^hMZe--F~4c+1#ZEtvVO-Xzly!MoT zz(`b~zt)Xco@nQcm#*GvSA-1`7nkOIBMx4c`q6yc=C4Q9y*?QjX4H)dEckl2Q{}R^ 
z==Ja}NyOW5ljgw2=cNm;Ji_n4tb>MFJoq*g)nt7_T+KDIG_;w4|0y5adMQyn{=>`V zQK5)?e4Jt{zY(YDpW*iuZ~y$PfAnJXm&~3k91`n{t|@zW_FZ1YJZ|ytk@XK97i=&V zWbT_{y(@*z-o2~)H_Gt#Kb~Q#KPry6INSYLn`Nr50Pv#RV?qZ{s;@!+q}ypDxt)gf z%~At|`J%7LxP1}(*XFvz6U31;Vu_Cw^8WH*nOij-4#W6fuFVd-c`nBMtnV8WZFm0^ zzIgYEb3eYHOCg-d?M#96B{g;#<1bWlCUhsZFNxD2xkr zUctf5eSp0C;zz=Qhs2f~PyMFOWfzcc9|iNp>RCOxAC;V4`O8M(KWpEC2+bl%y|40= zlVv9!rq&P_S1nJ!@jdiuA7UlHX1SUc2<5Eg5;tV*vl@}U2RwfLqf;$8ssgtD&tl8e zr&|&eG_x!BhC7eH{^EE=e|84{^3Dv3qM54n^v|1*1WOSgcyqK1zN${XS+d@>rSqAr z)5uNRm>Adk%-kmCko*_g*by&xPtKeXtner&>1a6b`@>+!58mv#u~?Fd1mgk)he@w@ zWiDsFwgDY^=Kkk5uln4Nd*Wn`>oYg%(q>!>1X_pHRS-|Cg^y;_n9$p88QKw2wH}*Y(ybuUTCfNX7BfgSlO@@Ux*SNMC->7YBpqJ1@>f_{`8}CbaJ=Etw(d1BDUy z&Jr5JBn;~Zy*~1+N|32^R4xnp?$jK^?3@iB&G8`h6Xj>$hkyq6OzOgh&_LgZ_Yv{s zxP>pwgnrv|7RXqi5Ans>GS~0l&Wy^(Kiv=(?)`PU;#b%Q$We3Q?a=&+OHL{0+D+=S zmh2)4;*aL)* za^Q!m#?2#4%0|5W@wd)D302t@n?-VJ*6Tert^9T)$M$OKVd{4Cy~eFSN_xkZr2yG; zq$7blbm3w4)L}wi#rJvLe3CJpV;{>D><>OLbI^ZL7O#o)N2Nr@LgJWMExid+u5)R% zSe1%alcz+ebLr4*^Y-IL&!?d-&3!MAmw|65??7>YvVdtdOi%T(QyW5#`qPy{I<-%8Hlm0AOs(1L zJn&<02;ur*L`yPX%x2fS8_z?wEjtXJm8PaBF0wy*(hbOkscF21i}IJwNpn~3Btxv> z+w2bmANY8WvT!SS;orRGJShbghN}vP&;pa(7|bsK8%0Y&2l8CioPW2b^+@m8+1zh8 zf4=)@#GCl+@r`df@5FgrX`F_e1ABD`$8T=fU)!IpI@oJ8=-;F~>U-I95PP2ZpXZMq zjn|VAKX@++;eT)$quG4ddd7x6AIh}fZXCq-j$PbNyn12b&!vR^}#abzqeUjBs^p7ziKu@pQZM1I{izWFC~BP`S;(}bK`4@JI8)coHqJa@g?aaFNkT9 zehF1~^rEEKql8)B$-TSJ|LZY*^v87k()SOPiyi)#B}}N~W1FOFKOcSgZIfaf;!+qlUm+z5@Ux03`DP6<~D|KY`k*MFrbaJI!z$Zd48h zT&FyapTOWV#R<^7CJ&wB*Qpa6C&xQzK=%9By=?A5ocX$l$-5!IG+EFRBSuIDY2Q zT^~urlB5t5OJE*_Ey=A*5>mlE@G2CZB%>`Uh)d>kV+uqtdAyfo)0n)YCUR~|GNE|X zP7wi7#XoDZ*9tk%N0P|XOG-_r$w!j)r+k&0ZmDb^UWZV)qJ7Dt!&|jcUxOACqB=-? 
zctGP4Nj!;!G4<23uGYLn()O0pj#$}8q{!d_1c^=&s`L^1K_=5BO%$Xt>O+Tb zZep||f`yonYlOfi#BghqOI5^i76J+w6^$cDsn$&^uW!~I184%aB+>B|+ZlhmH!F6} zYJ}L*cAG2qKiJ4$U?NwodQ_DBfz7x7Ry-m*d4&-1)LHBpV}p1|N0ONNut3Z z6b?jf^QFkVq9l>FKbGWL7gU{-6`sWYayF>a{n3%0Ey=%3#TSWBqj^51Y))VBR6HEU~-ZC za*>GoXnauQc3?CjD7s)k_yGtN#Zl~EbZl_B^ue<6;BtISkYW7C6T>43gHlEXPMDk1 z2Bs!X+5s-2tCYcdN}W&A!}<&60ZB{s7w{&|O+Lb4h}BYxRm%$PfIs-wO@9;ikd4Ox zKUPw3ye5T0;#tG-di(Mqe0bHxc)f#DciH^r!MxUlR23i56~@fS5N~RgI#(?b8DRRX z$~u6Kr1s7NXAFYVnO(wh$y zO9F{FN@0Tiv^O`jel%8QE>|K#H4CI}ftz#!ml@uEVgSe>b1v3Z&g8M*u}!I3%40)t zO>OR#SSPqi)yTk0KRsflDtbt~Nm9f^`kHwSS%2jqi|*6-%15lO$+5}Dk?upx^RE!tTcaf1!+^!F_R{RzLMs?O2wR}&HD%laz#>FA1@7N<}ZakrFWYZ4M2YKB3$X*Rc; zKEHY67zk4A31~)-x2Ehe-lZ{g;CFKlzf*`Pk3|^#wQ!){& z!awSX4U49MD=V*pFTZM7af?`ums}yMn7>+=@UA_Y7T_)F>!uLmu{}`Gq*AE9v5~uq z?cCl}3BeLK-0K27$jsA-Kq^n2PaCt$d%{m4kSbfJAoOL&D6rNzgfIv?(>YZBa3gRN zf8Abn?0RGQ^}4->jp}ZJ1;_8{U&8ug0vW8-2R6k9+pCoo8YNjcc|I%!E9W>s?znc_wM0uVU6+wVWB2 z-7RUMzDFJ<5ZOK(YXkiX@{$U^Qk$!C{#^yWGLjK{SfeG8Z&f0HJ&Mwpvdma&oo1;@ zWU0Np%_B_|qsvMPlmV_w5mHy?@ss+rG@7{OP`@$SwxKk-5u6z*(;T_q*Ye-yMwPbK z0eEx2^Wck%#ofO~S`&jLuk5jLsPi13!Y1PSsWNCm!c8-fM(z zU;O)LEA64A<0*<_@!Q68!8tLXbKc2|imXyvHXICfVNr&vOl2eCD6SSWsbv)Ozu z{wTk%50Sie+1D!$yQh9u!TIQDWF7vQdb+YNfKgX*F zCWN(_2uy*VT)+LBEcFDhQ042#@U5OK=+P~Zn(W+qVjn~lYeiX}7`XENB&|um)35HJ zbLi3uI3ICfn_=cY*Xe<2C6$<{Wq*G8hCsTUqgvGJ=zFHwx|MEm&!i-j9QxTf@>}cL zCY(DyJ?ossy&7KKFwymIX_PR&#C2psgMfE@cFOuOzxDUe&U~0SbNr~L@ij?`N(ANL zm$QBVjI%dW^t=mMWg=;KfYoakPBKkwSJa!jF ze$~qG|Frs`sbIfhedo7TeV5hGHiaf%!_?DznQP_VUsNsyR;DNg2Zl?D`Jsp)3HI&Z zi35`bG0r;Q&s%7@$R#or6PY&E`$ln91tjqYBmt#%FX@c)xjeT<9qL)_#A5 z`)8|R0>Mt`lmFy5f6w?kCf$b5chaqsjXs^ND{3{lUFD)M#5os4NnAYcsnL6Ub?M-o zYFB*W$AHhdZJ#q^MV_mOUhVjFJzB&7Pw0{<*p<1{ujzAXqe9@Xl}&Pm)WE>y#sQ7G z7eI{fkLdWIFMHOiBNc1vZhu4F{$6*x9UfsAwB4nTCJEr~Ru64C4=Kr;e19ykS;*iN|N0reQlax?zKm-}K-EH_5`JC6kam>v9k0EGG%8k%&P%?yk7l*MYIyD{KUy_y4 zyu9c{zLV5MxcBN~o=%%nV3m5(J(}o+d4UUkK-hb%_J=phSD&>mxZ%6_+ul9AmlmP; z?ot00m77tq_sI%v!c7Wm+OeTO^~E|D#X5skvwg_7wJ}U%`QTXdTVGEwRl>)#`>&}L 
zS|2=U?!fuVlw2muv(EU5M)++<{b-~n|JBPa3esGciDs9MA+j$1ZH}@kvh4hWdw1}!{Rnpi z(x5r(?XP!Ws9gR;}%8Qz}auEYSV6+RR7vIDKn% zx?263fHKomFUJ2iRnzif=WjULz9MmN1@y8oXo_(mRY*cX(_z}-d^rD+P>opu1cKH z<;&U`9o^d>;y$CleSQmlCj7WiCgr#9>)lda>XI$&+wac6XJsZAM&F%}U7x+Rd=FGG ze2NcGF5r^n_v|*n-%q%I$Ide$a$zWibNb8$-AKu;Lv6=y8{Woetjy~8L5P!o{1(5@ z=mVa@RjX{#UWg|jyVB_Ur`J6R|FD;met+AT>$!aN@}>X&{Qmj#-%-}zEf6kK9;4oX zP~ai2Gvr$=EeD{rWn@H(P!W^CA>RTvfx^k@CaB{{!7wVZX}iqNewM4u&q(xksh_)b zM~!zTTz(`=H_>Snqa2ejlB>;pEbEy40N!})#=dDoZGn2G6(e;&~^n%8l=hT-o zx)($h74H-=eBlcGdrgYXQ?WKr+n!M-n|r3n&KZ(iVoSvSDZX&E{#_Tj=f~da*{Y2K z9I+Axzio?DW4F9GCF07`v$LNKa;0=mYJY0coFE=MqB%7r1vos{>tN@I`L}`Z&&YEO5ioaX59)l`x#l-$xwM zY@nr>a4Jd@1}|&a=Hb%o{9tCn#nF7y)NHlrrH$OL>E^lG+qL>WNduPVBB%damf?=C zwF#pwAHxriyUwH#>aIg&cokD7M64>iy40;J3o>^L9qXnBZF=8?TV^yjplm`Ks-7w2 zbgz(^lFi8th)<21v)n|MxPg*GJYv$O{myq$Cb9FkudTy@+zmAnp!G>Pc;zw*h@J^s=Wx89j# z!9mycq>fxLe*FvfboVUKNmAxN{BrfXQ+rNj zSp$#+=@@74MvTha#|}lzTE@;mkUjk*=!m6e#r)yyC*?u7mnYkTvr}?$qzlaesYfP4 z!lDaal+WOWxN{rr!uYCAJbWb_|F>54Gu6{?xSOZ%Ky8hfMK9t>n3p5}wonMrO!C;B>ana}JCZg?% zY}wtco@ZRMw_M+4HxY}6m3Ql6%<_&1(49xub`Z?_kL_Y&Q}kwK8zuq1w^83h|Iv!jdFDEG&rYY|Y}RHXBU8Nvs^WUlP3K!QcDA%!U3+{5!9|^6OPg zXalDav9WCOZ)!nhfo&z`Y>>mmYwDl86T#czox)`fgiLV}#R=88x?=|oZ> zMZjHUQcH#)>7LhjIML6RiEoA+I9+(N_!=hDosj!9E%(hwG{>8A`ld4SM!rd%da>?P zXj}AUsiz;LmtnGho4D3lIU*rLJkLT@wEKPwiUtHYy-sqf?#^%6bca8pM%7CvbWK~A zb===X^J7*onxC8$6A1Yp9Uophe||>L< z&uebHcngsD&RW zB`w1n6Us=j$|;!k@kTd^1A4L;lcnupq9v>9^}~nfDgf1u@?lh13yS>e?s+G~*OPRA z);}golu$R3Ua@&6EW6rC>j&&a=frvob22z!AWdg3&SC}M>V((*8M8`&UH$Y?xuC1z zfn0{4c|7M5Z#|cYtfx$zOV@;3=XdD=%N6_Yo?)IJJRTLGn-Vicsc_P`ZCO{%Lmn{a zW%-&E@kGZ|kxq7s+FcyVXTzK>`Rer|zd7sIssH@_kE7;zTh@=v#ErK47AY)`5MPvg zm#03dN5dCPw_oR6lwDDJzk)mk7SOK8qbAy>Y+^Qlx~p^7Cs$`YcB$fiGyAn+I8THU zednX^n)&mYrsXTS2k>vcv+v|A@BTjGpd=oA9n(bNi^#n7)pL1er`)LLL1wP!jR4pY z=!9D0Ojoa-W#qD$q+z|b;Xl3Tc)#Y4JT}@-_~a+FyC?Vi)4g6X-=As@u?rfudFA9?&!94W;J^hFLvHo-)i4pD7T;gd;+3A5?GN?H~q|) zEac&+82p?6)hu1`<>eBkz~s&s<6y0pu6ETV(l^8RuY`?PGaM75&l$+`Bqg@_QSASI 
zUZC9Vc`BZZJfC$iRLjC$J*Kn$ySM*allq;hMf#{#yIw?8_RhD{@}K?=mWzK>(64yD zxarPXI&${hck`lXIWsx)>4ciQ$ox~b9>Y!7w0%9-Im`dt$g&^>Wh&5J?aTYc^rHkG;4CY}+oJKGuG18Jvj6pa0*EgS}F3Fi?OJe9% z{gj%P2ERDnC5~%8{ky8Ae+ipB8>N<{vIFbenT_pOOWqiA;5lY{W@9;1{&ukK7~Pzn zB#Um6?aX7}8I`uPza7#=wVfq%=d!ubJaF4D+&T>vm0J^2WbTD3u334_ItV85@*WCo`+he1qKZGxs&iPJW1U6s^w}{cTdPDT7dIgh~t=HyM8sT_Qj6M@(>D zd{XmSGmDH#y)eU?pH;iqH&GLR)l7R(xUcoH+s6c#!NKix{g{lM6*(zchuyYTX(b2V zea3>y^FZS&-k4EKp>eq==7JGOaxq7ai_p>TKVG>a@QlVC{ydCr440Nxxsi>Wi?Ti7 zU$LDU#2GdqSnbHCOA~z|icjRZ4ryEp+gC6Uhc_^Zc^|GcoclTcW&E>AS|`yu%yBqd z^<8)sF<%q5nW;a-T^XY>jLXoK%0PVN)^$?wd-mKhh5uAamP+g2cRmsdV;|{5?z1zl zzRPR+n;&yi&bDQp?Fu{Vzbuh7AdG|gH<{+Z@?fR+Eu z=Rji;nw*TzcEsOKr6u_qSPh^pebBizyksAAp9Y!?LT7KttsNoA1|#wq2qhbME)8Mi ztaX|K&7nPhpr&c^LTpH$w14)ySEOQ5+Lg^gVtjhULX|j^A-)4eO=m2ZQL#)ab}bn` zF?uLSfKT{DVB3i4&q*(Rx?xw5vE1D44D<@kIZ%^l1u&1?GxOo$USpemG&WZ>$@m+t zrJkZxJK38`gD)&1))pnwSQ0MNI`fMX1vo5|Vf}FtQAX3NOo6xIumyV(u{`&UC;q0h zCCoSRN`r%p7rKk}(lx8XjBk1a3cKVj-LruHqh{flgDAY$5r3BbD=6b(ys`%OOUE;I z;Fl1wTDB4^8qtP>99ot!EY3DYlD;Js`;rrL*k;NACjl3l*iNd(6k#@aM(s$6@}Q6s ztLCX6%>t&+28rXqZpIGR1_o0Xg(=2hEbD}UF^tVAn(OSw%LYlagx$bHNh^4=FFqJ7 z#Y-_T@qq|Y0R;vpS&}Z>z^Oz7WaBtf<(M z1TB^8FqJku){zE6P(cLOJ8ewFCAQ~nW42qQve$F&qgNcDwQv=hvlIZ^g7SWocd}*x zPzIpg4FJ=qw;&jT23*1s4y0C}psH1Nh-#^XD3uX&sW{3_YLDoq`ie2dEFO4R3|e8V zD$$((2}1leUDP4ye`&5ttp{Z^mwzugWv}XlnuI*k3W8HXX#q1>?I000w2YyR{@j-) z1%DHKKXU+6@X{FLLNTgYRrr=XH(Z z)R~i=f-{3iY!*U+2B-rN%+Dyj@p-5wr7n3Sfy&+$ zN*zx=Kq~=&HiHvG13GcMzi>f6aK|oCDS8Y}1uCG;Htd}PC(xg(QzNdz z094q}eL>j5fcP#wB8?5@VnOYVQythp}!aI@~N4xBqP9Ln>!H0J%X+=fFYQjd>~$Eq%0fMaJR4S*7GgH(~yETM8vCOQ;<3 zJ<344I_~3^Ty1P90+(tw&v~vI-r|~S^&Ltb(al@#eb{h-XXd#akYUm+kp7_$0RXwN zq=Mxzm+nGig+c3ky6}_d@)vzeoXs}nENxT0nE6qPuL~rt3Z`<6(^?TsM)dD*r#<;i zTUeYh0Md@r>__vAc7TP9wOXm-&XHt>9lUG;%Dr^5MB_4@#K8l&-+FQ4pS?bXt#|7M z6wH7!?;HYTI5NvsPaucyMFhk{J%XOJ8CMCwXprnF0SpV=r4@uCpp~egzocVUi>Gt) zI1LvA$W`cu9$CP~NJCxW)k;VMt5D5Y>oygH$Emy`9x?AMQo~`FK%~QRI@cOuPX*Zm z08%@ZfA%ptTmif<%Y6k-r2^J8tI1>#o(4Y{d_um;fWmNyLrIq7z9TF^d}6+;8sPo3 
z}$Hw-oFwp7%&`#a=jbNn5 zH(iUL@wy^$*lcG{Gbv|R-z{?Y3c#wcY96_8rO{4KU^x2rQsFpkDlYf@vnfXlYn?Ot zsi_=ECprBY94H+4VU`ywlw(&DW6J`pS-y0ia!){iV_F*j<#{2Lcd8V1#8ks<(Mrk~Ma8Jwi*HCF!Zd$@j+)DW_)Lsr8Q>Sk zu9-}Rr_QLPu{jX{2k{4|zvUBlh67fH7@=QZyUx7=(2iAUtN;)$Y`2i`hwi) zZW=389wHU!1h`f5R~kfJA5zh(In&G%_(MaiJryX>WG;|G?DP>#oK@`}f}o3~2VfP1 zIR+1Ana0+AbcT4^z#uH6gO8MpK1w(;fNmk3`NIYO9LV$s_&VF9 z+!!o`L(Kck*aLt)6+XdV6I1Q}L=%s2IP8Fm7-;%p2Y@JSkG{{vbt%09{ld>6ZxVXk zQd~X21#bdsT8)_M8lEpSPJjC><@93vaQq)W6$ydwei2P>qXO#M5N+3&CIU?$2H26E za;p3W6j|&dXo_RL^&iGGfk0TC`*HGXJ0Rbt<=~}dOPnVIX2Swd0GOT-;m4Q{$^6uE zQ%32zWL6*=!vI@W@pj!2;G$MMeQ!B>9srE}@30MTM#&ow2*6li^Q!mp_+WlZ?woKE zWFKL)e_@7*fB`sr%SJK>yjT$mLyohh25ACrc`nfumc;`BDAR=$v%YVZ^KsvhuPGJYo9eiHUBPe2Zdx$QF&h|8<}T&f!rk3<0s();$)4-hbc zF-4!-{!0&h$TeJi_|Ho+)Tje9F24BV=o|Ochi@84eo3mNMf)^ND%Ms%$Wlr^q?m1E zH7?oyiRO>b{UE3a+j3X+8^9U^bzAO4Auen2Z|`l&)R)E>9ECK-m3FEO3U|(Na42PS zv_#SG)dRhD@3G24oo@W01Zrn_@|MG+hyVTg_3hJ#=Z_x!`v<@XY#LN37(dYwA*+g3+%>n1_4DUlQ|Gz2GvK@CHN~t z5JmZlON9(*q!0)9X2ggCRw!H53G~n)NW#S#OYDbl*dSoBRAi9^7mbGWx}Ilz2pn4W zQuT+u=GI{4%^hT^YUb@)VK35l;e@I~F%^L^_4+YbEU(}y_Iwh*H9_1xkAw3F0Whn) zn4}&=K%%Cs2#Mnx4cJ@jRb$u-)Wk-HQ0P}al&gD(F~?U&5R_jekZ5KYfL+;LY>MxyaP+VmOsL*>+D-hgFFO(gO(D69*DA39 zq0@QMZsHEQi>5H7u9G`8wZHbOXBx*34sl~NpP0aw>4gCUGFSdEua4THJ)ep6B9DMM z@xVs|(Ka$bjDqKinjdbr8&*m+AKi@*zCHVWCv=RUUnTuA!)`R51P$Aq4w9{}o# zV2D>)T4RwN$u|w-4uYaw)(`8k-5OA;;8}@F$!WSCJ{(i&Hcr2D4JnJb9G78@6xDz< z2`}h?G2{>8@nanF{KJ}rn{Y17d2Bq4%V7n=+oGKPC7A!?-ud-uM+D)#%dMcBLaDwN z<)C|vV@EHpsAX~>%lHw58iFC>XcAXwCI@84cmsP8Km^xa4^5P>3ktUHwX{BH07LL7 zB^f121Tdd%*+uOIjhk>=+#|z;*wAxqCB%vJt8lI;ju#@_@W-MV{6#Y4o0eu0w&{S= z1GALyWNwL1RL=V?9J%KYOVV$@M&UxJE9W0bK{(E3-ggHN#dSfw%{3?V=|E~5&FplH zZn5|~{xl^N011>#8QA3zl0yPnr>EU9+jI`366>^x0FsLIYge8DaamrtiYUiH6COyJ z$ekOTd^eCgCjK<{L>cNH`3?X^8H0%WCsDa>6=QqT!MFIzBj{b?4e+2dW58&EH7c9R z1&JSvHaiF$jkIZ@T$wmQv{KP z5F0=NNmNPcyiEjzaL5ljEQawl;1lU&vMnK}T7b`|_s{NOHtjVO8pdN17j6oPcMY>N>!R*EfgX>gPRg5gp*#Y{%3N0CAEU6Sr%^Iue zC8|9z>r95H2)E~50jSe{5@FS&y4;<3u4J_&rKh}(e@yD{Dc$Sj8=Nl5+nB4#z80T7 
z#(_TAfs~(=g~w%o4k!|WzBlyKdzw$;YMlrw?&kTRRScSG^! zRS}FH!K%%YtSs&f@g3_P*{ANi#-dsZJ5&X8)h3*))2`xb%t$L zfXI6;3w@9Gx~kF~bEh&2zL**p`sbx2ZEL{Ph-$W07$jKk3nsj7SX|z|y?W z&YCO`zB4g|GH=ETI)DJsAZy>CC!ft_*ssW6L=dkK$W>0Tyuoqt@y_2$#qyi~!Jkyl zd=R-uUE|A4MR>nMP6(Rk5i}R2Pv{N+wvLBfVYi>gL4%C**?ug?KeIkgvW!dp6(jkXO zpwmXim^LK5vnr^jBmnWtgFM0zIraP(;a7i?y=Qk%KAs@)Pzh%8YX$DtLX=Kp6`y#H zVm$EoAn)-eJAq{=juUQ+5_ab@yU*0!Ie!D^84DG)+Tk&91_X_jvrVBT)0Vlj7&Awn zX-6a?L877tqK<-m){G@_%Bbf#mg8*p)AjF=<^2&(?C3n5F1Cm?gA2qMH=K! zyqTcHr0`oapcayXh1$He(I71ra9Rs$i#uWXmFJKHDdPyvu5+^*h8hk)Gk+dwuHrFR zR57a&Y|w_bz%V`_q7Xw;2PJP0oHXUSw)id`uks z0Z7CW;ruQ<4TBJ46<<2=@n`Jh@(Uu4O##%$z(S!MP7AnL2PBR{O7U_vzbLg0oK-uA z_>_d^m~4I*%RmucXmaXc0nFx#V&Hj>`@a%En!>g?r6F<O$eXiTtZA!JHa+Ntl^5PS{0{QZY;61+v1QOPim=Rz zWO}TQ+%ak`rkde)%PGH=I1G)LBEqaNAFgubeHRS+dNC?l_>V5;=J$&;^`znPj(lr)Je0cSZt)-uwBZ=w4gpe z1o@gn=7Iv`2In{RBU1); zVm4al*vYewjx(|)%;V*hW6Y_5+VKF!6lap7d}K_*DOtJm0hd zirak^OmH(pj^FcMg#|;uN~f8_DSJ}g^)e^4pSvn7RyY#pp(>^R zGql>Y-Lg?ey?feXmPtdD6-bAQkQ^UqHK~7Z(lmAC;S;l7o$k61>CC)NXHlaDcDnAG zN{2&^ggeVlI+tH0V5|1%N@92RJRkiqzzHQ>;pxzgLY4?5P^ygAZ2Q3aQR+6u2a`rZdP5gkrSLGWJ{8AkULo(5UOM3}92IQ(91!*5t z^X~B+o8T>(rH&bX>tb3vx?h_Q+B~ z+FG*|NV1viBBl2H^4sAtf=R88>XB0;4KNemm;CVe{3oY*HrPBnzPaY6EsAQ>zj}Gz z^(EajtKv9IWRKnAf77>CA07gI|G3Wl{v^zfsyVcZ(fQA_Yw@;1bvyf**W_e7;bisp z)&9P(I zDVhG_N)%PUM?ARuhQ9ccq!FQROxOOb72Zzc119^4(%Qhr^qd1&I{XDaXmCzEd;SvF zL|~_*hSKXK>M^2MML|wN;vY)4QF?t^dox;=5;XtBOLj(MC?Tjx;d?;*{b#hn zxXZiUUT(+VXAkPO$k$#^$c=#|O%8ptRT}9$Xuf`@S&Mux_VIH$Nqfg)lDPKzhc zl9rsG_WOh9cf&T#FYj&3*#6fP|2{)J8HRJYjwnCP~$EN(sDNPy=mc8o!N6~#pv-$pg0MC$EK@fx*v162& zMOBm7JN7D#n62$=SKCC4P!zR`#;Bqws;a6QTNOo7RBdXP4%OxNc>cLh{>eE>&dGhQ z>$*Sh*SqarX;1Xa%a!eyec62)TE*L%GZE_Ztxz&@g~8n?F;0?)w5taMcgj#JyWLPA8ki)ov`1`|ku#?IPI}ROc1U zTuVok{9(H{2$AwKgc~AA-Dw&}SGMAk#>|K#81U`R_k;hP-c-}l2(W6?{eUxv9% z-2LCMt;Oqw{{d{IErlrdP_uH0hxA%loI1mTCn16(9@)*&SuSDAv8TkrZ zvh8Yot!(iabnVdHv{_T}K~pBsd}epcE?xW<52|C{diOvt`quDMyPC;L(|byWafs8c zM-NSLtp5<#*?1b=U)Yu>NGbB2cOR6HQ^Z*TephZo 
zb>x6PyY{ZsIwjjZE=u?UHnCljM>r-_f^nNyHphS^eDh6#nqNUx?Dyq88oQ~j!O%-T zw4~8@B=@XAzQF5frs{AB!=fIu3dE-EtclOavT@0i`pqYNz{71Y1`A}CYQloK3@|u| zfOiv)Su7EQXO*pL!m|r5Ff-zrXsc|fD!>F?9)@QTr?SX%8zdnm&>YF978cY#q+dF> z5&vU=N=A$?+DJDQZ^nu&vL7Cj%(In{laVkbCF@DP=jD;fC*vr&1iiV+M-LJ@cH|#s zRhZqa$cmG{Tan50&AculPJTpLKbEA_0^@mH-`dsk?Xk`esXOm1#|9q=W2duKcjj z&?I9sOIM2Z4_>Sm^Y%DiiF|dS6P<=S`zExTkC0z%zXo^Sxt?}c@VinPm+rB|Ji6j* z)4xo~Vpp?TUE=SjpYi{_GrZs)`N8M8d-S;xI3s2s$Vip5_pU*m3LJg9U>MJ>gDw}g z?rUB~1k|exiuC3=R$PA=Ia8gpNVPWk8($JG@vZttaMzI@e=^i|!Jb#4ZUmO*{lMPi zy{z)+6ganLKH?j%?IZxif*);p{JD9!LO~J#&dj9#=?To7t@1S+yK*$sx5pw8Cvkz- z8S-n7TOy=N<97Pc-#VHalgka%(`zi9@)k(#_K z{>1BB)hAjdafk!68147yRn*6j!Am~@+ZjD+H@k(95P$RCfIv{BOrVK4*J1am5q~dRhC|*I>?3MK4w%m#<&ZJ@hpdzsK`lfZohLXG{<| zTZlL|frBTEC)hqlT+GZ8T%{FR!<4G>fy94Zoag$3)_0 zcYZ068O{^^xKD*Mb)ascIgOA>!uGi;3A$Sv_%)=uSlmHp=ih8~*7s`ne0s`C3k zu20V48>ZkB!L+hAWDev^(EEVz=cjGSq z9&3?cHOcN>tK$U46XU!TQ@q_%Ov>t(a$avSt{&pCFSy^#QIX2sgVfvPZDcIR8UK zrl~>WNm?H4OY*seg{G2PQ%laH&Dw}t&&-gsHsY^Y4oc-z9s{xayHSF zCKeD;@mWynp}G(a<;T=?Q^Onj0G!&UDpz*uYom}OX18^wm{s~vtna--C#!@?JXL(i zdP`Q#PQk`V=8?g4EvZ6&*jMnav#$@EYoWPQE~2T8S%0 zKza`nbnoa+RjX+myAo72N!#M8ft@ukj`4`B-+NYJC2~izO>cAjlXv6({kp5?{nJggU~a^3CMEPN)S&rv$na-}QbaBBlW_NoQAJf+ zg!{S##nRJQvuHgwRcIoZ!gU8_t{iYK&ehQFj%2@c-Eh-adll_|baS9|^#`ZzGq~PL z`v!*gcYX2+l1*HfO6Qjd6o96k8nT+Rn2!mk*Yx-n+CCqc*Oc2rzxn=9Io79VUR|+&Pt1(r)0 zx(vnWU!y%~0v$S9h0!nAotXN+J!iYV72*=`K5mh}b=-l~n0V)X3MLVy{%wY`vOKN) zFmj+(a+h)qk^EsI@xoTjX~%3hz)0}^m02M?y&y&7;mo04> zI9>K&2>p2Ocq#DUaJlOH-AEIS>-Q7o6wTgX&d_9?ag}-hHj$R*knnnlF2_8Pfh-ok z_AR43meb266UClS;y$BG=J?#hggK=^yW@EqnC{PX*(kbj|1KxJn5zJpIvLv#N5xt8 z&@#M4CW>eo42cX%_wf6MN!}iDDLhzEO$U^9`wnO=PHmGlTcDdeKL@nCBM>jgqD7^i zy+~Ckym9nYkWTweP8Cr{7iOgv4$(5nsrjfJj9HF)Nve7-YbqmEpT*dU|+51 zZ(R}L@PTtB`!gD*$pi(qEe7Kg+$um|onpiFqURVy)1;uopDcAP(5smA*W+}*>nSzm zLL9#}g5s*VhF{w*^u4ADagdl>*fX52w7mo534rokax+{aIL;OKPuCdQvgW1AuzvZ-#gHPc$%1aQI|;e%MH zdY?q7sY&okzQ&cIhD(`?^kd^a>tJgCrievE^1Pyax0l@CGl 
zNRHRq8T{08_VdKcqXq~sA%!**uogB!L+ig4fwHo8n3y1RqFkO2Equ}#Cz;7LDaa?TlH#P zLv}15O&9W5S3}N)mqmK0V>m_#enTjzN5-bgI{Kuk5v*vgonGHuTosX5A20is4Q%_Y zepo(Z6fC2}^&!8z@fGYul7KAY!0 zW9e>-h9EIe|Ip-RX($QsVEJb9vf<*ogU6b~qNmp4hL49I3hIYU_H|wqL_^fJ67~(j zYFO%*eULiK)uRoM1H-e2UQ1sfOZyyGziTWViY$Hkk#OK}RT&LdMN^fqF7NZ^@;9#L z>@WQ^Yk(NP|K%lYP;r$^Nut!R6%Oc49CZ3Oz^`q%Q{{ zp=UIX=gG1^idj@J8QMlX8yolaf6J-_WiA7X9agIxCJ4)bRkc1kMSgT~H$&;UiEDlo zOLlv{vGQ(tMNG9t(gY-VV^unIRknIn+JsZmBtf!iRn{s~3X>ptpM)+WZAPR>$E-f8 zBqx;}rB zfzi5=rnj-)y0M41sjs&_G7(>+Y4`3W&SQttb zSzJ*&1c~Z_F*o$tj(3Pq2}CESoWCbbBAjg2V2U{W0LncH3wE@MZHR@ zSEaH@vSZuo9w50flAPvRsjrv39tfk{)>;$)ZUn2h%?b4s*R+m9GU1!nTlE5-dF(!uVZxIGo@p|5GzPSZCO*&HeKi*F zOeXQE5|iyAg@l6IZ$d*@Jc5zQ15tsyK6^W7@44hND6`>PxWOF^9SPoZ@C|hE-Rlj8 zhM<$h=v1|s&8f{zztfFxpFIAa2Z#6u&E4`1Iof+)p+<4-HFF&TxlS-& z5*KHW%=};M(K3Qq|ID_3*K8$`fpGw^3X+;D1I~bwvIe&9aUT8+zLw>9O>}YP9djVO7z$O?194sev46Oxe7x;->AG^;A?MFkzUQHB zL#xq+DJgWXBme7Sa+cp1J6VjKN7Z2=RhH?!Yd79LZF`4ACU}!KS za7=V#R4-6F<-eWMz)Md8$yR$FM*9n=J~IRR-lE`-{1FSHU{BwmVE#?R!64Pp2)ux= zx|ZLrOz>yTV2dvYdq(T4eZjjk!Jn0at(|r)-W?nqL|j4z`oqyb0S}HPjDKZD?lw;z znJIrgl@IwtOU*bR^1JcJpGE(d-B+Jo+J;=+zGHRvQyt#X1LrDHGrN}VMuqweZh=3C zVCYo2_HA~qqmVC$AzVkH|BEU)I1GJ9LI_059*6QMgZ}$6UR@S;wLL8E=T3Fk(VeWH zYxmJN-~IeDdqg{d=-*!LGaB}ZMnZ>g^q;txb z@3%rue>6sZ@8W6}lg~Ao{hdtFrX8x$sDB(*{hW^PI9^jeQ(HH;o*9M)yxwB^@+J4! zzx&IVuDjF?m>0q+I_zqE*w4`OO3{M_@f?29ngi^dOWYV;E+X>e)@Avmj<9=85_P9P z>0gtswzEtL+--Cm1Q097^-pHMtX)>xeV%;ss_iSIZT<4>*Qd93UmOIqb?u&C3NRno z{djv9bNg%eG0-_G(2;XvC~A*j^vkAtaR>5iCn$D*FmNaDTkh*yS;`xe`PBO#eoc2> zEw%PF`}_+pKviS;jkAt;@cimNU+^<%Wc}sf{p5fK<{x+{iyoDYL&`Z6^#ISt%xN~8 zC4TxKvBIPrE|Xr=pU!`Sb&bX?Iha18l{_+ZREY68cb;@*?(xP@#$ftCI&(FwP>nd0 zbp+b<6I9l_oijK0-rPpjFK3l$&F9*Ls_y;MD;>Q4?;a4hsG4YK&q6Wx+CauE=C~oHEEe?j^AcA?y10=bDa*!4kX_Rsng~~QE>A+b#&}W% z!)KSN$h2-x0e|w>EF9bUzHvNfgEI{a6Jhq!S5Gj8XUer`jT@kmk|QcoD%45ibFmQ< zL<#XuT#dqi`n#DDF={wi($FSjyc;&EefqpayqDE! 
zLF~PY(?ax|AfAn5eNJ}y)=q=Kax3WgQCa-I34tZbSYg3ZEGSOleB5C+xL4Q@GqjQ- zls3DPv>&DQr4SwkT%mf!I429blPe@*;G;Da`k;QaR|c0zx|cqLH{A=*X47hG%ny8T zV=iPWCv7Gj#Poi^{*2Nb&}XA2SYGOPnR382#QIv3_PncYF2okuJLv8s9u)_B^ba7e z?>jKL#=h~1px=~v_1)d_oTtD8Qz@Ioi(f|Qa}SHJ4-0x1Nk<`lO2dEq`;+ zHipFkq9R_xJZl ziF79Ih=m@o?7|=1#mRPw9<{|;NpmyJYc^RbN%oT_=yG*w)0z8KHd*6pb|ZNgIFjDy z8{&K%3JoYOeD}{U*;kK*>m=n`>x%z?l$y2o3b+Jz=LwvVJgs^Z!}I^WVJ|DLcuO-KK8R|&gx^sFNSh4WAhOhB&#ADql z`SJKBpN_c<6aAx{(mk})li0{+m_K8@P*e)a%lAb**g~`iM(Ruwz8ez}A<1B}rKO5U zddbHwnwzw|%1)}6{=mE5pO{#M_E|?R-|uyBgo@wiHk3UGa4Bd^cv`=g?qj>n^R|l# zNe?F~5tll#@xlk+{go0x%l09^66 zuNm=CeOfdoymHNX=&_K@+mwq;T#+?zM1)uOlg0$|BS^@Sb=sid`Yiu?kArAr8FRSB zl}EP~e?AD(7z>|ueZ;$6Df*SJ5Y-$oe)-c6ag~zH^wai_<1h8LmDCD>91l=sq8rF& z+BB_#@y*>Zo4GS%%;c_$Ew|yVT$O_KTtTD_tGL23&s+7DSX-g(?`@w=B5c0muoF^6 zdfN{lHSNi8hQGJ1$IF!aOUtg`dtlWCNCcJHQf{xlzmkTnc?g6nH7L5;-6KA8wZ#

IXSK>Dm^#ZfjhkY~UE3kW|_w z^fn}!gM@eoZj>}Lzr53W<;M55^wawPT)#F~+?es|JG;R{BhcsrVS6K%(gQr=>uOPeIGzTtNEZ7;hX{8g{P#@bm|;HN6aJ=qM;y>%)y5~xw- zAk=m4l($@e_BwYuu&_?rZ(wJMWv3CFAt`^dJ>{&PGEli@RN%76To#3&fkDB1* zUIC-e+DoR6OQ1)>t5UKqpTt$3c22avc{S(;{%w6rYmhXk!GHP3$^v=EYi>mEvEbrK zx3KF4WwmtiDALF$oXvAriZ9xrrjnp9^84jNn@uLGU6~-dHCC4ze7g6O{ za8YDD&)U*8e*1Ajd+Q|G!=;6H0|9!;~fe)4bwJs84%S?1_!xvcVa8Dv-rt()UY7`z4$7#?Rz5i*CJE0Y909K)D zQb!kJO-cjuk=v-#>WTlEaerMHbek^oYq^Jh;EPmwI^317_v@Q>;3vPq@-?G2<34fL zYgo6dh>(G`366L%)!El;XQDCd}%uqv5oD_@Yd?{OYT zyJ(VqawGfw{cFQh7yQ4KU3-MHYe9esMTebsgM*3)SIGP3hF1Rb#^`U*o{Oln~G48hB_sTCqth>F9OWz%u{^Ka~ zbmw)6=8#Pe__uQP{9EQ;M)U1csh?$ImdBS18XQ~eO|)Zf?KZEIk6Kr$L68RX3?JQ~ z#90w!s#_8O^)kyXsnAV8*Z$RyF=OuZ0zRg0K<1)gU%qK>KE>X!YVJo9LQTrn2n`;h zBXYCS2ZVe7OG-C4P30n*Zoj&F!cuI1l4x}}Qd|Q3y^wn{^{xoBFdCS2;}h$?$hfP4 ztAb#8{3}~JBE#NiewyW8b-bCAgP960&8sj2!f}ss(`Y_1>-$Y{WU~0<>5P#0!6uEt zpRo^4<_H`VSk6+%f*`%+1YB4FKA;v6!yDwgE#k5re8dWHDKqP7+kMOp{y@q z_QXEcexlV(B>SK1tY<&hR_4RvOu;D$vlVSHcj0Q@WPG@=qQY9CudkR^YE8VHd6u_8RtIr8ej|mpO5h^u##bL3spAXrjbwWJ8EIZ<=+~oP0 z46)!YR(-`dm>GboqFeCB*+Tg*4TJ4f!GiB$dew3hBd(?A(k)(Pb#FWL6C5X_YR#<& zOa6W11j-QOrnwMr^?`iO{xPjRM&VFRTZ^9 z+Qj#EnaSdE+`XvfX&Rr@a-k=Dd-it8Iwl5up}z1rf!|DG!Z2LKEPQOH+?&F}{VZ^X z@3d5`qbc8=e{M(DaId8M@~YFVasfd-CD}~G zdwXx_)}@s$oi!nEX;C9(&OPD*bHwut4pn{9u(!}Oc?Fa1I;=vz;smfQ(^@kH(z^wP zeP~#T9Xc?OyG(ACU*3w2UoQ=uW@-AmV<65XlFICcPc_vRRgb3|>gtPen2N>v8?{y& zqQPzaMnkzw-LZ11EqzM!xgEj~&iQwxpZt;O-2%=@!Xxt5d8q`H%ov%+ns2%kC|

z=#CMZGMNX;0U4qcmrySkY%Pdl2&N1dH{_}O1Nqj^DZ6YJLJMJp`aFTLVviyx|eQTn;ihjDkmAw`nLq=%4;vZ7Nj&IUcPZe3 z{L1^#O^^0oUEwE~8a_`ZQ`Nw00|{)sAlB2s#<>EsfdbGM72Scbu)MrmM&42(L%zzg z(O-iYwINaWTE62vOpVJVK55}n1o@e@Hu4?_;l&ZxBWTP~*LVKxr$mzK{+G#q+dJPh znNvHaR}C?tlfnKR?e_1i7}@ju)x|rHRS`no``1?J!cCu<1m*~BnZmj(mW@`pm7R|7 zZx7%w$K%?S4Vx#7aPk3;JYXXBeH&jDWH0I-$$|t(Z<#bzu~W}b>tFfaoNpa zMU={tnRqWW-6_hT$EVCjw}QY_ZI#Z_cTK?4)lk?#{bpRZ=6jUg9?WDd(Hf2rq#wBL zu~s`|lrA^=&ZkaYkvx#0@SuUb_NUc6sM_}cc=yhtqll9iz!@VIPKcgYQ3QVggv{xF z{Q#JC4iXZV94OX#AZxAR@H$y9swXAv0h-o>F6=qGiqluP!k)>+XoL#koeG{g-A#B> zD85wnq#RnHGOpYfN>#D%g(0aP0^r$(!3$Wb$~c*L4!i-MfOCUAcb~x>>Ta~BL|tc} zf5>h%Qw4pClgkD1r{i`Hy{#wFzE`8SqV;0 z8k)&P=n_1-BtdSy4gJ^ZfmLFYmCH-_sWU4d6=XqpOXeIRE5pH(E(Vw(GR$6|Q0bFH z_|l*D5Cc##C-0h`{U>UCPljfF{*A>GQo8ji5k?}$6lF|F|PqpHKKcI>L zpfi?>20GSGL4PMh&fIQwY}Um=G#miX{RL2DK#}<0aaR|s>J5yxj1TU46fnRjEYlQO z1xCA}d!56C0j`Q=?gXfJ&SJ9kVNw84A7JCJv6q90L^1Qp;Zl6v7&7D?r|311_!DM=7JE;@ZO9mhqpulf}Ff0J24NwF? z{SAN7%ZA%cy<~voZ~c5%$amGqTvveEGp(+QIKC1rlOm(;4>>`t8-fA!{JC+a6>GTi zIgcN3pFk=N0+mfJJQc$-Rbiw2GL)+T#TT*7&#@Sy5k{8`JnIKW0zeU`jISgPW^v^o z9egG8?PLFaJ&ppQi7D8$p6n1T7X|<%YH+d|Xa6sTE07EUFrbM)&LU+wKgrWeIQF2~ zhAG7-mY<^ZdNEikQ(ru5`aiY^_aa{XxdOn)%UDn+U~TJxiV5~C1qHj}y@~l4QC4Ms z{QkLRld!NhG?YBy2W$_}OdRUwTwrkS0hkv60B5rBtO!C&7fNJ6?a96h52=ay^Efp- zi9T%lS+tdvu?17)^$qI>TIh=CHT9IsmR#CC`_auqy%^ z+j3_@y571UgvL}GK+Ld6cHC|3-Vv&9Tpo)x5f8$k8SN(7SoHJDKldexS=!zlmyx zm-#_%&qG!Q@-7wxySM)7Tm1i%PC72}l(wTzrpnn>BGutO{^Mf$$p$WjWv*Pb9Xm3e zM}rOPIhw`6&9nbOxqRT3(Ep^pX}MhCqo-{_0h_RSHYVu^nb)k8wpj|So zSY>~(P%g<6%dhCSUPA;bnq^mB((Xg8!M+ZwJ< zZ_-^nn_yKIK@rVB5R8Cf6dN;P0|Y03yQi|vF)G`D*HmG`n8y@Xj`XB&8x~3iDd3W& z#b_v`Ct16tpdo@?2FVLU1CfHZ7zR^iU9)MjN8y~DndDuIZ%EI$BUWhK&>>z=bv=~y z3bf?KxT7_Ah__N%XF;%^a}o6?wg5yBhb0 zw~IH9MxB;!{@VYr^x}Y<&O3lS=bg*JGHzkS32mY_Uxuo9BiNA`9~C>HCal_+*5P_n z|0#L4bi zW<`~g&&~&7@G8QHUYJ@3;9-{VH{u=+%U%u~+n06Jc;BojbWmGK1B(hCf%ly0DyN;%+vbLD>}ajKI>V(gFG1$Wk*c2GhqN zYr9|SPAQa3t$rF9d)kEbii6PU-w76KKk%H-2a#X`5KUAcSCQI4z_gY7TWzhjCD-m~ 
zk+mvVa%eTHAW%h&AFGTifh?n5#bT-j;ysq-iZSqXGL1-ML7NmyYQL=i#SO^%o=p!Aq$tO`!GX^!oy zdAB*TX@=^n+}VYFEn7*-;_Mk{p)5iDlWuq^8q0+}F6<4W^Oi0%`1p*;z_Si^7=z?5 zq$czlF`^E?;~YQ}KNqY{MGnvy;CL_ft-|GV>xGj4wXHlA>|@ACH*sD_w@Ii#??V0^C zeVVae3mk2N1Ai2(MRvD)X=FBE2gZZ%B`>%S= z>Eedw$bf`yVY;lG_TM^0z7Dk%77103_k@+~AP`V8EmAGTe>iVF> z#4K4Wewb`T!1GBk$PTa69nm#C#ymOltlmT*lQW|}*M|Y}@VxERgo0yCc)XJgTn-8c zVU3gQg7{^iN^afc{5E^!4+Gh85(TUinp=ee069aktf4gh{J$0MHeso5o;`Ut$Pi_- z)?N3bVy9PMA2)h>R-jLtgaoC15q9+wz_`;x%tYY8nOB1avSlUp;0Mw#ZQ;NG^2j?C zs76(6`WwghLM5YN$i5S!dtZk#1Qv{-m+!0xWF?aUsAbT=uhC#LER~0888QyAe%ET( zT4$4!*$00|9sW-&`7{U?{t?JOvP@SU`Wyhpd_*>70f8_&=wdqsgbYQi$Kc{!^re+w z7?tDJSx7wQ7HCzeLZHciukvu*fJ1h`n@w@9gg5naEO}bv*)?B9+OVAJL7O6m(LVqp3kwIqZ z(`Rk?jjL@WzLr#6VFM$vKr1o`N{@9gbnfB#D*ypqVbqvmdmw6s@RIN>7rDJ1FbbaT zb0X2hg29aX3m!Yfd4+N~{OdGdb?)je>B$K>p1T+IwJ+4oLJ_U}R z_^S!GNAhDnvLX$Qyzac$AbzT;G(f+{OPXQWYzJZL5J?~p_H(Wj4CXKF73p7WJF1h_ zK^i#(DiDDqOmKhyal6Q20yODv-x;-YVp)QJ@Dnrn$lTNOQaDz>WF4NOLim0IM!Wl) z!9f4S5P_5q#3Pi(m%no<(3tVZ=p|FKT(t67VkW-BRXRYJlKSh|EYO%E)J$ zr-qSOrnw?w+Adb6^7=FB5}%CnEysJAUlsY0ba3exQ`hHGt4+TX8t&xd#^Ft%tEVSb zkFUEs*+1tY$K8DR`=LI`c&(!7f%qRdwRD73JNPG3Y6X;4G z3h|xHfO;Ch^}X223w1raf~z7%_I{jl z1xTE3B3nmH)@0X47%8!r#BRrj%8?JJ*3;oM=CT22Q_oq=zU z*i0Z`mUj<9TmUFhbOFa&4+xa6aUQBi#nagqJf#%3oOwvjCM4%oU|r3sOCto74w8|A z$u0L=BzU??VFg<7DK!ah?S13n-0wX|NiFy~GQ}hD3o=_pFfaS3RYR&p-$@k>uIb&y zzox9|1ympV!znv^1`;S$F*8^+EGnN4sbq+PGm}>?9)CG^;6D$)24N#Y(pPjkDh12B z&$XAIyJ7@N>&cWLnnox99EnhgZJm*fbCaVej<3w8oJk?$;Fs9c7mn9}bH1<+EY7SO zG;9>K5Cld5IK#GYDAEy{ta;;`y2JVbdY%SWhj=W7q)tuCWYwQRBUp%F3vq7?pr;W9 z;XKOwn{kIp260R_zC|!j3pci6hEyo^lPc=yEB>ubrMbA+h}FlUMCqBt$Tkwhn+yTb z!7&N3ii)NAgRt9VQ7JRq^~8NM&(0kKKnW4nL$>>y2m?H97T!?$*T>!iAnbWFS3SD1 zo@smBce9UpYZAbcyfIq0?6m4Bo=>(XGHMVNY>njfbsTHjvWOM=_G0%aEe$|eKI8br zm1t6mEG|37wVBHzX+U|$o|3fHQ*=D=acE6LRF7s_Lc_vCfij9)yeGuuZsILZWF+YQ zjC5nfUBQAQ?Q%7pZ)qoHOsbjyfF78e1;YIs#4SK$(kj!`Cb;0{}zp7GXa zHaaA;TsqNH`^qT`+I822Mu{YsM@GXrco+vxb#?aP5AV5h(UmxE)FwuPJ>K-e{MV#L;nz;v2TI7yrRj+V)Pp=v^Mv|)AtR_4 
zPSY!)7DP`=A_zcdTA$FV@YQUsM2X4W+gyez*~S~x>a(qX8p&KNL&fL3WhK7Zbv5OT zPYXUv&S7^#4mWe-Sc5vS#(zl$Sn;(p*LG3g%Rn40zNX60AUS*e4jpf4L9wIh04)HU z)r#*8?^pS`Yw3hddp`wd^9wZ6_SvKyX!ul=4oR1YENe1%^V<@pU%;I2&41LB$%I_s zhpemL(v@Rp!&Y)a)HpX-%dmTgwwzJqW%)Gg!DZGlSW@YG5x6EA32!FiH1O-519)3ZhMAHo;)Bp~3WLCu|&Vh!fRM%(Duq_c= z3C#u&(nz`fmN+y9oZ^UrhOe6QlFvR?a^^k9=ZU7m%U7z{6vp~o?ninQe{*g48P{BJ zekbij*h@pk7obT73*AwMmFpIn0))t5Ei$mi2BAp@_7s2-WTA6w@Z?c%DXT9$I`8w($d&Qlg zb*!Y?GIaI=`+5V2g^uW?vk#*rndxyAZ{q+0YQX?78({f26O*5PN5x}y(M)Oa z-uRd-8GsDY^S%URph-Z24z{I(MHv&`#O|swWUEi)%=&g_IkHiPb2TNP36P{hPGn{f z_|pJ|>2^(6$xEKrTfHzyv^W$D!e4Bc|| z_m{I}fNcS1_=CR%IrHWtA+6Ddh@J~BU*sz^ZoGL>(j4vjgL1MOsj=`O2#IZ}Y6h|} zjP+MqhPGz#UWfw*C@yWf1&iPmSKrF}b4sAjuJdL3_6k-*vwZ=+XILn7Qjpdy<6Dh{ z7S>+nmg@hU ze?T>gzb>^w)s_0cxn+t7s z{FW`NQqC&W0*8lK?N9C@e+_n`!ng?VxdB$DzDM#5R^f-X3y;FoF4``R9qc?J!||+E z8zBDWWYYp$Rz~ErWswTPpxZtVNRJ<8Ufh$kRjbv*v+Aun=(425MVcok zTnJrtIonV~k&;*Efy39?ZhJybCICFkKwFg4$0}KbrFMvN$IG6Q?u(sIBE>+yM3de> z63Om{`gh`UOK$|JG|j`E4b%*P+%tckvi-g!?)#eA^Y#VT@)w@xc80XlE0HDv7Upk# zSQe07IQsFIYZW-X46lpmYu6_;=HraoEnIa^B7%7OI^hqoUwiMt5^Fe=QYHp&=(Sn* z$y!yguLvhf-b}>@=1VKDBhE-|{0c(VfUCOJNQ`+9w}LPmH_}Knuo5>mSz}yRzoTr> zee7x`Umsb&Zn}EGP?jbCUQm(vz$S`w1Y5P*$39}z z_Zn*lH{?9A--GwZ$@g3D=n_Ca*7wtORX^H7^FA@ioRa|ia2Mw*Yz~XvHSivmLONTx zc0uF@o7N8zd^UH(GTWx76Q-2aAgMNYSR;eZ60gm7M_k}Z0^gg5NyHvam!p`+oksQ|ep)g`0Y^wF_en3(4Kg zTa4c6YTYxcG~l#U(oL>I!XO~-T7YefgpDvq#crpll+jC6sD%L+eQLmZWd$qC%|qb| zUuU)^aMAu6CxnJpM@5(nu4lBMuPSYhC-&2%EYm1GQJ|d)=}6Q*wXA#dLOskSuzzfF z^OtLr@pbE!!MINPpkf-Qc^ziMpu*q~*4JpgG)s=& zjzCtaXJn*AflJclQabnL)m#2fwH`kl)1;+6R^`hMrOWy)jy+_dqtmxN$1BpL^MNVr z8?pV`(Jza%lm5+${O{*)Xz}Z$+;)MsE5>W0Q?Hk7GBXto=<4E! z=J?0 z5fx)^j7|JpN`=!@zv0lLl3*K9? 
z7vab&0^rjJI<)A~q)VGVjXJgJ)vQ~)ehoXe?Af$y+g`md^gRHbMEMzH$Pn;B#0d@h zgXs4lLVuY58BQEHA>+iKOE>iVk8t9`tz#EXh!l-)+?qK%PcGUvZs(nGE5>b@xAVk^ zkvngGShM)vhROHupECe96Ci;45u=|o-VpNx9cPG9#uyToL7^EC&UA(uXjmx57!-O@ zh8QA_Az~REc1R+LXqX5gg%p-3h8bj-*hL{W@=?ejHs*L^j)U;QhmL$0Qb=$?2L8F@ zb3y9(2ad$?h~$$$>ZqeAq@chNMI0r@QJ2Cf1C1}jK!Z_1#YmJ8Gs;l3Q7;o|bPPcp z8RO+dS{_r35A=LdMoB1@@e>(iuw;`>6CRZk7&)C`6QE?!^ynFY-ULRUeInZDPL4KZ z)e%J$@zhpNRW+8Ua4n%kR#wGD6;)NW^;TE0a<$f2@4PcZF?RWd7hs6xbr)rN_3{^B z$MgyeGMY8U3|`I#bJwoJV&+RQo!tXkZntgAZMWWj3vRgLj@z2H_XyBQ9>6_^U313y z@y8#(DQBF#%LQi$A^F~`FLT8er<@_mEmzzjr1;QZGtCflUU>pFOpbZZ{wUlGIS4xJ zAAta}r=WlWWlWBN9v|ooF*M9^#u+JgK_MAkY)E2=Elw(Pj3~zJV$Og@=pjlgx+n&t zA)*1wjyWzl<&HYqNad1IN=Y0adKkSUj)egE5 z7qD^o8mBLI$1zv0yaW58K6FD}T1H+>!4TgW1 z;f<_DHKxsQN-(tAmFN^Ujy&URYO|A@*fuwgtgRwnGe$AkrZ%-NH9Wz9 z4k!^Ei6|u^n(PEAB>73^{&W;Cw1iSgDO98U^pZ0u3Q$J*0uacO1SEKcD_J26T$0d~ zu0$m&s52cQM+dvtK}9T~D@R?FSDA$%qcV^wR$d;H8RY%}Lw9=7%kJ>f7tL5QFT2`R zW~>!}>FGmT+iRsOUkS@tUT-(w+fKZu6P@S$CVbo}QgCv~F6>Z8I?buhak#Ub_KC+E z&4`Bo2o#_4NK9cFi{^s3!7u?qP-6appnKS(u>v(?LEhNngE}Mzm;FRiGkeiXUIaoB zohXGOvRTa9*`gMi!6ZJq+K>FXv=?4YhB#_rKXaJ0r9DY$Jn|>ke89FKw#`lxX&cxI zA~&~nWFcOnlQ8IX5jR=UieDO&7U}VmL5)FGU zqB6<@OtqMJ3}hT58EO4Xu!!Zn!lWRTvaG9J?~2#Fa!)k06oq$&>7D4eYSuj53=n6jsab&}P(K0z=YEFKprk+uLN3!; zizFwa8JXyWl9N;!RunWCg-CKF%%_V?0tzZEjSq<+EYto;r5rZ25L5F}43G3rC%v$s z;sK)ErkEu=(e1jxP{uK!VK$DCX>J$ci%0v?mx@lbZ*zE*8YP6&88J>!G8h|YQ{z#r6h8ULmHe?7Fey0{#Hi4%2>n_D?RoxEo+HtA`wj0rxf9;!BT4` zyQ<0JNz0NIxnxDun%0F(t1p|e%dmjK0?&kom3pnQjc<%&+w8-8q9~3n&1W6zw2OS^ ztIoiP1Fu0GRyaxC&UTV%h=UO$vk${hfUs$?W|oJ_ndNMNO2)7Sehi!n6i}EwMhs)5 zR%bbDC(tO=g)WS!gl1^7o7o&9Hk+XfpOB%Cnxr)1#?ZJnYSKSns8FOm3Azfkk(E%u z5Q*YVyU!rTGd$8JXedM(YYN+63}QBhd^97I#v(}5R?#2OW2PiA3gc)pIY-GcMSq%! 
z;Nrm2%25tcltap;;%LVuyo0I!O&#Q`9?X_S&T5gK%gRyzNGnE$ZV92A#|&lwEx4B0 z#GM38W_+O#Vohtrk0IV)kUZ2=RMpM?#>fdX<*#)e_B3Fdi(Kr@4k zMh+r4m&msIb?u$g+2)*qXwOB1(vJ*{G$0|mp?z)=aSM?MAK`GZI~V%Wepp2%cH~VS z>BSJUq)~+wq#$0}n|iN1MJ;KGidhm24N!`cFa8%PUJwJMx~OZ$O>S_mZfP8k!jsC$ z77TU-f>m5aIO}tUzM16yc)NJ{f%YI+&HExu1niY|0A5 zCVB)y&HZg78e)!5!v};-2$ETXsGY-!N~E)+p!2pJ+E7vQ88j8uS+>-_C(xh|D3f=g z=E=p;l5P~xqebnwhsN5GmfNF*La7i|f&r>UO+}|qq=@*>F4Z{_QQt*37qx#u)PKhi z41SY2GNm~6^#vsI1#uTBJ24Y8(o&975+On^rcZT@8eZHjaR?x2G4petupNyYLDDn>lYf;(=dRo<2vkl`%80)!-H7r&!< zpXY>62!-TAOYZY7+(B?i797(NO!Zb|r{{&$5ibEFK0p>8G*Ap9lY9D+h8{;*9hV>N z5i!v;Anl<|_%SgbV?g^+3=J|OV^C*Fp)(JHGabS~mQqhOlR}8NS|z75;zxc+QZ-Lg zHHA<^Mw47SR5bymHH#%(M~4FE^-&c;H^z`rN;iOY@_!9s5X19->ctRPHzvgZ5@Rr5 zk79N!5j2$}M`mY6n8P?eB{>4dQZ9iKJdsW;6$f`jDp5ggQ&j{pxE5K_78VwGctrhe-uP=3}> zge4?RqjNgMTnFcYic1eI!V4F)BzUE6dnN)7BR6a8=Cy zBYBwwEX^}5BnDT&VwjRQEX*T>zt9+u(HWm{kC%y=_85Bm_&xo$FU3S1)DdL%MjiB0 z9aK0R{c=o-l}o^JFvYMS^5IMmG?5Qfkqfjx(^N77LUFk_Fb!7i3onIb%Yw`=hwDP<`}Lg5l}XKQbC z25;~Lo}wyCbrmf*7Bcucq60ccwU+VLIPF_7JnOy(11 z2(vN1=^kcAT2ods8ka!hgiUD}oC@SX2IO(wgiQ)WAnbKc@KjIr2M=8ihc4s=trUY~`8QK;+%Ce#_=3+^h8R&)?GTJ-IBL1U#K^SHA7`oGL z`M9Is3a)k4quleO-?Na%U8)vW{7iJsh9`&Ih-0%a=2Ynqvedu(EbV?#}Do=|DBX!z`dB(9S5?q#e zB;7eB-D!Rf6_m!6T#Lq&H&R?hLJBbOJn99h@F`JkGB=TyL|Yd`8!=v7#|#9~3!PdL z9k>%Mv7l|lD3a4^C9!s<`Y3FtD6ytSr>ZDlBqBLM2b|MKq+*RuK~xj$sK zhYs>u;U{0N6>}Qla2fD<4hs%S}eeqt$v!;9_(bEj&H9w=W( zffUE6C_tbE%4QX5i&P2*wtI(2DEJh67r=|8Do_CgxH1DUDsBtxD~#!wX2m;p0WEzC zEDapDyOKLNO1K-$!Pmn|+F}m@@IC6YFaEYzt#?fRscEE+dz$JaWRF`N?PD+qLow}v zAXl23)I>23BuyQOo1sfuAGt8P_dp$|h7Ri>W#^JhoO0aPk~_1KfU=S?vb%A%iB^NW zBKx~pJaq8KG*T0EIFv)g>xuoxy=_xo(BK&1Rf@v{Ugd=mZu})~lM&-ZEMXH-zTh|9 z7pswjMw7yUU6eTPg_es`cJ3ucty(yY^9562z)^SVlqA&G_AR1$3Vp{$l^T9x0>Of~ES|x)`Q)VDp<}g~iKnx?W z4I(Kk0(W+X&Ul7>^d!6L?1y!>XXB@S&;^Oku~(Ha|O+UL%T<+MailMDaN{dx%nf@=k~XA(T>q6<8^SqLy7WVDalyj)SV8niP)$ zD7VTfMi8PZIJP2s6=};Bk=!aN_!O-&Drs9fDM-LcfP=+Agrn?^>jrs4ZPe2`)XS3@ zy7D~9ut_RFSGFwGQ+*re!W-^GL-2#b*}*=mS%+3zbY6sWn@v9dQxib=p((4Eeb>UNZwki&IH~ 
zIg~S&1eQi2oih40M{#$77Hv5=MXNX!g31UwHmy{nvUkCX++xA29}39=oZO2f-CCdo z<-pW&>lb_LD<)dXh@suCym&Jjqj+Hv$)myEO4aGD-l&nwoq!#xx47(sOYieoj|G}l zsCxgBO!G|$^TKbAtEed84G^S;n`P6rXC?IjHH zbV4L*5fRZ{vf4cUbUdCVShOa7gS0Ib6qCa!gEKk%<>YJEW0bN`bnqU-YFo z$>=HD=nmVc1XJNUYb)Kx#?xrN<_3HONI(`ug8Z)zlm5cPK_tbcBcVX0141C)z)aob zK%8ZK$ycTZl82*}AOn7f269>(XPj@!Arb;YBEH0pn5QF-h-JrGC(c~SYrMlw>{P?g zz}q83*)>5lL`9VTsX5*@MXuxYS=#-VULB!nUuT_O08{kED3}9Zg7bhVQaKW$G8CvX z{zwPEGBj1SbZ5VyBIY(oRl$1P(yg`-rc`T=+-sX)NKgdZ2o<>E3qZI$XN3$E z3@qR_w{m;%D+*T&f1=$sR&PZtsDv$w&hhK*qoC2=J$4A9ATU`N96%SE>yz?CvM+_u zKHHH9L~Bno}TTyL)jdr$~_ zV19MaT>Ad3b9#WqI5IVIZ})s}G@$Sapr8j+0}59n0}N3NHT!f6jYOxYL_;KLt!+`I zEhcW`H>H3DazG++AP1!&2XcUGr9cu`a9_9<`VzttrEsk}rAUHe0 z61ENobl}KGKmQjEM>BUIa6jRmo;s+j7d`_3}eEC3OiOz*fU{9hbBdebZD_>#e7ALI(4bg zXH<_W#ahfOv0}uG$p~3W9zC;jN|D5}_M=%W9_5vyHy7<(d1~v@_4JmWzm8bu_N7&ZVPp?gK`2>Ct^)*UlYAcI?x& zXQx3OI}CbQ=&WBak3D;O@8QdrAD>Qq`}Emk$-kc-iWg{j;RO_EU_rnG0~}C5{st9v zu)zf*Y*0W6BUDg86n@xlyB~ny@Iw#<^1(w9H55^xCQejQLlRB=P{Sd9AO{*}pc%%Q zW}JD(86JD=aYrA6)KSPFnIW=Aa(EQRnPG~lW(5;ekn#f)qWnNg6Fi901QM|HKuQ&^ z%reR^qm!D>b5A=Ts`Ji41I=@WLJg|3 zpg-?)^q_8j;D(%X!YQXxOf9uE({3uIG#qj^4W}DYMZHwiPCNBf(@MJuwVZEa03e^_ zYPI!NTyxcRS6+JsZlBuh0pJD9j1lWAa*F+m8O)OM${DYWF(%q+qb0`vn`glSE84G! 
z@phPv;)nwba^sLA4llkC7mRhqZTDSq!*G{fci%mi-f`=#SDqJq`Q?{jd=;S#-`8)C>ArkSO4B*vK}c~ttwBYC{BnH-^Z zCTU`ZS=z^_w=SB=VTb|7g%3m+;e!!EaC?LiL#_MQ^sobm2C7=Gh$!MPGylieJe}JsWInzs7cLZpY@DYs3LQo8Yq%NBH5zA-;Fpb03~~<981S_uRut zXB-%21=d&YyZ8Qk@WU5>d)n>^UddE6iB8!-c?%t#}3C*2u=X znei1NnByDOXvaI6(QRxz;~ULrjSoPhYg{BoGcJcCrireIlEa@O0a!LGCeo3Wi$*h8 zw>fTv<9FA{-R-Wyyyih~ceev2?rz7sPU`NHo@Czeu91##U=IMX5nnBBc}ra8GJL=y zpKS60fE767GV&vaP`0){VZ3jD#XRO9pCP$tbS;|AoMtq`;;m?I>j!j@g8?I$L2oJ$ zgJ2kE1icAP147W7-_#&F$pyhU%z*|YOoj+sn8I98>=(-*VJ=cw!hg03l*9VNktBsr4Erzd|VfYAv-KK18G+5{vsBUm^m$m(QDqoT-P)@NXdC48a1*; zD_{}0Jo53SFMZ=HXi7&qzLBPWlp{}TYQ~qkU>VRzT@sflx=J?5k&;U!N|)NSNMcQN zSlb+vwvozFp3-@%1Z62zc}nPk)vKN)Uhu*Zj_lD=muqEfTiyCr-r(bW{v&2xnQLW>;U8Ng#(&Xv5O@qV;8GVbGq|`A^e3s>1j{P){~x<-E3$7 zg;~vlHlUR?C_`;{P}8Q?hLiFc(Pm~+nUSrKB269DO1eZQEs<$QFN=>W^_eNVetW>CUKC?AnDS~%S9|M z^;=>bWP2Cs)FL{yYcn`y@3Oi);FZ$8*wDsUSDB4g_HMtU1Z94)AxnI~Rly5paD#QV zz3s{M1#}H8Ht(8OYyQ=)(d6*|?+mLNwus~8bzgw+oR%Ws!WpVjfQJQvWbyt zOq=b}3YjA>V$B=S_yBT&>Qn2!5xaZjZkxBeUGByd&MrN!AW!PW_EwpT>Wi-<3khHK zzRzpV1miZQr#s#yx|8=kWmf)o=T%07HLPFF27(=YX-sEY)4Z`q047}5Xi!+N*?b>f z?OHlfUznI>+hzgL6{V>n&YfSyT#O{AqeE-s@T_}Dsqsqg=E7ad1Xj;?ThX$MtTc9 zjzj;hl>cpBq&to~=4pe`U0oh2Pl>$%BzV&+XL-wS*-P5s1DIhkwKTm275LRvsag53 zs@vB&Y&Iz?#W1V^3;yU%40?gZD*ooKy%2__zccC!jOPn-jALToc!eV5dI@bj?8<`n z#SH7=4wJ0pvlH#ekr!mYEgHy~HSsk%?c!?bV((-108*aeB_$jmYfM zoyM7`Z%%WX<9OZYHn+RbU9(U}>Rv7;vxwmSuJoY zM~PLQA=-D6#vS$qpjMaXUiZ7_RfG$$0%MfgTbB_uUY{9a4HNU4*j&DaX>MU*Y=r3x z)O9&c90oKzmio75ARVlJSrPVPgf@;b*va3*uHU$xz(@u`!CqR1@{rm&T>FuQywIab zRNW|*?Gqc&Tk5SYB?k|XT8z`NNl z?8?AasKD;>qYt13$5|vlQ#jPAC?;vIgu^^QDEME`zrgguScUjlN+Ai zslok%p4<^R+d(;%<2@uyLM8kS2$K!DVu56mpTVj*oFk^1^SN)St7z)0W+;YMn?5k8 zwWr&)2hyOao4%^Mx?k9ZvE#AI+65mww$JK1uFEXTa=*(OA&!B+v%4W|>qGm)A!@s* z%)y$}0ka&zJ4Es_KFczukun^y8cV9EFoH53k(oqnhG&2V7YG5GvKvk zyBmJ~Lo&^k99p?Ksh7y`SB39=kwyN7Bm8wx}n0!W40Hdg_o-%3QgV=Ae7L^1jr zE@G;_yOGD)B8%Fsr}-`8`hev6Knm1A3cNR;TE#b_ynGu;odPe%Nv|$k!ObDGry?X1 zM8Pbq#hG+BaKQ%U8KvhTu;&3QPHHr)0?JP!xkei$v;u%5d`6{Q%H4}FPYb@DlOO)m 
zxhtS^AEc~%&WlKoam#6aUsKVEXQ+ntTg1YVG}la6d`0t%k!&4H*CwW!?A`z zhHoH9`uiFC!^=KAtsR29qrw_TY@)q$w*W-I%xR)T{Gu&-L@>j$z=T9)D5ITXF6X)< zxe3L{b0ZHtBavLZ3KRiYsIw#zy%UVM(iA<;`@AO_q?2+co7{$R7(!GsxnyiTPx>SS z!%d&`$=x}y>`6+d98TiIwC%A60r)hkcs|e3%J#t@E3`%pW1p$imYTDs8K8r4G$$;` zzFp%!Tstu=*iOZgCmmzUIZQuye8;TQF=n%;hA}98#6P{{%Y?eBYD*m|GX6yKk}~wt zE&lV*tI<34Ae`)9ot8t0cNc3mekE z3XA&T#tjp%u7nopJ2CKltTTKk2SS4 zwzWejAnFEegCZ&ev)t+$xf4v+TpB0>BP4Q7_1Us?E22IC%FsTt1`@;(lxn1yG)>Su!HE;Jm;{SHqZ-o_9!ZN)vf`Z`{w%9R8%F5i8U3S>#5d14~&uM|Bd*vSb%5*qC{A$7cJr96Poc z;xQll7lg6H5}KhKA_w`yA$}w>`~1g*;zxp{wnKy}0@XY7IzTVlJC7PrNm?rZD?BQa zGGZVzg3A&vAJT})%YPg?MyKT zs?uTw!!Iz;tJ5rtaZh;^sH_va_7hXGTT>%5ErA5YgsLk3teI2M2{-ckqN#Goy_3^N zY_|n%NGw}HPV~gPDb%~ct~X<|JaR5aHA%H)#pQAVWvD2ps;!4**o8$wh0Rovf~06L zo>LX8)B~$vOteVbIGzEX+}Whv0ojpVT*fVqUOLVIV1eL^!V2>%>;n*fko>@KP`LlCRMN zQC?IyuqZDuiU#KitKXc_-W*kBtk|A3rINW(7)buP#?4>-Jq`)eyym-HB#q8nrAi`& zSzUr3=}Gr##1vLUkUck+E6qDBpT6Fhs#Cs zeMnxkDAOb_Fd8&)QPETqxg6|GRV7B;*`2=yFrEoUjRhW~49@*6V>7-D@^MOP1gx#p zuw2c;R1+*LET;bPmZD3#>&scFd%AC`QY@f>pEcTLbKceLJGH0<}yTT255-4(L9`fo8Af>+wA4Y zS1w7&^EZHP+gL>6uAx*CwW5$p!OcreUOYI>3nN3@VgVC}z?C#E_F~%f|{ zK4WXXX4*i`Pb)R{0lF<*zG0w>po0qI`PE)AsKIh$!X3xV(5sD|4jCgu_T zYagrm#j@ctKl2s>EIk#yFK|fJ{*EoxMx(UeoL?IiFvZ1Ys%~604vS=>Ts(u80Aiob zDdCr0!36{$B?;&IIhLo{k(>Jr68i!&*q{R1zPHYxF%TyPA_H4Xml7L;I2eNo3Ij4Y zti|GEbBcpMt}K2!zg;j|89Ii4S%!uA1sG~eF~x;t_&Ux)24ILs5puux+ct51XrJ*0 z`(v$!CKVcTl_NWuBLgCWL|XlH-uA&3qxrV-Vxp&wEihx9+A5lc^byQyUUgeR;dO>( zpatYwoK#FMSQuEqY3>i4-sPGb9;QH>lDFo{P(&R?%(R9hW~7UH9ikc0-<_gs@exse zobtLzCU&1BhMlvzop1pD2m0W?+VG35%cpX8F+(&hf%IIIIaegU3>r z3<`rWC>IWrgKqkQaRO{|k%R3d%f<3Of?`ARTbPF_Azqj9NcIdRZ$+GJ+tL916~aARNG<#++8q${tG0cl|dW{)^y00v_S25DK6p%{x` zaCTvOPRxKLZ2uNvm;rhKgSTE6J{|)(zycbFpmTw18pi@p@C36cg?2xnd!cb1pT3<{ z$M8!SU7!YPkb#H^24QPUV9*6!kbxYi2FYGFg=ctOP`0pBOFN8KMt&&#?38c#h8eg^ zY`v;*;D#adhHvt1*@Rs$qI}4B6n>+_q3Wsf|^>4V8SQokc!f$Oj zhie#za`=IAP<7*prIg2+(=%1w*`#HJfnq;)%_mMDoyJs?fev^82Y`SY*bGjkxe}0o 
z2S9)akboGN7G_EP)sKJ@m=^Vk*ss=><{NhcI`?HNC1F{P{W%yY}>kZIn(Ja<s z>20V`(!-*K%a%>swbtQ&z|?$VTx-_X(rG()PF%SN2`-FNjSY5$1;2mLPM59zw(4-# zR5#BS4u$}H`mSTou6;ZA?%uzH4=;W^`SRw^qff7XJ^S|N@daQZ?3ra}#AHZF&>=+x z2Z@V)hLSNQbWnnU55-^%WC%!50)GgUpkOr5JhKvomRa@;N{`JzM;u>(5n>#2U{L`9 z8Qj3Y09njIMjT#{K?WE*IDx?qH{38l26BWE#ussv!T z6U;Ba$k2fU9hk6I3cu*G3@*zcqss#ibl^b;2RzV%l(?+*0E4wSIpvhV#F@;LcFOsS zFBFl(5klWcz+ep#81RBO+=SE7HzhQ9LJ1DV;LSGML=eCQj3!XRL?Ql#R2+58x0$tIL086$IKnv{w5R8!5UVFKs)mDB17ENe{<+q=2E8NuT zP{qJ{U;#Ct1WgA8Vj6*h#gL<+U?_Q0>rkd`6I^T8Xrs+I7c5Z12@wJrb?)Juv(7v7+_TR=11;V@*$u4^d@mRS znlQzz0HI>RJP^SQ#t7q#hQ#>di!j2-paX+52s1+r!N>rO4F0&vP)sxSQCJ~L!ax;Z zF+X_dMHoiPkp%%5l;TM#3osx@93l1yq8x|IVZ(|&$_NDp3zz@^6krUVq>*=1)9w6XBE>gx&i`NfGnT+RB-ucTePU0!&o+WkU z%?kmPFwZwMME#9IA~p7n^vmmjJV7NCV1i=bU@U{DoPrO1ODe5IOw%P)qnKt%<5XHP z#!PJt4f+Ad4L8QDPyp29h`swv$;(+u=t#gwWc{R0DwJ$X2(0?F^_uOBOk9*G<_5e9{`XKh6-~S z7rw6q0eC?%hH*6vK?54{gGkceH?RR6fCa>e#@0sF40jP@YnF*dF`_}YAA#WuKAJ-Y z0DuKAcmWF-AOIBN@Szxa0YqT%f*cmG06Bnx3uGWs0dBwnB7sPkNrL5(;y@1?;#M_ z1XS$k(v3wGfCk*SUIAuMK!lWo90cLUHoOr9h%&$1Zydm2t=@!X?d$#BOt+7K!}#Ts7o)dNMHy)01eO_3ST27MKtaZWlMVjBW^p;tc_YmLaI-$5HnplYAe!}t`NGQ8ugMZh-Hx0rmeuP0*Z#a-WP*FMaVLqLGlXFwwThaiYkp<*oGP= z#%ll;P!Sl%IkW)|6bzsSO>FEqW{`jrd}9C|5F-~8z=8>KyM zYkTeMUjsYX+JVms;AR;~Molq=VL_&h!3<{H7C^lSlmYZJFlPja9L$qIFf3DyuNKlW zu~n;Orkl#ajbTK9D@BN~pttmnx4a=50~RtsfGhyhx>;BNcrh@5_*yRDl%qoo<`gHW zL4X=k%7qGIH%)Tl{?ksnkbnVLaL;hcpmaF^9zMm%P7QC^#vchsM3XHk<6%_f72)1P zEcK0u{*VRVm_ZaeAOHy*DtzJdMnz^An4%bCeL*1xvrF*RVx{DLxDtT1p&=LJ=eLm3JVeZ zr7=Z8b_8TF6=SdfLe&_Rq8WoL1H5%Z#;5@T5)gm~B!;h}S{MOQq9IB*EF9ukSveSx zGI4yC1_L<37`34qaEwC&4EVu3r09*?Y~uw5SU>>+5P>L^<2ewk%{kymTI%px*uoz^ z@r%Enq6KpPk;Vp)F`|att+}-X!GLyX-nRl1lpq2f;7rwGi)37};4{R4#%IKDY_(R6 z8AzT(9MWC6URD4LV*o?m2(SSx#Gyxc!-6LOHwQ5Ih#W9L1(3)r;h(@+P8?vt3((1w zAVWKq&gn!63tY*XRMQ!bPMgXu!V;X;I&4)9!9oaBNC3eWH$#H4t`3`78pmBTi0 z!~AH9HzW!|5JU~Ez@tRS%b8jN)Bp|q(2O~c&V7$am`1G>MhhClH}FvO)r7Cu2Q<_G z_C3G_7(<_!iqsf`2^31w*avZCT{NJCU>wT$xrJ6BRRm?rD~N#FG1Ulsz_XA=wlt7f 
zWCi{lg1`hofCZULQ`K1rWkm>B!&f1ZGcZHYNx)**3Jo|-R1i=zyblgO$^~!+!W6>` z9LRrA9%<|qRg4c4X`bn+gEuUI1eC)#q(eB+fE0~`=0VIjToeq9!w-l|I1IrL3;_%@ z2=+n5H#~=@J_?Gq}sJ@X-5YTnQ8d zwAlxKIL$Ep7=>V4eK13Ppd;PbQiy!P7YHB$a={sZ!4(j|4afnEh{$z0!2s~hb?pF= zU_mKR00CeDiCCAwg~35CNt4{k17;lVkkc=$zyWwbFeJk<=o2;lLNKJj0ld>M;QoR- zeG>@kz%KxUm-J5XNYgL$f(fLEksDXUI>}B*;m0;-rla!-Rw8rNcR(9*7MAHl*SzlHTTF+7Cp)1!Pt^{DwD- zQ5V4)IDEqiEIZvtl@{ShEthdlrQyW|kOS8=gbXkPeTYB$3*&7rT$roV30$4y6xLXFS$P?@m00tZ^+1DJja7u%wRp=~xB|6&KpmFLwLlP=*<}T><+TtM2^c{BnbjRo(V1RY11&7W zs(fdU?ND_#4NVXOBuYy8kd>(^^anLbU!^0?@!je8UAGfI$tEH;e<% zQ3N)`U_-pzr*OlfAOH(6R1tCtNWkRyw8WB*ieOMCU_Qf^B~V#7%LiDCR~Wzpl@J?hOPr+z4nRQM1!7V)5D1XP zoYCE#zQqU70x~$_s?5M)RFX&mg=LIJU@VvY=s5$)C_Fz^iBz`q@L^(_^@cy zEg^{Upg}2=K_N#E^&&#_2{)7j4a_7tz$ndvj}Wy))_DUXq7VG6&n3nHhD;3;LW2&V zjABUQ^-X|&@C(A!%GwkeQvy&VVZ~X@;o5Q#SaeHYj>QNx5D8q619dG2jlcvrfF0rm z-8Gf6tji#l&nA7tM3#W>LY=@ELk!$7{)ZUD54;cZE|(=)(hr#cG!)BaDn%ph6-t~& zX5!{J$leLGY86GmDb4_LJk|-EKn>IY3(S}yZ*KO?;txd53B0Z=J_lgIOdLta90lL) zl5#1NvOUDd`8ATMFo+4@N)03-0w@SD_(XvO3IZes{&cPh@bXOEuzg5l5&1EFWG7>E z$l%Dx7a@Jq!*idBGWwNCx;412{pEq$iQ= z5*lCu0T7_wtm7^vBpCiY&gAH8k`y->i~$`qNjSw5IkkW_A;1HuKndo8o1{}P9Do5B zKmZ`X0YrB&{En4q_W@*g2T;SuN$>~?4?l6r3dV*&I1kIs5IJBrM9@Ianbbqr_sbas zN$4D>G}8a%)HfJL0>DsW*oV>Hk4Q&%M0~?w3=v9zP7FNwQ%o%s0+11}&6a6}AZk!r zfB<<*%L6Hu1B?Y(sKs263)qst*^0QDiBJgLDcyOM+oZ$~fV2PzgkoGZTVJ`AHm1?PugA|2BrX0C) z&kjB#tO$Auo>=VgsBlOeghD;ILMX(`@d-ghR7^fkL-i~}|M##bguSqY;wc6XK}BI0 z!`3movNgMv3C7wqd#em;tK>v8SXue#M6H}9oiU5DG*wqLm9>z2S&VzOjQhB6(3;(a z3B7GrnMIy1gHXuO?kdLnScbCgJCEl(0rk7T>y_hvlzlMy_@IWZIuYyEHC=Bq>byW>TBNdYPg`2oNM!5O$^s0Qg z#Z@RJI+9UWrXRO)BgxKZ`o2DsoYWJ@VHnZp$&^Io(dS9h2hT3t6N!f0js5qJ`V&X_ z2}w}>uovMB36R1}g-X~@l`(}U4TVDG?AnmeP>2epuKiF9P!UJPmIe?sa4i#Ol~$O` z2+bJ@1>#ZZg&5<7-ZS;w#rV4TWzdpk6I7c`{jLS&8Xh8?ec)Tc&{OiNM9?`kcC?AIK7R^iJyWqZf8ry{a zEV^_ix}}OIeH24^=3{{VeDW*5k=RX=ETGcciN!`-Kr8(S+LO{Vw=~&Fh6yx#6Z`jr z|MwSRP8IRnuZ^&f3gH3Epps8fRKD92d=W>j|Yo3?B`uL<2bRNFP2+KdiWS~O?UoJzBSqZ$qd0DStmcJ=xd 
zY*?{l$(A*H7HwL!YuUDS`xb6oxoG!s#TS5uu#tO5KI1D=m|kLj`LZO|v2Xs!VuYg+ zYy21T3_5YZRIVJxa%Rn&Id}Gq#fz8BnCZ~?bvoIvWT{W1ZvDD7YuK-4tBwuKwrkkE z$a(Ab8+dTx!-*F+KAc-{Zr)@%qe)KYOk&MSIs?hAnX~BRtVg##lVkK|+`favNVD1W zVcwiM#}4g72w2uyiGYoFp}?+sN>LHPr$=gQE4=sQw yXeSS8xm?;ntYljn>8wX zyuR}>6?z9YH8}eU2{cTy@VB;q{6#m1N@UBM?3)1Y=TsEGi~9PM>3#HQi#}(} zSI92d$pAjjgGq+??vASznDRIy?AkDx?vShG`L({qC8-IDOruQE-yo`GwH<>hCQ=KA zQuR<)qO`0!_`x-keasem)t=!&rpfL=X~C!WVW&27M`_=48b5YT74TTJLgy6>9l+m4 zPsbLr8tXOvXq4SNO}YxNY->6<|6EGln)r9Z(=?Wdf`@Gq)vy#rw9EyVEpNr1fQ_Q_ zTi(>3=L|Lx&3bMzg)A32uUN+YY}_jp_8(=8@w zxQTIdjP~h&Lu;&}+$j9$ol=C!1VA7@&xc)Ccqp;}4tMOjcLe!zHb7FFzAf#$kSS#^&M^U@BK7oQd-R1v{)#$ z@Z#q_jp=cUMkmIpN{>le$!ul1v?-}2FVnEztuFV-KBubME*;h_>+jMT=S%mxoSypQ zcrh zh}fl~@MCH?o`-J2V4EoooEL~Y@+ zH>uoA^0gT&LH)ipW^wQ-mXEY06AT|Q4-drgZp|q?&1;Thr1T=nB+l_(FehXYX3~CM z+_?2>7#k_VXpPAu%hUiYt8*_RODYYYbS~5O>ZMi-gr`np=B_)P{ zpw0!gENC7r9;b&jYlRh%Qs+N-gF?ae*-5X`+|ZX&frPmE4VH2t1)56Ys*UdT3EQx0 z2@`99fVyiZyRM(lZucwmK6LpYveTxT?pDyr%Ncu|m?y&yDSQ^oy}IIbMOK>^;AJQI zTsJ+>d6Mlf;;|@c8U<&vS_KY#0}nq5D&4VP*{e&QO*x?T$XtM;Hb-;>#?Zu-;mg47Q&?m1L}6+W6G_4a9c0(F7&rkZj;{BG_3(Ep^u zaueZ&j%N&mo<>7>UeM0Ebfp`nhFa;vePGiGH5whimR)w=ELy!@R9xCUPIFvzzIbva zIn>Cwa-ZKo1Xo_#<;sLP04$J4+zsqv;L96uXH&7p-D$iCNJ)}B6Qj4 zYVP|D{xb-Rn>uQj9vNRbxrZfdqXDU=-Lg(0fS+UC-U1$Xh7m3&rtB` zX_@tY&(On^LvAgZa;Xya(kERSncb0Z{Y`s4#-biGk&=uH1>aNL`Sln({A0|1n%gHj zmUV>e(|FWd8y3it)MsVNmAAXI5BXy_6yQYzhoh0i2u!Ao>a9zqmJ{=rnCZY@R6bTy zCv7cs8Uw$7B;R(=oxlD%osU4yhLQ-pY#g<IR{Vav$9)Zc)G)*pAkd?H4ojq5dPDihjH&(2b5w{$>=3-;1i#9mk&*QWC znS@q3Ky!#_I5r2xwP~TW1u|;+%W4~qjy5BRY7`a+kE0H*i*g{TdcP=fx^2+UuHr1VDHYl50T0DU2sk?cTP<_&W|1Q;SH*Ktu95Bdl`m58U+C`a+ zQjzUW58r{O655=%eU-qo&n%?QA!5g;QOh^!v#DkE#@!F#dLXTTsgm}CZ@)(c=kNbb z0Y`~;7XIkD_yE`Xvw%#wf(vOsKNtZ`Eb~IwAMynEZ5f>-B$+ zd>J8|(lyxE8#zdU^YNH}YB|ET3dKu4?|fTD2iwOz4RB)l@>v=`fqe12(%yxI`Y>W` zWtre9^D99fw8WPFp+SjMNuYy)YT*%45}my=)rAr>+wErm5Xi@nrMRnpo7)RcH(jTd z{!%F&rQM~%e8j?RXoCo{MXqr@KE%`?RC}s5PEI&Pe@SdoV_b3?XxAsX#-H4L96JyJ&qcmggcg)i=L+x9Pi{cH~R*OmjTPBsbRBNSaq&|eBIw) 
zqAXebOhryiAD5RmG0P`3QJ};2k7;cCXKcOvd($73_*%Ww_-anNIY_)jAoJ|kIttQg z?%|1bz82J%WM9+HY10gpnV4U6VlU}|C9Kw`8fa!I_`vZCm#7Zrb>PSC52k;PUQgBx z3i0#QOb*HA@o-7;KJ=Bm(?pyLn-^HyL*}%io#4{y`Hp8mfF*!0WwnLE-MI@>N}VL$ zLA&^e8{Lk%7mEk5aLtnB@K*%t);tTh*&@xSAhMoK-na@#Kl#W^Jav1kTt2)ObweO3ZP;peV0M&y&B&SE=~X$Ums0* z3+f*7xT4O)o7QGcobGlz50h}TW;l`Y(mOV4KsmwH=`Zw<+h_}F25 za7QMy(?G7m#^cGD#vK!X`&uK)b2?&W0JbHirKV4mlGyqEi< z1%WpYyzBlhbnetJ<-#y5Uw51U9Y}*!7swXM*e+z*4(m~;ke?oPDtyWy;u=p++m=rj zWTKs-*33+aT|H4NI);p$D}b+OXO~7?ZI8^|s#~VI5mb}EJiW!JqRV0xnMDNpWja=#wU%pXTQxqFzy%7yCNw`jf0Bs)`{i=IP=H zvoH%g3$2_plSMJYh#m<0~i4!dDQrx9n+=&I^r%B9sgeOVA=ctb!8Ob8P z$ScCqI{)LFF@VjKFmeKASECcYF%6yHh0az>E#oDdUK!1{jCEWv!O<*nTrcc-+_Hc& zoAaj|#{L)W?^i4ghhxWElJEUaGfk-LAZuqkZ#G%%WqU4wl+8cdm}Gjf_f!z)0jaj^^2R2#*uTFkagJAa*Y74iLGZ+^PXjtQ>a#c8Vl${ zX-tuq;ef~ANzWaU`~7V^9fKa3KQUQh(dl8HUaigtDC#%=J;?3f_$G#7?1H=&+uwj%+{HU_a-O^p@R#EwhHHA}@!F^(J0GF9ve$qogK z2M4??h-Yo+n5$vSi;v$qidScr%bSvYo*z%obm6(AF3ruO6UHU)*&i`z+o;KBPV`~U zP^~?hiV^Wj`T9xp-v^STy2Ar4QWe6KFIpu4tpXsW=WI?bze-j8Okzr4&WKe{{y6HG z#yS>3meiQ>bRzx5P)7Ph`g$PQtE*AD%LYP8{F5uz&7pMp4dyW?R%#FTor!0zKLy;w z1YC=*Vz_f;P2IIE+#ebVx+1Q!d^9Jy*e%yIJwl?Sw$N@pS~dWriVbs#YJoNjQYW(D z&X%~QJjQ6twX~){^SaP*tFUyBS*^6d3(N5l&(VdW4q<^F#d1X1aKuDX2g#QtmzJcL z7RQ#B#Fv(2$`>cv6hDrnF0$d!i{&^_B`fNE@65yWN{9uu>F(>!^hBD4NGOe)XNeQM zGI+5Sd(T~PC)NEUB~q<~?Ibe(`*l^P>7#Ep@&Cy``fT&)U+JUH9%Vq;4G8j4U?=w{K#U3G8Axy%5?p|6 zX^3hm{MA%$+uCj0QjToSFKfyiX?gF&hMQ}9p}_VW*j^dlzO&u-@>ly?Wcxx5n=@({ zW0QY@WO6$fGXOY5PB4W49mwx@eDv<3v5TU%8>b%aqTA_$uo8Q6CnBtO!Itiz#TY|K1)VU;+`KIOsRg ze|L1yiFNSd{Xs9*f#jXRbk@P}Jw!+>Q`zXS9iGXaFzh!mTvN_;gCuSm9d_9r8Qd8j z-5Kpw9GNOd%#IE>>_+oMm9C!T(=8~ZTI@lkQCeRu4u-PqgRr#q}1`>Hnw zfrO1f{)Pa3LV&y>K>mz>*c&IQn4ozw!B{~!npE8`Nt&5#akDaaYX7(o9KdYOVjr=qF@XxFiXzoG9+=D02 zyrMJGV>RonI?F2{64!T4QOqkc8LnFHj_U3qT<#rzqyr`vTxOZt*lrEkFS;}?jK0x- z`lsJx0^weR2&qB%J2AaeVk)v@I;}uNk0O3n42IS$KfM3K@AvYzXvAg()9DjL#P6lz z(IG$86%W;AKf5)4hrzq|2SnMKf`Cl^d;Njs1NT(dMJosWY7i%E8-C?Og})I^dk9b^ 
z0{(Wz3%CY-y5bXyaGhB4QhgQp77_M#$piS>k9E_>3E}!+De&oLdim1Zighb2$EB>z z2OL#X7*$UHcJAeNU8Qg9-|fz)cyI~Tm--zd*!tmPJY@^uZ7ZJgmBZWhzi-x`Qg-aV z{qXl~C*bX;O3Dr#WLNp^*UEQql{uPF9E~WLr7As#D!rsCJ)&w)h6|1)?y;-vGgZ+e ztM+*I_V^v?>r#P1Gj7;t{#ziO5R2-;GgI!uOFtH4-z}O=9nILg&u%O*x-o4(S=nKG1^tU?;#fO;vQl{e zRcp*==+os+l@kvqrlQKv?0-)d-XTujtcI%&KT=sfd@^9d{?gFl^z)ky54%(Tx2q;^ zSMS;l|2KBhs`BEY!x{ANOE315;-{MrpAwhcDqqF2pXC1ip3456e;i?rr%scxSxE4n z^0~M@exds4Lix5$`_GI1mP@mT7lVMyEgWSI0P>3c=gS9geeV8ryZzJc(a&bU&&ctg zF1P>lyLDj97fGM7qEwQfRJ`s{>$e>< zh`ixM7x61^j}-cR>aZIO0lrO4=U*Y(WAjf9o|_+=)ERjEc(WB;{L)s}%$uDhxb?M7 z&&V=d5&m(rz(L*HEp`T4B6oKz0Z1?F&o&Zz7+0##WN>#p^7rhFUAWkrC-K0Cy7^=f zMxiXu+hG|Rl!ptM{AN{)@{;gpwkqmcwF!Fe>-EEDat-sQyoNb^@@Wjmbbh)6oO?wK z5P1D|c9441rDFKEcN4*%8>w{He0R#YZJzg0f~lN0dp$S@{%(bRogRwk8q{@Z621;A`5ABYutNRJqw)U)3DK zYOH9E8>ghJ&Ql~fB~$IG?Ws}|A+9wk+*;FgVqB4=Yw0rm=4M82{iGW@@-IUcGQm3v9x=jQDix=(o<+HR#}*5 z>zQfpt}n8<_u4V^Et=Ur`A2AR1%5)axZ>{OwdJF`Z>~QvIbPZbuzCS<$ru}A5vA^V zx_sO26`GVb4aHg~TeY?Z-xGwgtZP$w##Y^h{3c4h?flwWqE1@c$Gls3^E9+8_}Vpl z^cbYDy&PeaPqY`MCP0d9U6Zs68JHQeg|_e+_-~HhDLLV)h)u>`HNek}PpJ6aB;nJc zgCd{P;iFqu?Ken$S_4fY0jFUs#$oD8t47brV4_A=W4%8{9XeMl${u^bopHN8WNnfI zy?CZ=85_bgO&ed(rOS66v0_8PV69gZ?8G;iuQ1s*}Z{9$Ycsbh_ui^n6QU(Cp(%C3cAA zs$anCS3VDL!VZWAoP~XE+2ZHayk+p;j@|lGL+_c;lGW4qXhXjiGS#e`W(_*)EMLjSVhmc2GTzo*SF zdF*@BSQqiin?Sb6MV=~8+%uL)!Cze* ze&cfz@+L;oV5AxVySbFElAP2{BTXCgx7ZXAtd$KGA+}xlZN-{r*U2~B)80iVT9%<1 zo6T>eXG5GEdZr}O4AT>ddsoz$2W`V8MyjW-UT?(l*+%Q~eMxuK_niPmPTw@U);aG2 zNczTo&&4n9umAlOF0BmqrnIPzLr|9VYlrfgrmLYy`Gb+VGp3SyI_i%tgV{`9QYH|% zs3~-<+1`E8^2@tgBG0ZHIKXK8jjc{oayn6RF5;NSK{PZTL+W2OZn|6ZPLb0erv9r> zX5Klmq<=lX95!-M`Ky~8WR6vYHOz%VRkf0?WVjOq62f?&zgFo?nc4X3AI?>?dY0!Q zQ6jqGPT}^!s34r-`-5ytKiQgO1UkgFXU|)W_Q#;LRd}p2+(#<_IFdR!Cz>xGh9Bux4!Mvy&Y`rN9_|3PoMOj z;HCbQ+9g2Z(kL2$<$M-(HHD&ZOR;1OG(8-^DM*#9W$(WUCYG&V-B*rSNC?E0tVopyv{fK%he2gTRn^}$`t8(F6+COJ>Dqb$Cw}W z^*X1WHjBqcWoJUyS2Hyv4>|?%a2iOW+rvbz#nEoo$JHmy4?B#(?& zre0+hrmB=h^9G`HV=~L0vA(UbhO;wzvevqP_L)B6{L*?SsAj3;SG%WqTck*W7~+FA 
zC4IXs{W+MaB?3FeUp#&$rNQBS1qSK#FD2jkvrnV*O6V#SW3<>AV=~PI-n$-mRa&8K zKjoHv6*%JNTK6_IVGEH?mu->m%Zg4!YWqJgvRq@}iR69$@wVweZ=QD6tzW`I;xr9N+p2(_5OX4aFJeq4T+HZYup-N zS4C#IMc#3#f8q+BEl$uIO8AYBK0nv|cMe5>Fsp+$JyIdCRhyUm+-4Jsa1vd)K)$wq z#U}u%Nwb*FAMj7fjKkGXbRrbuVsiS7UP~>Ry;s7#NO{^qIq-`9fqJK^XGf#>SpNRovjQiX*t1OK9i@00Ved%5@+;ZVDNygaK zpjDD2d=n(Be=-;Y{mlj##w?&}xuLxICgBmhF5#vzI`jvP56PMtm3e674;@mR+&5>; zVuSFPMJXE=x37xuuC(N_{Hg{Pj=9}>Cxv<$$sQm39mzwGBd!*Vt`Pn{i>V2rjIcoQ z>l=+I+v7>rF}kRL2DW=>_6;Y)gR`IqnlLq8*diXc9e97TheNWDV-+sC9m25<2mFW1 zw2Um|_$kWk?u=#P7Ab-~!7Ex_-fV@@ITPn;K^Nj?Iz=Q?F@dD66Z8^JYZyK*sPuV= z&TCtPa67+aJ3Vd&LrY1jIzQhOJ2btUvV=0Ly0T)=L$YD$dVq?Xq_*RC9mk(4ksy31 zKYtku*?lH{g+nvY5#I1d$4f`vaOCI`?-)Cf3b|zAYzCvQS&BU(zPp)&nDn#bA|GQ6 zXf9MXLNNe(NcP) zwda9>Fee|o9GY+c9W>;&?#FOQnMP4i5s_WQGZv=OP+_rc(E#TRh8$4?s|BTrJ zpPIm=pKFy?%LV@4Ur(aVGRUjnGavBu82M~3k73YYcEf;$sd2%Hb++D<9O`M|H1p5-t#4i{z8t2R*5l3TiICJrEPifF|vQUR=ctMA;3ac-lrn zoIGlbiM7ch&ZfswP-&OiT(7o6tu{ehKIb5MImv2Ur<8x>^>uwcvTEQkH<>W}^hX0EiOm}+5?Oj5~ak$kpZTEyc zsLHMx^k0I_I4$hgl^7eh_#Ecx-JA08ZqslVOzN}3pHez=jWBUFs!?p4xSEt@;FvB` zo8<&lUv})pTibRmSHg0_wNAlE1!rlG1S zYDp(p3q?%&Eb)ssppztfd2(6cPXM9ZT+kLJ9zg|HvF~FTc(%)`qJSGQv>!|Li&;h8 z#Kf;;VW^_SdUaeKk(_G7FmWg$*w5I9AMeW{u~(H6EUR6|P|XV=)?uaGxdguMfn#ev zM?M;lfZJR!CS|?J?X*dK5;AKn(@vCbpv0u(E;uk?@L{?`oflJWfo^wsECdI*Z{`jR&)Zu%?BOr()!w(U^4>vZ`rMXGOPvFaevT|!hn%``D{>D*@c?G zH99L58}2Ip=@-(8c=r zF~eki?u=upBv(L&OPQr$*u)BGqZ*oJ&%HV-u5e${;|L^ovzO{G&?3*((!T=g&mj@p z1r4?7oZ7t_@|92w^POr#zh752Z->g$PS}M*hoTomb4ac~D+3odr;uXfbtwldvjjHGsZViP;~6LcjasM*DiOzFG(j#^M3)~&>gKY9VQLfodr_V-NVm zHs7u*3t{#(P7ibg7hH@E&@rx7>DtpT>7NM=?kG{Au=}*TK*>OO4=&8h@#BI5oE-qv zAOcX;uRdL%M|K~6OL7m61+#PkNC^NV6uAol9Ef^R+y@{r22kFl{fz-o!nUI^@J4G7 zB>TAe z?#E35c3nAc9IP;wDvkwi5}6w}^%($u2D&tYJv9c2oC4^KK>(uM4J>8~F2brc$4)#; z{L^DAN&*i6ubKl=V=gCbu46sYRG@3AoePJoJU36vuM zz#9V)AqFe}D@bJC0hwrkq8Ng)3Bzn#mOvo%zR?8=cVWt@lp6`P$_8iJsJHtzl{S9W1d^LWc`ye2hdm9$LYx~|x8HsJ*9Bbb=qkXzJ0#NVcLE#r 
z1JMOheupEM@8kmXzEi9KLD_(dEHAht=RTI8Yz2#YPpdLt;ba3tPJ8`d+Nz2#GhsfA6Q=_u0r8LJtr3`< zi^D&EE&>l0CjbNp0^_uw;ni6H0}z~_UMdm5}WMO#ypubvh{=~F*B%i3)0Ovzy$Ii05?-JJke z1YGeKnj4!`1C0bydEq+TPCc>aa=J6rVB%H++(_COP$(?Br;#tf8R`05fXjq_iA!|V zwU8BoY%Fogu;Ge@yJEDG$T@OiOA_Ehjm#`@ky^~`pd3PpvvbM>v)iu>R4lfVuAoFg zrZA<%gL~2ppYF_kGsV55DPv(_<<{R&X1)U9w~sp}0%F-h<~57kY$;}2E1Z`EOV}un za4e3Nb0Q`6)@RLNo2T1g!S30#Hc8vDU~2MjA<9eQJHD}5vNSNP=!k7X9-)uMGhRe~ zmU~KA{##S?t8UwlV4#%)mFFH}bNeZ{*j9l8n`rOTjP8I0<F;+_bsTl5XlOP9W!vm+Q%l+Iby*_gZV)$9UL(g3yS=Vlxi(_Byx0r0R# zE9u773N^CcWZ|V5rk@5At|rnpb)+MbB4_#K@VRzb}ORx#~XMb8`Z_ zN-`QOfMP5kiU%pE#(H}L=-uNIcx$DNWGt3LbMg6t71;P>TqiRekHu80W;vIj05W(o z^=gw0Poh$tu~f=U&4@e0Q$yYCk)#$moDk=%G*w;oYeW43f3CARDW{agT`idrldQ0U z=<6D3`OGj}5ApUEJG zdw1CE>=w+&2a9pz%qUX%m7j8Uz3Zrb;5C z<_VpmI&;i6<_GUS%tNVecI%=*%@P2(kK3rbO%{}ssN|Z34-w}6(QF@-!{C5Pf9R&W zD;^!2ANc(nljsqF;o$_-kyJfxnLE8L$l1(3MLKq9SJT_KuCit&k?1B@QsdFC0Hto6 z6K`12*wW>E3a zxfJ)Va9wNNziLo6Opzjm&qSySmZ|l0i-dlt)phv7{Dsyhgr5d7ayDR_a;UrSBv<_* zZGE+PMh(L^I#>Tky9unWc%Tho@+NOergkeuW%j`b2Fnm0;c5hjxy}?r{r$MZ$k0)lF4Aq zj7=+e020+`?wVg(n35;=+I8f)W{FN|4m(v(@g4emCN>?2TeIGgzPp%L{upQ8ofCot#kwR`qZH3bZ*cgQG zgtOJx%Fe1T2PfB>aOW%j1c_BJ#R|+$@F%-?qq|IWDL`gXjcT|)2{E)S4Nr|=U5cl3 ztSnt{80jeGe}_&N2<~-uRb^u9LMw)?{&qH^vaP1NuWF6Mdgnuv6rHjClMb|g>%s9? zbIG~591_IhXRQNq0o9qC-gw{BI%>^bNX_S@M~u<=9mv-lW4dJ0PX^ex=7I`0DK*( zt`2>WDZRcNznl%2vm`beUVux%Qw5Xx3y>kOcsL*_fl%e)Nya2xurEPz>@=L9hG)>B za313LvNe&qTVT|jQs*+lEj`Jjp_bK^e1p)z;U1AwWYiDUHmpFZ*An^E@#MROJrs(h z#<4dzEBk6n$O_mdb)*8D^q>3`i5jzAT_Q!TqMc_fjc0m*SIZ7_$@&pf{&yH#0FZGg zYfp}AlB9LK3Hl}{({3eknHYg*tIn{mR)?C_ylg_SDNwi^}03j7?CLb9#d?m!-x$8Qnn?{K

!h zEj$4}`w0&>;oLO|4?mE`hvWw$`CqD5$PELLC~_tqGM5_&7KSWIN0p=rsLdk598Knl zOWNhq#nMd>6Z9k$foj@Y1xA4q;gTb~q|d7vo^$RbN8=`Iq6A}Xk{qfTNpL_+fnaV> z3CW6-{RG6}YE*DN5pkB<#v>)sI@vSyNK{dym(b(ioG4Z+bP^zSbMM-)ROY=Rs31Im z04AO<0J2q+QBvXyVl^?fsGw3Z?-XU(cxX#P$@C`pJu^$NQ>;95JgI6<6&;|Ojzqqs zj6Wy-XuWhzO;jYMofI-~o)gb+4ERu|^(C0rD?fV(Qr^`Z2gQ}MHG^)vrUE*ws2n0F zOzPP{YI1*ST&GiRS1S3P+VT4&gHfciXBTuau3g#zgxOfQIR<07nedzi?y;62wwLg4 zq|rBxuyieJ5Re}u#h&Go|GGOtmtvSjILYw{fHjeD*{syrVqgj;a{(mfwk1(u@!oh{ zm?2(ki{H6P;v-!_<+McN)qG9CDocZj#VQ34eXlFw`C>I0CIn9rP{fF77G|I%#Q`1X z8SgfcwXCQ)H5bw$rOMOResiUF;MB*hOYKl~ixJQdE4?B06t4mjhx+v3&`FK<-*5?@egTd@P@&a11e!`T7{32%W(n zONY%HU`9rgC8$LWN-Xpcs;CKy88@&IVugsqFIwx$@+Is?Itl&JnSU2l@LFx&`ULom8ObkTGM1 zgp&?XYZd5H0(7Be_~^vJ6y0>^J9_~Xf4^z9D0His5kllNQ!57`1g9iPj{r0g0PzAr zK{d&^5&-=Xfc^~MFr@ZRx47*H;D8fsWfLmjPLzn}gey$X>BtP9;W^>lb~)S|LtvW{ zyp!2zlzCDD2u_CYr$(N1rk!!T~)G%-W=HO3!#m zR~up_aS$ntZ#D>9U6jcKQv~6A3b~gQG+Wk~Z2>?|N<3t4Yp!-G1PG*%liv8!rc<~i zj~YQHj$!r=1`gsS-LFL){LZt)`FRW08zP#Ls!n+VSp0>vjsK$TxhIU z4$EmZi?JOrlDWN-h2}Tydz)<_F^3045Vb|)9fNZ)%<=r2TJN0ktT?y57MkmovYU*f zc$Zcu7n?~f=-y3^4h0}Pi>2v~`{c(e*Ayq9^U=-UT`&j4S;7N~py9{mUOI^17paiO zgK*Bqza~jjec_5naQu_^e;&;73F;W;km?ZLU;E`z5fau);R363*Fo&pAbDB_U5_pt zi@-d6JCKE$%yVU!c50>45Kn(NtR@&sN`Y6>>Sh!vP)H?H*sA&xnUZ#1#d7O7zi}0L zt3*PbjM%fO=RiS;N#JzMU29(lvtXCnT$*ialDwP1{G>%B3e}SIQa6k07UAjZ`a}mj zwQ4ALF_$JruSDCZbl^wkiwx=ka9>C5`nMITxs@m1Hem_PV7QRalmpfLM&e{t$>@XW z;4RqvAH=qscsGx}a)@;4GXgcoZRoA$lL zT#86m+E`iP6mrVkI3!)g?}glTBa*+0IyZOfnX3Q8Fg9~*_JfZV{Cl#x5tWC0`G&j1 z+nGw6B$haaYT)ri0s9kUTnfXz5@T%>3h0sW2IZ}z7F;^*B_k?n zQ7lyNOJoXbpw_;y(VYIV3YKpHrL!-oW%4u~l;UwoqF%p!y*|_U^clMQ(*7%HCo#$K zveP0>ZCCDD`ouPHxQ0e!LA?0*uH&x(T5;%V+K-~QX=7O_by?|5{6Y-hM-h41rsNaj zr5_927(8o8Z>d`A;qzY0vkT;sX6D0>V?u5@RmuSQ)o;sb)55o|$ru@vYJ>}DgC`QL zDH2hk)vO$;L&HX@DRgG5*G5j(kD5}LC;!g=u-6{3l2<2vR47@n*10LzEudbbmR2{^ z-0G^>0jPc~8cEMgxoB%VS(sf}kcl%x++AGo4W#bO*dkEb z34C51-I>2C;C2ms)>MV{ z^CtRcz#{}sXG%b-diYtQC%KS^wqm4imPlpT{qOqfZKZ{KiRgRZ(}6tU63{U=?ge`; z#!0Hd0jjCC5}TCFhc@9lD(}h}OQqN>*3dm}{ 
z8!bDy09BS|i^*n_bO)`8(d?aPx985fSP*4R9XV`n{*{daMu~$3oVhybg*k#)s+!7=Z3Fp1f| zP40jckL5H7405$vU*zT1i1?1`{@*aw z=lC4GSR(1M*5-A=w_KqRe*We7f`6r2E&05U2|LqycPLNH@RzWaWaCgC+HQNJd98jv z%;G^AY^$@wsQfe!X(1?P*X!Htrug|)`=IxS7KfX1;kzdxpO+hcTZ+9K82mLUYA;tT zIy6cs-%oqkMJ{NsHz^!joAs@WI6i;qPY#i`GoNP3I?&v`K^q*|DxjgI8 zR1AB=cs?5qyg)DCyBT@nzf+M#d>40$7XSBD$*2PJH1od>N2SPbM|WeaUT}EmA<th8MC+_6nfx!uT85QG@snr;?Z##+fgm?;d@YU_ z#vQR6Pp2Zul~`uoR1=4^t(_M#THMW$n$n2R)nL{%%hBK$(#X@8toF>&m|47X&OkAe z@MO=lp#b_C5P9BQO=Ki*p03dRx{BDA3!c(Sw9r7zDp3YudB?5at!T)4X2ZtP-Nr&R z{JWc#T`YRQvMivx*{Uq8X288Hlq#v%nV3Dnh;&K1KDP0U`C0nPBe6Sl3zv>g+Hx3_ zZz=O@T7~b_XRB;`dXIEr$OCr_J=g;txqo5}eC?<0@OZ2eDHOiJVs#SaYhaQH*E8m- ziX8sv%^7|5(yA)rNsnw>*N?A*oHRt>oK6CR9`k(SsQ0`blyQtbX^=yA(Vs? zddGy`2@rZq=)G4(480@0C}8Nl_acT~q=N{E7J2&6pGFrgXi$86_^i<6H+bH_tih~8jQ`S(x<;cBPW+mKjU1$?#Kgg7&Eb}MX0e@fn$lYFE&2nG%J7=LHVGv!W~^*rmxNRJ+& zDi*8bKtq0j?!{pbo6{G<6Ur+S|EDmg$Tq=$I^F4XvW2%g22=(sB(n-DZVe2UH5T3_ zKD>XW^rc#Cqs~|tDbQ7D?9FUjWPfE@T1PhU74}W$%~TjUIh*s{ zZ)uEeq#<9FWP7cIzdDwRq;j zPo9k=PRdA)8Q&z7G?fBp-Ic~ZR0G1;3C>Wt-3pyw--OjwEf3_wdih{qMk>tK=#)%W zZd^JFJZZiy5nK18kTq5|=~hw-^d==sNBEOI<8M>8&wlSjt_u?XZP47M7KM{rMPWxw zQtz^>R`!70`oUOu;$zH6DkffX?uY>?pT0b>S+B(8Az7Mx^^Em%aMQfIOi!66l6(Iw zk4_lr7sjA9XNhkrL^ydH(%!ml-BLK%Wb7?AM40&TZJbqQ!rS^E zGXJ9mbBqreXD8)TyMUB?rq+2q2^;fs?|P?_WD0Yh{3`uz4-q?e7E(I&wc&kc#kJ$t z7R&i_m6f>n;*!v2_etdK#$t#i_)1Fsn2OH0py(<2a<4<1pF4TQ69NTIKzx1^ORu1_ z(Tyd7wv@O&?CsA?)(@$-$1C4R-9M8|YH~|n91^wdX|X9eJPx)0!NB-X-?SBPBVx_r zYtN}$)Z-KqK6tC3>;u10-oASj(v|)Dc6q}`U`NF7(5nrOt!ehyGZxqfXqRLt(hs|Ni@POHa4er**{bdp4_`vs zLzS|eetrF@@Wh~M%*a!tC!*<#_vHPcN@LB3cF%8@N=vK0b#iW^?@h85wd|X5)lz6J zckIOQv?>=h?&(=*2S_}46U864{bqPS)f#^lvVXPlt>6_`2^v4yazy_s@M+oqyD(>* zV(i*f&mq|jozb}|f_Z8B*1U|5;YcN2hI!`?Uv&A;WDJ_-_!vF31uod6%2!(V_^_u2 zPVWU7-aZ!SP24y1c-~Xo04jRD?9ggCsv~her=qm%Et$;1^YD0;m9(7XdW5F-WII(| z&ZS4c$mp%7n|LWLXl((X%=B!kUYb@v$bK-C#q+&sE62N-Cy1>s%t=Q>t9{l=!o&1& zWw<_*U14uQE5F|z*r8MXs@;!Y%-t_TTu+3Zp=SEy`~ZFgpV6^Hr!ibiSldbuE{Vak 
zxreZSR+RP)n0uqcIzR*Rh+Ix}I?3NEzxpKcJ?hW+!aHr>1AB$y_P`J$v(|=(FG2-c zj>8^~)ryV2-|&)yw54;tz9w0)G%X8zSX5q7SF0A7X>Tlg`el~6R+S8$ER)CV)MoM6 zg!nA9(dGrul(ZP8>1j_rCatYh>ixyU1UN`Qt_Fy!qic$h{#%&2&~q0L`EP%~jH zX}5rzOwm_kyU{$LU8E8RyO}!DKN4Oxa9tsFPqi z-Zk~_x^=!@^2ixc*aT51znZz>{qt&SGh12W$xwLFT#Qz-;&cnNgrCkhOzsSLU|%?3 zND$0&uk!e45_CU9Ajnr)qDe26xvN3V15%1rjUa6nUJvBn{ zt+e=RX#DQD`Jwo5?1O%9e%14hg+zDiR@{@j_xIIRe(kX?I~w;+vu%zbZ>lEEk3N~f z2G+q4*9qWWHVJzpF-m48JxC>>U;>N43)l-@+GLzgCZOzwN zxf=tw2BtGIhH~|A+AwVOAa49BdzzBEK9yw{kX!eNExTGarsSft0XL9!n z#}9Y=-7dzN1k$G%&5TlSnm*5k%dUbUPP zAMKMHaso+`lDmOXU8-m5Ly!r4oi1n_=(y z>gddx_i49V=ke?>zg^roxkx(MNvk%eb3+kaQ}ZU3Sa1@C$eTPn?oJv?uTM%VKUZ(o z<7|D{PzRKPz7FaWRu)326 zsU>N{NnW=|SJ&wsZc~ZB6sd4#U8cm+>cn*sZL63YgGulI`(>F<$Us@ws$Wla^9Tk1 zP7De!ijkhEDqE8bt+zIAJS4@3e*1Xe-O4@{QiRY_s-KvqdU>ng-Lc*(l6;$clDUcr z=SZO5L)JkKtX!zlHL}PXcd0-5XFBZZXsPqNQ#I6fnIU{g+)mxtObZdtX#fo<< z`&6hM9_PT^P_fRwOSM3$SIE98Ae2ikVo~{~#GL9fY-K{6{y?~SEHTZ-7;>nZ!yi2F zCQ^9i-1@mNoBD>~c4WAB$*VlaZy>36E4varwTd|yEXOHg_P;OjlEloWE>C>EyVfFk zL*-^eDQ@3@KS*59vmP3Rm1BW!-eo@vy2I~HJy-=ZixGkj^r23?>nQTEKDQhH<6ltF01hsi=gU@wwvomqnoBT>n6_a*Coug zDnB5{T{tfEE6*<(|63QTofjh*CriTpJXSSf+K1x3#D`0ut=Mm9e0#;!Qv89>x^FMw zQ;Vbo{IJg05H=p*GyZL_D&R|TDVHz>doGquoMI{&3rhyFM*@Gih@Vu=o(SHZcJT|k z)*$iT^?ZWkhjdggerUA@{#TVU7cTYRtHb}Y@6NsQn;WLMh`l>kEcGY(?n1j1gE5e) z6(nRGpBxCf+Plk7`hK;2_mw>Nm8IrYe#;e0ARq)Jz;C*Zeh6Ts1U-P!C}v~y{n3ir zvgic8&~Yuru{3^d@;62~`H4IBfwD{qRNqten8v6bkGcceU@wlNADzWH*H6W9RREpPxB5>HGe|0PULzEfUaY> zKXy^=NJE#pxIY45$3cj%T{L9Dh#Nl;pQU+j1ak`o@zQlu6~?m@;shgrbY9&;wOB!| z5uw^R;RnGYzCT22v8XsIUCJO-@|RtOD2GifPg^eEmBQA!Dc zJUCUVpj4zAlgG=d*7hja9jiRD99e)w|QHa|^9^H&p*lsBTHGUP-U^ zGl0bOsPE9E(aZKa?bd@$^xGw0EPiKdt0pp`+)0gq$#tAeRV6o2PYn5FCW>T5zbOd+*4I<9WQdBh_oLlN>QCIQJtw!ofXoYEr{mLRGqaOxG^3*-J?`~ zcVOY|z`~n>#m7+DYkkx{+t&ENMKV~WJ<^!JT^J9X>wY{?7IvyOW1{#f?!OAP^3N&j(DE5?IWpYuh2<&1vuN98LldgQAL@y3v9Cvo(Jro~v~ z*e$Iwjp$>#7s+G2Xr`Zu8qpKiPAn@h?W*E@%FwjYiyKYR^LPIJw0dwpH+1g$;=i1s zpRWJPYX^9lp8sO~`|#0=-yIrDe^g7~MDdo!(~tlucVgCL)z-`s{`#u@`}FVi9W_jU 
zzI-kL9rwy9iBg)EtTh?xtYm#*zCE7KDD4p%8T3gjhu(SM!dCK@L4iRw^gHAwQiAek&n$|yDpElLAitB zXmaZ>qgN9!I|rv1vkenJVg3$x{NJ{i{4rv6a2(#*lc+E%ksOpJJu#?F?kpEcgfLch zH)NGh`fqaC_S6SCkNUo2Qm-87b{(r?Rtfl|%ah@~;eM7-L(dKbR6STo~pwrNRi|}L1aEd$X z@M?MLMjW-Jep9ODEKg@}8mz7VRg00-v)SK}S>ZVbqmF?VYC%>X$f`afo$T-jt8h`;I1C*cRKs1LIg zHX~iLlqBvK%%{gS&UI zkx4lxVq$U7G@|$$-BIXpyXV7%4+ATv|1Pv%9WyvE!?8;Vo8HF+a!7jc>9dB-X)3Btt-4}qbL)Z%^itGd{`)Y}t9F>}$ z60MXhzUqjSx}x#seU1{|mot;I?#9gx#Xh}UKiDN68BFB2dn*V>TbSMcx#rNp`hjmu zv(ympe#2g>H*_Rp_V(-9-6Z0xNLQ5D_PioGoi4$zGabg++NCJcr)VmEWn3`{$u|pN zQYbU~I=s9#sIl{ZjG^aO$#ad zyl1Rt(-ASX5OD%#*he~}22chDH!I&Cb%wHtK7>RJUI!akipf!FX~sDxnK(oNTb6gL z3^%p+_)k--tB2^W<%08}H%*7`-*aU6vX_ppSet+O&w5QqnTGcB z8JN)WDqYIH{D+}3?26N~j>F#+nc(BG{FPI5rLrq}%VAba_}E6#i-s23D;AG4$Fx~g z$IE-cUz{*#vV|r==`?#1cE~lak&Lk0Y$o1(m8BDk}0op0snl0IVtDO;C}?>i&pqo~X*pTH_Cw^rF? z=W-T#5p^j?&(KbkM^qwkh4U>A64i>=*h*MHV~Wc^)cMtk^05bK`v~h+Wm?GiKDr^O zGqdZ}t)lt0?hkKKvq_XpQ5{-9Y~uU{;rDljwrCEExSLlk*5ox!HD}&r(j*Y|HXxDW zhmBN@_l-C&o1~jMNbI!dqtl0hcI{ryI&78;-t9$vNeTLtK403HqC(m~uQwQ5huRf3 zec{?(bu@H*CBa4sxS3}){A}j+`IYV>I;}n)nkD_Wfp9#W?7@8*AVVXQ^|y6_ z!y{{2TNkc4VIiFCQ!k}Mt1!ltn{Z3|#CLE~q~NM#os1}5MrGnxm>*0;05lyA6Ode6 ztK8?pCtOAaz0+nAzuhIxw>?K_i&?s_F~Od#^OJ{>rSZewFghB4omy57m2Y{F8~$y) z&xO=AC-GzpQ+s8gnbinjsPJOU6p8T!uZr6uYnZ}y8nBfVF1PsxzjijVO6b?l@` z!|HVTEeiTD(~W^okj?XuhDA=;tq(qwaK-cT^-#F6w_n1ioOTa|FO2(h`hqkiR0nb+ zdo*(Ep6R86cCqH_i!Y01Q?-Pj7F4G1n<|9u0@61gzYD<{jDqYZFcJY zb%c1quOAosbM6gdrDco>PFc`A3LYR2{89f}lOO#t!AScWnfCP&vgX)BX&iFNoLA$C zwhP!Mxgq%< z2yU}8DSq*0?{gND<&p8ETG^a^lc&^fcnO)}2@enX4}`m_ztxJQ-);;C0fV5@Rz|}w zH%A_NhbmI72*^4dd43it84$Nt1zi0s*pF@hx2e?<_TUG@aUNaFG^_#=Wrq>^~G^{U@PYoNSd{>rVW6|+dYm)bTR6jDKGtduWkvP1NV0YvWp z`s)1e9yCOGkKY!SHc+Z^JNl;KP~_Z;Xy+1v-Ds&7k#p?6++A|1B2m}vOdbFCE((?Q zt{EbO9rQQZQCkLbw^L3Dp}y)R%;^^0Y-KfU8x@)-lV(pMcltyi{|v%K=^w?5AgP`I z>FY-QGqVjA>hSx1`B^QwvyJc%a^bfu$@u|8wW$0mz{ z<_qvTgsAz}=IrpRmbSWGGQO*JP0<=H;VGi(B7`qRFkpL|*j%Ykv6Cq*7%(azNOMD2 z{2Y&>H 
z;>3I1G~lBa;EKezVPpF8*>}`SK2E!(5%er7~u^&ZApf-o)>J0+dyiN;A}A86BT@NzKqVOX$tztu$u7A*Oq zTz0cO89(yX!Pc?wOSW&MnwhPtdOoj_WLs_B;rhy0ea=3rPns!b>UVc&{R9$E1lQd= z9i`aYeu!3ktq@&4?`X27no0V-6w^6csaY16q+ZpQkRTjaMPqw-4`@gT*cL#hfnZYQ z`73~iXNLZY`)#UedAn?X&i($y8yd47H}~Z6#(zN&Ia^nEN2n!NenY7%a+j_k-#1pq z9ey?yQ=VKnDGq!oG4CQWr<+DUa`@mFz&LB{@okcW|Ja`VTLAUlMD^+8SAN)eBaBNo zsBt8Tr+o_p8jk!SBxaV(%M0;K?HEoI`2VK2x&v1O@jn*8nCgPQiYNqxSc8OH7{s2R zs#v9*zxq+N4f@WKoFK^5G=_R0MKI*Kyl|QA4|OL7tYXj!TaF+- zDA1Cg8Ob|rn}_5#yBTPy_}Kde8Kvv@T2< zD8U67Cfo{RX|5&8Kg_cI+&*Yn7tYMhdpbKp#Fz<;ZfTdFrrg}tR+ZxH9T71|QFaEg z(Y0#d>ohr$yA!n&b|KKyMD2S^isM-PJgGbR#QNNw-x|TKvv~=4 zi#qirL+0mtsE}7%0k-JKklN1|=$NqdvAVIEp#zm*Wo1U+&SZL99qZ9#HKuC%e3oy= z1m6>!@0y8y&mqvCWNTZi-hB{#0QO(?MQ8&wzL*HolYR7fmB2K9B~PY#Ob+6U4dFvb zZM}p3AWsPtHq+E4frqOh?n_Fp$HtzcN{r&%iz8faogFSq?eN3)OqrBb0h6CUL$FW9 zTE2+kzEzFKk1fWFAIFbhXztCQszv2U4#}{E-(|fVHNenx!(U%G0btWzG{f2`{sbBx z57cI&g@h~Y9pV>UE3%Z8M~+X5IbpKS@P1(HeJ6Pqj}iI8`upNv?lJHCS)Z@lrpgb~ zdOlz9{O}nb%+gTkVMB$5*J-`g42><8hv)El{X@{7m%z!LAApzOHZybg(u=G^2aj>| zsXp%uTfQ$P_$`F{7ZrJY2+iwxnPBKjo?=6?xtJ?2nuKMS4?*4}A1 zmkg~|Os|a(J#97&Mo=_iS(i@Pr4G|WaB4Jy8P_|Hge}&K8mU5UiSVcD0xVH$nEg&f z_NR5@-tX$Ss9~*$?9)ioKD_e|8_X@sbi2#qSE{_1M~gLrLZ0ouuWNMh^+Djpy&z0p z$SykE*}uX+DU?qk5a4-LVnvI6dr5-pIx`tNGii^+K0l(P-C{EQRjA_(-%-#q*#+EwOu>p;@990J7dL;EAiNM zh`hb#%;y$)douY*4Ya+U9xg!xm*8qi$~*%vP$U@ylIfF@)((>P%aYtntLRN3&1u_Q zLPAwmoSW5L*Qur6YQn%7NAHM`C5S5-$2IKCg-{kEI`ivpQ>0&16f@kavQJkUtj!wj zivCvRNC5KyfY(0KFQ?&iE_Hk)04*L8k5qq94dEx$Aqn6ihOB26S)b$075cdJ#-K7d zVCp7CClQ5km0ZDHpIuY1;Q=goU^L#MjQ~o$2aF~{`;NIEWusdta!nIB0s&-Pq-(Ph zxGs*vxWPRT&!G(fUH_#Jm@5a9NeF2Zu=v|OV#K`vzuHBiO-knnWkyW8&R`ZiK%79K z8|M|(P;{RNN*x5=!!UJzf@82?+WSCxM3F>~RXj@rIac<0TcdhGsRAB&oeuqmEB@VH zq`O5cjsdb@y7l6Uv`0FDcp#cc>mkDUuFWUmr;VGlzfu(0^d^9d1eT=NfI=wL0qqOF zE8ot6w>RrQZ30zE4oC7I2M?GXfk}UDk^1=S>YeN`YD*Uf;=%w0i3ZFdG7PbPfip-F zBm|admt1ULuP3{nMB)XI$J&#B4yzGB03``28@m*e2oMwjk|c6=T6g$Jb8f~}^cIe+ zuGY#`n*6%5fiwg*sE`2sL@*BtaNRyCgZ@!)#| 
zASHn@GrUlznu44FlqYpj;kxO!;L5-!oH1Z=%mXy~f%>1#du6_~SY~k?Kpqbk2LP0T zjN9Mf-w;pkle!={Fe2AUIOb7KDRkssBg2%<%_dyuM{8i7NVy7); zB76ImO8d)=0X4*Cty;ScUcTg~eZB$NyFbXxYfa93pgw+>OL75=dA`ryj>JFTjZ*Yi zWNALDT#O^q43V>4LSiq1c|Ey&2mla?Xyd{K9U?O&kX`_}RAfX5fI06)mTaAnhkE(_Q6Twb{5LFY_*U{TAO6%+ptbVMluNfrPf+buqJj$P< zYH$D)hUbT3OU_Lh;V~*oB3Ub*_8o#&lm&*u04f1FyKzk&0NH5&H{`A~t$0X_LbN*I zSqKqA;sVVNSp*}7H4u6>2ml6`fc*ewVyD=?m-olR>~i|5BkhybE82g`aR7Nz5jz3M z@)oYV`;wJZX!7f|UL0H-+2_w*q!ahuS@f{n=mp^Y=oW0dJc{AbZu2wd&hY z0@<)LPb>n&i{t97cJ!~Kf)dFXFrdj1forkI0|Hkpi4000pTrVnr%nO9j@Q(+<9(G> zRP|^QHK%ekrF6}B8k5+~p;YPG$t*4v%Xd$&q#j)lco}ZbuGN4+ECZqY2RMR`q~i z;+;$*mQ0G=+?_E?{alS-=5v6iAARf&3Zn_qX*y;a7 z$#=yzN}2KA>k}nERXGcC$`Ta~V7(gCH=3SR>YUo{G?3CWD zd!D(&WV)()43O*RFUX*(rAf*(kOWR>O3WPoU;s&6))+}N80OB&7%h(*Ss5AV6y1q- z7y*Fo`kif=0Ec+Ul|!_17ZD;Xjv|8Ca9z5NFq|_pi!eM;;*M?FAG-mYnNJ|!WC8LK zaG3#P8AaE$Oon(D=~`8tx(kR0W=SNEiLLCQ;(!SA)0@xRo9}NrOO_&TMLZFB0|3_= zzoP<1q`K(yKA@IK58w&~K`F@2Q0c-zk7W#y%pR8_ys^AD<(EdC|L}(5n_^sKtvwMS z8Yj#!&CQYFflZuK;AmHys2%=Xm~dmD_SV-Bn;fnVUX*L%7zfk`h1FD_*OoiBun%%r%vISm9V2 zjUhOe>?7GheRUG_pdLe}R_=Yes6iKV_R1`0NuBb&>?_NG9gq+0Na2oVx|RUrnv_KI zPQ2s*S*;axSzs^}T_IL)GtrS680!1=$14+KDgX%U0a{gflWXnRW z&TyT0xp8{$)0Fa<{)?1*8(DlK6msEJ6%L*`5YxHJR@?PUouGZyh|is z8zMmXU1ew!h-X;kW!@o6w&Y%3pq!mIsr4HBl~c(_(qN-YR=j@|Gtw1o6Vy6&}+wh-Ju^NzoH%bZ$guW-^7~ zp&Q-LWH%)vB;2wV|9;WmwZZS!c-Qujhv+@*dRIs?`uI|sk!6`03!SE?m5nBL{h~kFTHZ&NFY_=r|X0X5i+!#9kfJS$*X|{I-#<$^&+EP2@ekk$A$6bk$dQl zfdxMI=LvNeu}{8SIZ1lC$Ae38wLfcTq?+sl$GB)qWY-w9i*LHUP&6ZYD`GZ?OTrFg(NH$@=D`yW9}uyueaQ+2A17Y+CnT;17y}FGyOTwOmTad8ZL(Q$73jru_SYBw z@B7|KUXfb2peHQepkcI!=edM{h6`%Cke|uY$*lWsm6gD$&Yc^%cRIneBy-x4QyLLg zT4D2RUMFo@4efRPipGFt@um#N;18s@j|AG!$29-6aU_iJ8+hFBP+C7w910FVgoh|K zC8$g%P{x^y+A zR$Z*N&JcDKtIsjH0~X-9%-aK`4|dj>=-A)#pqXe|P8uP&;>^L|;F1A0aFX&(?u=3`{OT@z{GI$IvUjQ20$-x<+&HF@ zR8Z!b#<4{JA;)0)NSY&S5cMhs2mk+sb$QW_?AJ>uWDeuKh(v~0R~Y8=sY(| zk)jvJpdW$Eaa|By9e}R~#G=^Bj6@bdBEJKabOcJgWr(oFP~0hlK+J(k5SzVi)}Uf? 
zs|nDl3@kB(&1H>E2>>ib(nzA%8Mgt8Iw0~8Hp<%AnnIgU0ltoE$rW>sW@+zDBxTJK zm9Z9gzdST;i7Ey|nSzOiEtO5M_@pk`jb3B|K{{zThmX$OLtfftKe^R~KB~T(<;)K!JlvgR zciN2%8PigPr)34P325@6w*#bjVXyHIZjh9&u$IPrC}4R3|1Jb{e$K}q==95*AAwUk zSu>{Y$CK;PHiK!mmuR<-K_Aa(J{JP^_-QpzU=ku&&KxXc4rUVu4qMR*dV<`4&7B)0>+nQZ4<|GI}J!3$ZCrbcfBAV?*_0R*GFuTD^CEt zM3Z61WC$kDP!YD6#m^wuyNGZPnpekA zp>736R8xMOJYP-hqyWF#Hh>?afULQDFvA#TN|DE3PF$T!hEty=VR7J)CIAx!;c?vfV z5Z?x=Z38=*6gJq6UP6q{WfWy(?i~N-Q>y_m;*<=!`2mBE08@7<@Wbky!%)S}P8`jZ zq`8UM41w1UM8gSuz)QPJ=;Ra@-UB3jx`rPL(!zd&xrqRp>z_@Z*?;J6(G=moy{ckj zBaEseN~{c(@fu297J9xKFJ6I^IxW`V)J7^vO4YUh$%-0sCK?-w-TsXA$0uWTFis~$ zHgQ;&^x`PJ;y?_A;|WUuO7d^dwU5XKRa7F7$o42sYcClbQp6UQ?0GXuOA@0H3hu|Q zdNqu2l<2`Cf2OtDW|ZH{?6Jrgb*8NQm09lNplhAs{jTK+vZ7qTx9fl+m&~9pjpuO~ zV%|1paLTV1Pqpui*zfYIflud+-+C+b@TarCJns3+Fr*?AuS89DJI*NY@~RDGnX@r7 z6Z0wHwHROE3iPdt(%*}mUmL~^#)S8eXDJS5A?XAWJJl`ou21H)%ol;ItfpXbl~l{Q zT~d5%{vB%DF3Bl^RH^WJ9qr%M8m7bfm+unLEpUknjb=-AA#9QSfLO1k*u!o$iy#ft zAVx!HH)H3bORYxN*C~iq5Bb9|8)?X`Syp8T#mNwa&8^Qa@1;M&T0F2ZUaUWCjiaPw zym_^$9;x*;zpD4xF$LI94YpOnX!i)}bVYH>6=ZBmmn$W3#|KXKA}Qr+2MP|jdn~jO zNBX_Wk8wRWO9+UiUvg=k*V1RidgJ{#4B}40G;~{kLqT2;U?h3P`@m3@uef!*Zl^kNs)y^S|Jg2%Bm2h*pT zO{|!)am0yjm>qNyW>$iOZwsR@oR{X`#4IGRwwlZDGfA8*ys3l@<1CF}i!w`MhN)p( ze(DApmgkJre?GA~{YM^rzWAVP(X}tawyVD6IjcodUzjMJ*s%@tx#VLtSbP&ZHjzU! 
zapJUF(yt<|B6Z?`SG(IB=48Aade~xF$nAOdHt=$S;9IdSP1#q*-OV!id~Ib}iHB%= zqxY+R-s=68Vzgw;`|r~7TyWGY{DVe|=Nkr!iL!hvw|K7(7PKT76r{QXHZndO{*FD& zm^BzRy7)W$L+1|eud%hIfCh-BwpMTpZ&qjZCN7?4gJ{O5qAbilpOi1?WcemQVoQud zW#RA*=i-(C`MS0(!!WMkYXNM>Sd4ofwo^d;0wX5gu0D9IRuIFm>9;-XQWWJm7Bd1V zQIm|GX0?%jc?%v96Vw;0xD&kRaX_tU+X?Eu&AxKn)G)GS&ZBLmzKh%CZTIIeA5I%w z@G2PaVmL}0QM=#MH`!DCZv3c)(N#O?c;fYz^ap85mD6_{UWiD-%FyHY8C>sG7UYHm zDD%(;@&w7fM#GN0_AatV;x4gkzh|szW@KLRqPMRUdI}#q4D zhh4TUx!DUE670)Sv{XV&m;th;R=hYbLGx30uQ)Om_AR(l}=w!wuX)4kzEcrC} zztuVTVg9h`Pn$;~8l{Kps-jZ?hep^>#xQ1dSK&;+Cv=zEXYoeV;f4=}R40 zyDRnMo*?_Xrh9~p{yS4_B3?iv8HKd0@v%L*knI zD>(Uc#UHPHAM77g#UvR0e)CaV^YVws>DYtInZ!Gsk!RGjU&a0-hTm45wldo%1g<@W?xkm0nxVFjZCc`cv(?;YlUwb&9LMu_TiVe>U3Crx6RaI*uE?X= zO{0^wJFJP{P3QmEeg8NVb}NbC7^O{V-zn<*dWcbi~jG)+iB15i^Tzb{qH}Arf^bd0n}qQp4|QX z;r*Y8s-fBeiSiM_n~BR~Xx2Oqz1 z3ui~YfUuv@y|@!RvkQr2^BJR2=u;G~7TG*r%1TCge#D zz`8=8)#=*ox{YbwBFfsN_PmDbq~i3%LSep4+wKvjhYU(kp{jr%V+NY!L9c|Xla&ve zH0q3Jo3w`yMRK(}WyD%L77bU7(K9X3w)S}jZBfc$q9!<2M4y~p2UUAQd*PNVXT64UM%9(U9^h{rf#cG&PyMP~ zC6$Bb}atZPohV6ba_{(+2f)2!R6 zamUi2+hX^qa!6ZQy{7MD<@Yty9M2tr)*VyFmqr%TZfO(kFLJLkI@KYr95T;ag9Oko z<)8G*uC*+#9<*JVB)BKiw}?FR?ZHyEU5Ri6she%l9NC9aP5kct?MMT8>@AvF^bG_JozuA3%u50Jc z^EkeTZrnEt=lets?`!VNo^fe5sB*YX0P`Bow;K^f-!jG`i;ss*AV#x0&@tM(Sk zyrr5S5+!qP4ku1kht-@WZW`14w)NrWki5gjmRkX(n{C&VzWC3+gFX1mKgr{qJAO}I z_rZr(@^zO&U;JxeOqx#q{xc*sq(#5|kA3UkuRV(#o_NG4K^`CQkuY19oy#Yh0ax z=Q9^0!y8YUSpF)zm?q*?QNUm`-sZ}D57K3Fbe(9ApLq`J9!XqH!4mmu@g(E^^Sh~^ z6NQf!3#Uhm%1e!t*12k@r!$JRpavYJQFIcdJ_j9|aaBLG%rAFb_2~}dt?P$;mc=kt zU2||KZQJ`l*kLcWT0*5HB=y2zHFX`M?95z&@8m?7TL~+Y-aN!H=>9gxrr8y4O08{* zH9m-R4AuYXVC4*gCxU1G7jgS6A1HptOP50**@8C!`EYwI zk~jJ{;1A-pq_zRegT&{7UZ+bc+RIQJY0ZJge#n=S1-o5hn**?aoyC@CLAtMlTiZBq z>CrzG3{G-~l#WbZ$hE5z6%RJQ_haY}W&*=T5@)(vGUeUFS@dYhXCP+9a=sVt{Y+>5 z(21e081g1Tx1f?O^uCzrd217!;o?w~qII^b?VU?UdoSy*jY+%3qa_}RIMmg^n?2ZM z*B`y(o4dZ;O`VkXuq9U}k3V{fk+!?_S+Z*B+RNKjU#mWat}bxzX8kYP`ZkkzodFom zKU25|e(;;BF|6O$?;lejSnu9JLnhf;7^FMm!OiBne_iL(5_B3WmZiO?C=+rv9y|Qi 
zRx+m5v)6NtzKeK1JAGrhu&e5I84tv2)gVag#|dW7Ga^g**X}bP=Dhf^saiK#*J{z* zf_fz#pP%k1n8;GyILjd>HD|pq4?Fe2J6DnddN5-KgW1vCUaS@? ztvuZMAjpgl+Z~tbY=YV@1rL^S?|cbqHM4)befB`+jknL2nU>AEi7-3`1P$K+U+L23?2D=u z`rpCeq~s;3|DE)?eOl!dn25gfdJN99bD^xeBu(ok$DKuhRi*N1A0-y7;}V5mD!! zIY(#wv?PAzbK@WFv`)aAMpaO`nyw^WhUO){-TPWvX~gkeFzd6k#m9;lbzF9|c&gpTWkBoAksxhL?qrsvO_-dxMqp$k)zFY5T(KhaeCm!GFe847dHeH#A=*XzoD`Qe#Uk=pxc z_&U|K-CFDK_?r}|l}XaR=hvh;8!4&q_UPUi49Qts*50+fK0@*}RBris~fIw^jCMJ(aF_MVbHR;9(k>-_$Jz=yaG zG;3#{S{7`>2SmzmB7!xd;aez{cIDPariUxhR4t9wI1HDu=KbfWLcj?90j5!9~P9}ONj^)5n@w9sMp#V=A07t1L#%>^wEl(~yNmnr}{zVNbG* zqM>CMLsFMnmcYAT8VEFj;CDs3&EOu1HyHSwQ~Eje{qWRgYLN&k>NnK$7jz{C?lx78 z;H>02e4Ip^yt0U*3dX=cXg^8Y%wGiSt~X~0Kh1CxRfbn!a`RxNX5`SCh6_6wKT9Lo zBdyB$lKtO^U_bSmBjs9w)`u~Tn5YZEn%qs*584IzBQ48*OO@67bUvLDt&x)LdR<6n zx*TWN|C*4>7DY^y=~~n5anjH2voFu)RApphc)!9Ey&;y=Wq`Lbxhh>T2(FfmlTuzQ zj!=63KWY#8&f~WR7ZZU(+Ey2nhN`EfDmh&Z6FKq)xsNOAdDBD>XpF_e%H3fLPIHq)=wTFr&WrQ_5Sycf94k;2vH6twIz?hBtG|x zdLSjj%4b@521*V5nJudS3H+uG|C@kD`G`9jScE(ouMHC%Kje+FMRBL3^E`XJAZ5BL zt;W~g9MvPm{q8=|wg!^@1Z7yzu$3W{Z@SxEnThBwB4kQ@ZwQmFIx;ZI^ARpLRNtPg zskVGtf|B`c&ZXufS!XY5r3hv1f`?Kw3LLnrw2WUUmo@ot>xl55mUdz`FAz%2hS1Cu zsmaURmo6obQR!n$mo~Ppw7(o(F$e~;5Qba!$o}g^m2ERKS(hD}%R^Vt>p=`SbJ~k) zL7Oc>-hidt6w79ED->X^*_dqO(UlsLpXZKlKOnPzs+c{h(+tUAuVZFsx}LHU113|i z*rwd)ybf0H68{zl|Asfb7JOBWeNLlf?qWUz5B=b)h7qe_cPKDgTk3zz^FW@GpqLJ~ zCis7aP#PXfJG<%zz!HDYU5c)KA|^~+Lg>RwS-xblq-Ghel&sY1K3$^96SIPjsw}Cx zEiSe_W{_r=g`Vy#RqZ<(wxWqoxd!vf29ae(r3yKa#h1z11=G(5(001|i=wHmMpAZF z)!c>5z|eW-d(&l$Mq>BYI_D^huBt4{b;ipGhNvKhgq585Jk}`mi&z>qePds5jaTa%s<92k*d1Dv< z#?jajTg1TK1@!)(BaX$%N65)n&dINrC~)Fv?*g?)K_V>nQ%9x3rTK;#N$HRg{D}q5aV4^99M`%D>Rfjk?rvcTbE0A`xqNQ;O>~q zqxUO!{^yb}_bx{Wc#-{G;lK)a23-pUEcro{$Z$mx-j&t6mZ!Q(mbjMwAXcWnEB&8q z#SdaRbhYG*>m~a5VS>Rh+^tdf?#;ZrDO`Z7NNDSTTic9V`?_1l7q`ydRXYrk58=*_ zc+ZD^k3hTgXAmFyyZ2QR`*VPOj_!{t-5(FU?;mh~n&aMUOmuWfb%1*e33<=~Y$LiJ zqtA_jzG4aF0+Yz`hur?>|$@Lh> zj{z>{IB-*l0zvCYzN~v4fB6uT2#Y!KvinAi(uIE4^*+<3cPqVZH9y99 
zdY>6X&xD}AbiIG8dH=DdUxIx90s(4{H?rV>tap>x0cZeJ4^7rlBtMd(2K&^Hf9Z?VQKuZEdgF%iStz4W1<5EVof2_$IRwU5R0xY znGaj$kA2PaAhLa1ihI5mv?j75@3yMCpVIi2`nfOtvfKxB#RKG4&{>)lFaGlStUQ{LDuuaiyhEP_>HaM=rR3J9>-^A=9?bJ`2&*b5#Hh=_tkBnCvRq{YA)-h~OoC$j3u?Z>F6 zN0|jiqXQqA@5e-xC&-t_=N2WhvW6)##CG*NETqLfhQ*pOM6CdSghNsSASO`|vQv^!KkTjpZEr=_=iH=hZz)xgWu3M)Y>t$?w&iS&l#7ql zt00dWAliL?#>Isu5veA}q13b;lj0ro!_ZNiuu*yZ6+L{Y<}T7J%^MJkEcUTyJ(^?< zMV{}p^M=|-g!pfS*%$9l+Q9tHw%R*xS%>Fpl?;!Q{@@D%v zw3YYBFUptM*|)wU#5T=WYBsc2FZ6LjsD`|sBCDUAjo)LF?Wa{o3dMd}9ey0>ozY_a zC~MfM(&?B}nB(EtK=n<; z$2R+qaS>nij*T?Zb${%mPWBU!2S%7k8`H=?duLa_#@SvyNYD-j>_s|UvXl2fdGjJ-{(X;2;Vi+y{5roI{$Iql95wes6asXHVD|z6mzO z-~&Ikc7%m6z`7fJ!%epZ9)xO)`(Ao*^pw}n>{qnX`M17X+nT(mauP8Trk^k0in;#a z+Kv0%&DTE**~E4&pRmn+Zh@uIHa<<#PGrA+l408-%$*WHZqo1*c=eyVZzBR?C|@+c zet#1Ii#U$ee*5e4HP}b>06&|(yN!E+AJY0JBR;dTnq1BOVD&o|!0=fj;!yLf_`dz; z!Q&j|vseU4W8f?fPLkAwo(li4VT-p3{Q7;6{Jk%zUL*3JN$`$Ue6y)Zfb51W21hZ8 zf~H4RrfsH@;tt|G^q>iah*Rb>nxZ!GE*4NkQ?pm`(Wde`R~Ox%Ywz6ORHn zs&qgMOgce|Y!6c4jJ&cXrg!!2R7w9cfVI8t^qRO8uCsh++pJ(97xf)3yE|DZph1dr zJ>8xx5LN?yhkiDZwk|{4mO)gT>S_${*t9srNr{_V=g7B1CWEo{4&}mQ+J$z~0-_#7 z7MX-L7Y_VfjrGyg>Wa&&Tfar#Mf)k>VohmN?%jTqMYMSR1m?EIhL5FG*8hYK+gw#5 zMkbWfblTqG$8!|eVs*kw9!=$FsKA8Ex>X|XTRR_5m%Q(b+M4!%)^NP47TY)1{3yKq zTq~;o&5ZiBV~_Cl?(n$|A&;L?tB12J3&pISmDo&#(iBO#JNDo&oDo+T;zO^6?I5;b6w1En- z29C;w*sEIpMRwM%yM?x5Uqv6|uQg!uL9=Hd|rhSNDo~xv`&X@PlAjSzv=!8h*z#62wl!QNq z5PfYk`RJ5^GQrZ7d=r`BS0bKY%N}=>3vd27HoJWOYbnn~KlqJ3!0L%ML8h;*Vrwc&X%AdAz5Q;^gw}JKu0`C7nW|`f)Lg6fzrjg~ z+x^7cI43o-(-0F^7?`c|d2A-E`=!n^xA%#&<8Z4rRK$(cO>Jh2@t0@^l?Dz4BM#i@$GrSLe6!1mc7{5d_+uTwzDp{!$6N~*$_4^WqhjpUL+v5w&Z_VAOXn5~Of zuwTX>$4p7n+)5KkA-L#Zb^ps_|8EtK7_-|PM!H7D7nz!SMFN{k>6dh zl12;Z;i)og*{^uFnTFF5Om!s+*%OM4%1<~3%*3RhI_Gv>d!qitJTA0{kv$kcIlmnLjuyY;3R$6Y!?;(}{3|f#Q0O3fJSimDvd1MYb0=ozIZ{b6VRGBVWt?O2 ze_ht{-L+*HgYGdSLsjh~uUZgK*%Ue}iMYs2Q8v_jpk`v5%+1hTIMNeJ5fvzSTGljp zu6i|iM!j>WjFvYd7|u*|XfLo~PmapMw6PR*GP89)RT(OxWH#%o{d>w?i0&I@!0vUS3y59gN> zk;0)J@f-PG>_v_Dh;(w?>_KUu;yttx?X&XQz3^b25Y?{(yei${l|j?29>b 
z8(ioK2(CB%J{lf+M$AcAYd3FM`q)G2={eaTrOLa$@~K{$yqkOPpvcZVmz;~*s!}; z^$U&W%w4>EVejd?8QN@udH4D!u_5P19og)$N0NGzK{{Q@IKG5RTkqqh`$B(SiwS04 znA&jI7NBc++2Y5mg#FD7B)V-|mH0)jHSqsvW>Zb$A6GY%?tMRfW^XOPvt$9qw8=4q zfMjRiI{m0XmDD=cJ)ga1b-3=T-&JJZ8)4@!)qgguH5)TnuZ!40hN2)Ip2pRw7*q%E z@#X+bYp>tyabAq4(e>9UM}j#SZ%e3F<6-35AL<3!_bE0O&iaA*@K?da$&N1UX58h@ zQ-wk8zH3}uO0L5bA>CH$vOYcI?i<4wsdasR;Zr}#P7`Z_V|dFmh*eko#|MIvBl}w! zrOdRl`14pxX?HcY$LAAZb`t3+x5o0t>YwJ6TP)sfxVl^x=KECeAB$uoye%T(ZHnj7 z>eTdm<2f0H{GrBKt&e{`^mn&?(f~qo=aVHSlo&65)IC5&k5yDF-j&kKDB&?m7tYsb zyHQ(~Y!xfb7e)H(S|U6ZWGp*5Y2)Pc3GF7H(E047aA`7_IdClA(7g96JqeOS;Om?A zzOWLj#+UgSv&-_|1wFj~`KAst;#ZPF&Cg3zuB4_Rl0D?|+dbV40}2B#+b{KAd0k@W znHB1z!Jt|4tXVR$MN>?7HtKVKc}J;e<6-gaia~USSCJO!WAB@1Z@^XR;G>y;!aAE5 zzCLI@F3_jdB(iUE+8lHn2+t+`f($cueB=tT6k)B_G2#?H;Mm<-6tb_e3~Do->By(P zbh&9^mhj}UN5s&(_iCR$lonQFK)W&@E$mf=Vo~~}&U=q_wf@rIZ&!*LK*r(8@_ zvc}$ckr1a#-Qqh>#{9~EWGEz8GZXg2{E@b>tSH@WTo~^oR>yk^B&CAK{{*jIciVaW zph$`|Vs*w_O^Wdjhj5Vf-Un&!RR9^c$)d{(?xqxF?Mc%|%>H!4N z{}yS(;%i$Rxwm+1C@2p@E3;w!3`IsalodRk@Ji_r$bJ4!G8_H}n8L&9;<R-NlP@&$1)R)vtH+8n zFvu*-53NhzC*4kVDl+p^GkvnvTntUt+}z?>OAa78JcWz#HXhKFznhuC?-)%XhGP%m z-frk%kQ9`#Ow zqc%J!rLV|6#>`qB?iapM7`0K@YG$gPx42%v_@SQit|UESh;mJHxI+i~e#v5Bh?cXl zj?-?bt=Qe@DZwW5LD0|dryLCJDEbWFBnfkq9ze?;&|N+|KnyDxtwY||_Zng`aviIE zf;91%p5Pers!XmnOt9*BGwJgW^qVDL8EuHJn#Q^0zQUB;WoDFoym^trCv`_f~^zwAtnkux4S!sw?w!!e~$s{F~_eQY=L*3-Y_2tlFDt=JGoD zo0*l#uv5v7kO4?+dQB`Z@N)00?L@VX6ti~x0yfdsrDW^}hwrSGw;O!!>qMC++>N?k!Z!5#y1y$ zD{(Rm$im9?2qf9s6)%SK^N-@1AA z$ny|zizY?!6U@hsqDcVXS7U&X-aYm4lg`Ag;sdVLrHjhGVahdS%%bGS?6r+j><>AM z+&+)3&cMHHq$CfQGTCGJhdB&@nqq>N5LLg=B{Q|OTg#jNuvhk-Lv%CXtiwuq^dd-ArQeR)uCBbPfj&q6kl!9G&)_z@R_b z&NWQD5_I^{qVpBHgp2k%aa zQ>_bqKYtU6beu7*4CWytKxi^as>A?I7?wJ?sLNW1&Zg5UhtF8~&vHRh8Ifp&K`&)K zi6i&E@+b!Dv%B%C5)q_$_C+E>WStjbcGk{(pzz1G&NHlrx#yBV?__q}=I$Cnx}r`j z3?xY(T}K*B58%O#!xB1*Sj8@grJ?{YuQ?NAv)iO=27nb_)0Q>{&jzq&W01>@t|cd@ zPeN{X7`s1*<^XMJe-AsEcQ7Blf_^<@X8qg6TrnCzoTdfTIV%UHv|CD^KBx) 
z43&0im$Wco?pFk*k~9s?6eY}v1b+(iJVq%o1r?vRyEl!BHtZwvh4qZE6Gk+X4Y>

nQ^b_^!kd`X5;oNKIz)bDNmD@5 zXm-t-^iG_CxZkjNTY9HMw=~W`g1PzQ4<;DYa!lf@^rINUFZUimF5{P49nvt29RO6s zu#rKNHt@4^40J6TAyt50 zC_th_Omb~VSD*)grV9NV{bGdhb{xJs|GFsq3tuJxt_e_2BFy9f;B<>ThRR^{0G>@{ z5F~&JJu*ukt{KKMqXw@Ouc}=p0Cg}-luOD>ktTHnx~&_eiDBp=9PXwvcTtTysSf<; zD#ET;+$BqZU64*ROVJyFqJ3R1i1SOA-J^HmE=U#%P2A7-1Oy0nBMyeo9$6Pp$p5 zxrtu_0upe;u(e0fwIOuu{R@Axu6+CsLy|yp1`=lk7#aWw0!ZEQd+#y)9zyumcN#i! zep~VD7T@R&nFM90!p;avp^oglB=i{-uyYDi1puQkCFlCkFaX0DAi|xBhM@HM?b+=h zPqN{0%c%&b6bKT-Ko2m}q!U!pin1xzR{)SWfTk!FO!3x;tbrgffHur+>wBFY@Xz0j zpf7U8qoBe6jasVeh5HO9Up$$DjEh$t`4k$C`61naA2NRWl5rPUBi9 zbx8}{%W24HhGA^_ZS#^q-xNxMznKa#EK3;X3h|j8ZxoM3pB8Sdbi`c}!w5eIPzmZy zQxVyQjF3N!iZ@u04ccNifF4vv4-Bw^%9fX<_-7n2=J|9%z71j^8uU`8ngrWPV8c8X zit8TvW5KrZ{R%b#j3a@3hOdNTfI$H1jF*~KrXvOEo!)u&W9MC5mv3cYdp>U{Fp|-6 zAV3u1>;?#|ia}#+gxAhNzrbf@G!R%3gC+<6O6!zb>6Y9Xy5#Vo7gM!AAUl?yom+>w z_Oo4HQ0w0lZv{w-`hT4aNDNaPq+|lagrIxqRX*h~)>qoRz{*Lk5JCS^ATk749q2e} zY{c?p_~Q`R5CnCel*axex7uh~XI>P#3V&e$Po(bx0At3PaqEdHM1<9)hoMA~5dnbz z%4HCwLLh{+pLH1zp{-91WOqmgo1@Q?FswT#`ThMikP)T%v#b3C`pwpt>lI2cS49CA zaHHKFWKzL&(87eul2uPkiqdlcs1U%C{0(e|QL1SDkHt`FnxKSEQ98hY%n0Qd!5>=x z$8{lPdrzw>&~uUYYgy$66Eio#=e{BL#E-*(4R*?iro#PYRkX6dbYi%m(VooE006=D#*Kf_#0HZ0a66|ee4a}~c zUep^x_Dv_wo!-pEb(i{JPVt;V&Hvd`Gr8)mg_Xh!9GVv;oS10!oTV1}DGo)l!EHu| zxunbGG_)hh$h<%-N1qt5Rx&j%heyUhG@WaNiKx* zInVcE9EgQ)oUFC2{`vYNpV>6?OHS+B{mTpEl`21P!XI-&$B6>U9iya6hJ&@QmpdRp zA=_L35E?tHytKnbsH7Gn6Hba7az)35+8 zE9D_F7|8yFY-A$;(2yvM4aRfWbI${KtayFI?m2oHrRJMh;K?tAt;s7&UZLf2OmOi8yt=X@pGyH@Z@{=wS5+0A1hg$w&@LX^$WAecbJVh#-p zW#I?<+}QICa3NM5flIN`X3V$IgB{kgT}g7m0qsr^csA@Q^|h7Wd64Q|-BUm+H!sPY z88Ju7+yKs5h&DYM$kj?y_0Rn+-p%S}hAv?ho~Q;hF#86HWU{;Sl#p57Cq&F8Ce65V zRf7mzWt#fYl4W*ohnK3&n`>KImleXdGyu-bO~qU7?29KJK%8~ zDEtlO!{_>J5?JF7d!VlFiYOwF&QYq;oXDTu`#8jF8HH54_c;Q77`3#D@HAJHzn{ZB z`LZ`zrJGPjPV{E^k+2y62oXl>_boks$p3WNZ6vqXr;l;JB6P()5YC7|GhB(nAt|tb z^ZNVz-(SdZ2bVb~{i944O; z8?09HlS=n4Lni(i2=J7weY5p)5Px||hJka!SoxtLftK{VqVjw-{DsiZwfK*fQ;IiV zm=cudWFh%jL=ci&clI-cAY5?Yfz4L?ApihkqPyfYF0I!x0G~$!m^6ORi`roe_x+Mp 
z7e)(Vr208bz3trTUiR+Sd=dxauppt)LHV1 zM}(^H+|OMC9=ux$x3g-0KC3p~>SHA7p+6<0%)t zn*cyzvuq>M=AgHW35dak=tWupK?(^@G&uT=kMA-9b}P+3OW#`cakbE@4iYrKF|Pjj z8wpT|fsG(vDrV^C24F-%wuXfD78Pd3LJCYggFb%P$T8s*)t>dqiTn4fLNCite^rq| z^AkiMCxgsGrh*$vPoi3MH>tOFW`w_7A!{6;2Zk% z2cVXDVi;{Slx|RQwI-E@`B*CK2)C-m<4W|ydmqK9E3N*7ER4f>i(%mJVwO$yMV?~3=dtO)DtJ7Kn>LU#Nk z_YJm9l!8viBjeh&XVK#qoUkNF92McrbsR(DB0@$fh@JV|<&M`cj{w*5n!>Q;!Of{_ z^r~n37fnE}{5ngx$2P-9#IS}Hbu#AP1_=5KsJ)2FjTGAeZ7ZhBIcDYtmWc{67tFIa z9HNB%MaYW7EzhMZ_WoR1N{W?|5 z5A}j64Wgpl_Q1n2<<(-QwWhBuRL~9V6F+G^*m7X)V&Jkt@)4KVY_DAbblEc!Y4Ecw zo_fFnxQiX=^@P+JOWXj61>FamDT2^_#`5;TZTLpS9e*mbh<`G@B3)4CMQngwcP_lr zE+TcrDFw)*antMw_e?|)YH$l-EU{Q|XM|erA=s`++32#4Ny;S^bgdY7$Qq71#UJ?Keb08@q_Id{HltZt^b<=|aMUJtp;R zkiZ~H)cEG$X=~~E9bMkh=bZb3i$p4-(Nsb<1`A{t1afI_#+pHPd5NRgP4;6dms2&6 zi4bQ+uGEvQvg_J=-aL9bxu463@u4ulW@UKB=ZwICLQIT5|Ggt}73ScZg9 z(+CEfC@#GSLv8Y?0p3VGkb`@OLvPd&J2axoP{sZ^ZX+0GYf}|;%;^`aIN1eI%co-22n`xIbfMVzGL)4KMj>TK*v zBJ9R(*t){l%z#kRkTa*~j35O}XREFr1B>kK*_{CLupbLRtTnTDLr6Xe@5=elm z3W8f?Rb;^#_DDnmoksTM(nRz0NT;ZyICX5@8jQI|vUps+eRGNxy=5ROvguAHkqEvX zHgj@O)J{%I$YT-dTovz#Ie-~emH_uQqJkSxvGC?x+K{2) zDOS~eDGeVUCQJo7P|C5<^wVMVF;cpH(eeeimHYIyEHWh|?B+qiRb>mAqdE|LGLq-^QH4`k$1mJFApP09*>zI-@wv!mM1E|o0gZMs~_R>1Q+VqBrA0NR- zmS6N12nSMr1h%98o@&b``}8^ltn!*Qi#$XTP$VmrG^9S4I;xGfd5NZq0Z2r0sfO_Q z@i=)qrf7oPx-B?w)o><$cPH3)`RNUKA1OusK-+oOi(^ zsU1)zX>K{H-PzX!CPoEl)2V-$oO55F&jw<#z`(7m_9|wu_A`DaukL?$%@kSa`0!a^I+tXc@92KS%=5^^&J3@`^sv0B`I^1no zT0)}JaU_Jtbtqn7gH<(SL5IY%W}#up)*)++&Z~JSd<1Udxjk*bkK6#5u*jGZ+)#u} zlW=z*qIaG1<$Fc)4H@11=pOA|Rt_Y{ir|;04-%w6v!{UCZ-I)TB4lf&5MzhyOiHIU zH?NLdWc}7_w;fP5f(RlsQTt&51c40N(36vg2n$Rw6H>*Cf=JWXFER-kc2@s0*T)-! 
z`d_5Ll)gTb*p<;uhU`}QSGq#;yEkRHsh$~OvB9vqd6~K?V6}YT2C2jRvY2^4O4ukm zi?YoxX{l5_$vTrz-Sdf=c?6CD$|7SEONL<>pc5+~jug|z8)g_*Zo?ejiMaC!VVMg_ z?5482NY|X=xl6JqCepL7Y}!@iPJSX!=xuTt5GS`*rheIRGdtQb#bYm}vd>CYrLF zzV%wvX0J45JKW)y@;kxrIWW0?8IEr(VN%sHCV8MevW&T%xilgT)YU*@c0?{CTMq<_ znANvbi%9fF%NhP*R-KOw&4Yz5`I4oI6vs};dX;9KeA0CNjq}aPfRFPG{_yAZRrq7j z3%j0w<@H_q2$}1(3jDhwObc-Wa>h%qA`NT@CWOkXQNTRGL7*m{@IE-EPyRk!WHpW8; zVZDe3KmGSj`6s%q%3_U_GpdAB|WDoml{lKd6_t~7tjZpH(kbuHiUL1+U$al)fB zy?2Fryf?$&tRIOwPwO3&-hD)!}1`zgn{2R^Y<9e>;A zClN3udq*3_Cm{kmH3&E(`A?(oJk_{k1%7)D5-}1*x`J`lqWldh2#WsGtTcfMxjy%i z{1(gdZ_$M2+Pijs>L!d;4@FUYlcB@aX+K{<#w4zA;)SNGg*>{WI;w@zC{M>s^k-Y6 zkPsmztiTcx6R{LEYbnr8)ECKQEbQN5j)@{A_2O zxhC_aeQ;M|OvYT`ShNVvksZnN~8Ijtz;sE9(yj1YS@qVjkTraEFMGLd1;(&^zq3 z)1()~ok7uQQxDI*!9FR|h^)8eQo8%~3?6T<-DG^JDs!R5?vL{D-|i`=EVe%wtP2xs zCiAOo&rkf~&5~m_$7HsZJUeNBJHU5cV)dx5$@G(fQE?6-fh`B#t(*MYh$`Yqp~Wb} zJ%`FqoZG8ttLT^cYl}CWxv-_2Na6xLnK0)0BF?PS{`c7qsk5hs9s$ndM)c})Tn^Ie z{>Lwyf&RrT;o}ROv%9RIu6IWps-qSVz$rR<%|fZA)PLD_?FHLgVbPe#2GmQIg~pNo zJKJ-CDfHZlKoI-;E!T36MXhew0hAq7ISJ|Ae10zTq;u%SG)h2u;F*w((DXA}=^EJ7 zlQZIZ?Tf0(P1T6#2)Ywi(Nx-W*b2XV-Dr5~gQ*MTv+Mfj7lUwr2I_Us3*Z{&!Xro!V3@5T|PhHTjcON^;_Q0`g6&jQ0%om8qk?~>)&jmF(2hc7Av$}l5Tg( zs0q>=ws(6?o9RZaz0J;oe}eDXW9a9OxWepO@>Rq9|ZC+ zAbaQ%O!_N53<-_fXQgEST)gD}WL5u@}nUOC^MpiS}2izw}<IUqMU0wYV&uoMOv?*4Yi`lO&+a|z9$$Nt?w@(d9!5+1{7}JrAa}C& z!nE_2_u#GOuRPPve^1wkME}ZPRQ4Quu_clF^_4(>_zVP%q^Fu7C@O2rB{z)ha=5|9axchY3Va+VpXxZT4H5>vzd}D@Z2n!?k1Z%5`#@MbbV(m zOO|w5(^jrbjV4yxklA*gwNa2W->JHI(tSxl&y79 zNYy- zobqcjsEX*TTU?#=Nh!z`r{)H)!<+9)x)0^;^R~uM(pai)v@WmTxjwb?W*rw{-f%x* zqOc`cVQP)S6g0RRvKrc2l;GA;_Qg$Wx4qh_t*iY>!u5z}zt7#H7Y!`eA_uMQD-Pb} z+I;Eja!>!b9#i^K`sVfR_P-wrfDI*2vcKGPdG@pZ<%-uwj@Le_45ybB(?@ZSqe$;P z5I7<#A!sAzy^&@$k%Q!oJ9}SPYcnZuA3c8;(+BsPK(`piOk!HlFa92s#MWjSdKhB! 
zjSTE&%HkeXr5Xj=-B`RwFzKoAtP=xtF)T@#4KcZ$&_Tbe}_$>-Y+?Jovfaf!AFmObg5Om z*82V{^N0}gR;%a7;QyoREWe`szBYc(zzj1CF?4sQLnz%{I+Ri(AR-}r5fll5p}RvE zI;2a=p@$G@Nu@zRBozsz93Osvz_ZqQb)T2}oVD(AU3-77FGm9rb!?deNu5iH{>RgC z4(p?8(Y*5!oV10`$r`GhBMg!Br}g_0Nh}wrPoms7DdCCEXG>z=!bk~JY-_^$7l4Lvu&Ei8PoYj6W%lU zMhckMq>i%h!^uWHoh*!4?{7KZV6gP43hON$zI$xSiEQu1aXWutG?tl~vfsCwmf&gC ze;_ZTy)V;vuH{JjluhliZjN<_%Ni`VOwqJf=_H~3tx`yYPh;duvpKO?pAoaH^JLzOg3NCF8=1?HVk%~xO zQ@(j=ADPnSuj?#xQx5{wbOha=u4-J)G~g|q*>A$ zN#C%gMqBC|aw|xe;n(b8-ZGp{VJc0dYkc1{YKHd71HS1SPAFYQ&N#0;2?0B6MI}AR zO-{!ky!3g})w`w5$jL+vG}AWqzE^ERM@MIB#--YFOfT`hE=#uEK!WdtIo<0;zE39y z39kmMlRB0+I}Qka|8?)Y2_uCLlWXx*uyX%A<1L-pRf2Uryf(U!epV+ilX|mu@&LtPb9JQ5*jEZ3cfLP> zD!r!tZ3TOXXtaB5A;CXWJ^sT7x2r;Ue_vnIR?MKleTn7M3YkY0G-erEPuaf9$sP|q zx%HFyL3OiKw!-2!^LOp$e4Hsv3#V20>;Gmv3+I~*H@S$#-Ok*;18OHo=o`GOKs~vgQ7h1Jro-bc4ApSzc#-V#A|Mz^NPNC0+(7>vy*E3ixKIr_TvtB({B^BW;950 zkCxf%ZJ9Xn`01Dr;-n?)2EF@Y5p_#8@qa$1{cxWgyrLhFGmv2>jQA;&``4c$KhMN` z+xJ(}V(h(hxccLEjq3NjtR9EeBKP>sKsi^AqzQ=<+TG|37SohFlc3S~sarkLjXy)& zVoEhpE<7|ocC)XH?6ob1sp17=0A7oOx`4m?uB7jB)h+1XzIZDlX?q9u7Eb3$dVj&V zGHszLu~*$ouYo_2EYM*Fe!nPx!rxhb24r^Q!0k zr=VPD@BijmmnV(W*Yi1iJ+Zf0-7Gfc{qNi>nroWC&6O*A>j~I|%4Eq(MmD9stJ57e z9U^_WW0&mof{%+=DbECUPV5E6L>Cl&mn*Paf<;}`weJD-1i>9OVX9Fp-p0bX_8p=` zs@U_VrCjF#m)X6ZOEIsPH!LEFEv`hpt1wzgQz`hP~ z+!)FGm{b#tQor8L{BedFwZqs#T4D>~fP=!4Sr2T5=WMG4eE{+06SiE_hM;9Ijt ziX;X^J`{_S#Kh0Zb&Gl1e;c%F-*=1PqcoO!+7jj3LO|N68&Ww2M`uN!2?Prn(^@8z zJWTTe<8E6?x&Mn2c=7=AX;9FZO5?=?%~l@S!(uX@5RS81;f*P_QVZFrezvOPy9HV% zwa4@geImtba``8|N&TS>Dd?olxDOk0&0boLWh`>O@%W6e`b|>Or>3Ei>>?0$WvEqT z3cL7AM&&yu$04rg4z=d zDV`*`&t>$Kf!=`2-RptC!Ysz=i_a@BAFo&L1_XP-XA+8BSsLkQ8i`cwx{;0p-uI3} zWB<@veFX{gw2(D`wSM?mm%Y>cAjzD@AoqbAop2J`--ot~WEu30zvG-Sz^5(4h!|GY zHMmJFZc13OFwu0_EWi)FJEr~EDurIa(%r_G&Li!1y1M3?lZVI?5503CDwtzrisRQl z;c~%yKF`TCV$!v5=77GY*Y#Qv=>Pi#^&`x>pb=+ib4%ml=#1er<5D8l|A=*hBiXA` z?`=yeY-Yx*H|`O?wiM$AsngFCGpLHYEa> z7f7bJQ^q3Q`CVf2DzLHYsV}*6yk+S9?Cus6lRbprs*+@gfG2}awqW*~RiCo+^ zNTj#XD%bwWU#L^#Tk8y@Xs0tehX>v 
z2!0cyoyo4TsL?_cfRQjQ^$J*>A|U=slC4g1?d%jBI483Ay@}GV_&H=w^vm3A?$JLO zPYthmY3U;clB8fJ9xxU0u46#J>$L5-;DnzXk zyJ3}<5&G*lfHdvK(t$`jf_V_kCyITsk=&6g8+bsiS1oPbr2m+b8%i3 zGHG3JC^26iPB6oUG~V`(TzzjH+uMv;>i5sxY^M6;zv^jszhs-(V|$tT^yDHwdn49R z*w;^{K;vAdP%vTIf^6c1OZcN1{tYT25(TKcz-YWz9=H9IaLJBC5gOa*9VFCb0)Ky# zC>!M7wsUot#MsoZ;v6CBi6g_zkpCXdQt1&%%InD??Gkhs!yPAMY)sx7EJ0X zp4A`A%jdjjsa^^)4J}}Wl?En-!OB4?A9U%=!jA}H+XDVjUOZKl+%teFG#;D%l2hFHI zQooaur6Iuhsj8>0K4L^^j^9)MK>~S5($}d&J8|xXlO!R>L!s2eg&0@?CuvCNP@2tf ztSD*vv*E1kp-i&jB0EySNl!^pB7#4AX>olywPwu&tH2k+dumK#HXJ8hp*hK;?VWIJ z!)V9ysQn1MZF#h}5uU3$)^jjAGB%p~bu<@mjUYoH4oJBsNx3jY^++&T%{U@=e36We z2L+zqz%Po9*ENVfV+N0UO;CLs?@94Hy99li3fn&(sUac5c|QHUth4;`(srUp2tkbzSS?)^*&Q!+MWQ%6MKP!#I~ z6a~N7po1s{RG30=Wm-I8TJ-O<#KY+;2!)2{^df#*13%6GxAGNZlj>nrma3aZAUqoh zlZb=sA699T!}N@1vjLdK!`UoPxb9(<=EFHFvDpfMe}ZMm?(atiKT1+O$Z`^%hbK~; zgg@>2{B#A52EMqAe}4Y(%d>GJv?qyA9z+rU#gpyJ>%1>7YrpuALn7V5A`M`nsaEX`mQ)lf`d)HFmA*j~~+@`eL5xC4=xZM30L>LoW9{LLs zT?dQc!9vJ|826P_qm}g9mE4C5pU1)R<10BvE6ZK;;)US2zu<#$@Ie>&7O?i~@2Ui_ zc4oA8oVWHTaP8Ol+DRAq%6;ufZ0#06ggPRUx*5&yUME{!ry#5&DZWC2h@8n6eJ0_r z?7lKlEanw{E$sSAAN17+*kD!O;1b{9UH!@*^z{`I_;@;J&$c<)K}oowP6L1}1psc? zRP5fApV*XJ-BefJ(zM-Dp4hz8y$NdAGF;udD-PUU-9oc}yLb27{fR9Vk1fSIKr??! zyAH4s-?A46?8LW~Y`39y0H|==t!~?+d)uA;yNmdDkBMzh0%03X*mlkT?z#H?0pYu$ z@;8j~&i%UYp?7zp^KaH>{OXP#`<86brV_=j4#iet{w8E{M>Yt6C4ZCov8%zprB1Pz zp}c1$zN;#J6ZNoJLop=G9DKb_`3JTCTXnzLcK;7%zcq2c-FClQdB2VQpeKL7pZ#E9 zV*hvY!EX%Zx$42kANwPOgJJQ*-^rAp^C?I24;PgW=jsm53-|H)`{V3CW;_lyJoY<+ zDAy={e69P@zIrf3I2`N#G1h%Bto(D|N`?N3PXFV=s*K!l!O=RrrKiHA*zhf6;WXY&t_ z@{dnff6jRvp0ED6@H{@u|8Wp>B2RfBNBKigWuJhj%wO2kRH+VQ4KB&%M-iy?R8E~6 zPo+R-G7>{#Q>XEQv?jN(-gBoySZd)|wN`Ag4##b+N52JQ&op|^oF1JSs;E8c`EC2@ zx8*-+?;dIUp0nqafBbv?yk7eg^zRRb<2+R5Jp9plWbk=(!Fg=YdHmXW;=l7`j*C>4 zi}XhqnZXxX1sB+!i`=z~{C^jP9GAr^m!*#`%YrW}3NEX9E^F2<>;7Ffa9lO2Ts1$s zY7M@^6X&q;HPbO;9mlS z)#AvmFM^yyJJ;fu!(alNN+hf0vExXF*rUN*dCL>0i9DrXD4W%(^YmMz0_{AjGuO|x z4|^intbadSY;j*3%(MREvDy{*56W(H?z! 
z3IqZJ2oMp;jSmd~EC8666dE6Ogi2|Jm$GP?nW{U6s`*rs_++i7bwsiaQnTF zT&vIRwwH<>FXY?(?!5Pxu6!kw>vk{IR6pHAtJ_1j!$rT+U%k^?7x(HxtN;D_mmck2 z4wYsKz0WO%Y)wZWyqY$8Hev2GX6-s=?KEQXV#3U4+{$h0zW0=w@09V&DJ!3Gea+ry z=7Ucx2OTYk9nA(@tUkDy4?7uueBmyqv3ETgNeW9w{V z8?+OfRNLR^_qOP^x0nyVu^j9&{rbkd{|zy=$~V3sJ+UeLeT#Q@Q(|lNR(icwa+72B zXZ6f^HSD}*#?<|a&pPEZ7ELqv-po10j=I!Mx;AayoBnclYvum!(}Z}Z&^l*KT8mO z8YAV9AmJJ>;~XL4oxmTIDCD0Y=o5L{CrZpcMeIqMltYTdvlKQ}Q;$&7j!`p>R`-ZfRt#s$jpRs+W6OzQ%8q7>j^hoF=ZQ%Wij2M;6TzJu z$(0e$g^d-=h?2<+6O2d|Pf2AfjASc~WNg6jmWAm)<6XP)${-7^1H!pW=e0*edaBN~~Vru-~zkh%JoNaG!udlDQ&!&b3-hvT;GdQbm zLt$?ij7HdIsG+Do3dy6NtNX6_LmZ=&>+;aMl8;H88nLW;jitkBf~NnkW1<>H%BJ63 zF;$|QCj4-uxpJl)U80|N!@``awrFx)8EL8hQg7EE%VyA8v(V%`Q}uAPwRY*f=jKSB z!TY+E4&Rf#mC^V0Ydshu1iK-w;cI_1jfm|S?%n3cWFCWj!?wn6qghgJt7C0V-#-;< z#IYN-H}B3=m{!}4x3}znX?Q%EZ`9FxxP}$^H;(ECd z0RF%RK2b2j3i0H|vATQ!<7~+m1(Adv3Ker9$r zg~EK(J(cJ(RpEl4hyB|XgaG-r{s2)5-Xd^mqn}26rGD~$Dx5wY1T=d15vkcY>AR-? z739QJDZqel0pY8hcya~C3Vamb9RtxyK-IEX`Sa)O>byFvTTK96(B2O_9$_I>r4y!| z?Zi6a$ztU*8-?WeA2a9cl>d0P-D-=VICaDa@fQLyLX0(!2XZ3a1j=*huNzduW39Q1 zgvd01MKiy_QRW{V3L-;mwR0$I0oLmuW5K_O@cq9QG-?tRR!dY1NEtnkl*1>4le0wl zuYZmcnJ?UXJ+;BJt}HaRZ`(_@rr11A8o!hu9!MPG?7;Hm0nlI`~r z=|0JsXRmCp<~#QZNpnBYpD3^xh5g}&G-gL1h;o!Ta(;4IZRY=ZmZb*7?Uks4iJr?n z>d~eo747jx@Y<(q>0GvjFrsNLk`#UyA3k0^5jAe(m^A*uU>>z<5_ds(iq=?u$NQ+z8QH z_ewl#ADhZ4#W!6Q&EIPUJ@Kk#vMtK`^7miEzIBDplM%t?S;v@5Wq01WJi-V^pA6kk zjuwmc%=XUt8r{1;H>&70;k?|EDBT)!$8QQmKD-{!mTG&QeHhX`9bYK3Eyfh<(h zOCF4(mtQNQ2!9OVl>1+Vkuf_4+^IPL)iH1Op^N8>tJSi*ADSBz5;#jliib}nM1Rsf zgKUEFFfvAfn0mmrW2P`M{gvuv(JxL)!^Psgkm~15l>8b4y6nc1#Y!6}(SL|yUG*2K zw}$^CzPTRCzs+kyjx-*$Vkebf`+T4-dZ#qTQuvXA`kj8es|RcN5^Q^03WSohspeUn zu+puk-hK9%J#>-nW|dObub=FH?j{Mq0g&{wD}l;pj`yu|+ujja{H&q&L>=CoUs< z;x{EWQm=E1%kd_V`8~BZDG^0Kp4@72i&G(F>XC$59zlD^@sW}7OIM5VQ0{|ip zrf>Y2F&@m0MmKXkZ=tvAY4}wu&rx&e9pHci9a{=Xx7lv2!H9ax)aH9yX?t{%opNuWWC?FSlwUFUkGwOjW7NZ z1fN=;vj10ftM0!HzGqwpB}zdJ4Y?5UXKDfTIIsk+=q6{wR3UYgLZ-CBNZe9F!Q_fu zs0~H7tt@FLE-+qQuoWgTg8UOZ#h9S25KM@A>`^^+8{*}(U)OapAY0`o7pnN 
zLM5t}PQ1)oziPR0yxjjRMmq0!F%EawcT2r4!!^A;OYv42TVe$imZ&Y-ztLU#Fb z>eW#{`cL$U(lOOj`O&tCR}F)-hTTa&(&zj ze)1}1oV?MnO{NVnMpp@BiVf4r&}cYlfecb)cI`C>pud4Id) z{M%yzv6GcCOYG0rE1#5KwN-bXs>uA^HNQcl)#snt$pBz7PmHG#?kg(@-RqkivwB7N zgQx61in@2b@yC`%tfH695r55$dITMPO1No*3fxn9bonKz`}aG-e}M-Y1s5WoAo$=d z!f~4F^_I%Ni--82vw~pa1BT%9FaHSVX?Bp)`ksqXj^Oi;H?Jf{3|JOJY=IF!0^jls zg!=}P&0`Sr7-R*8>ijjOAknQ!@Pp)#?Bo#E`4IN=5KhKWZrM2y0w~rEi3}F06EtNSTvj8K791B)`c!UT=t8v5VZ`q;M6C zT0V;OV2tuOk95LCdJ0Ab!J=M9M>$zgY~jhjH6REm6#29&q8W{7LLr)Ov=2^1BR(3X zipUs>#tuc#;G-LXm>gJ4UUW>MZwyv8wzVOq8AXn3h;2iWwrKF zcMFPJn3$UopL9#aplmiy5Yeg{)1n%K)r-k-i7$p@ONFr6mRPJDHb*Z8SBPkz#10){ zTMH35-JF(5?0e3f+P0kfg`B1sL<>9@?~>Dakuz9wgzzLbn6lPOk}^rDGwaRSDCj*lvxh-rXq zRk+VOiC=c6YfPDId)c#*vH&!x5+-Y5GV6s^xsN~jB09@&@6A>NBJUzQy$#!rL=e&z zvLl5NbuQVJ3)$5dTUJBlTB{B@eJAm#(FjObr&;5(W8&zk6ht!ln3zx3(8= zd7e``2xmIBWj@uiOPHzgZwq!)^FL7ZB;$4OFe&bs@pGH zN3Ecr+uz&S)!N}}tdsFm{`lFE_g9r9=@?wtJ}k)&m(+nv?&w_`>E(Ea`>|J;Y*(LL zNXn*4q!L?FAkqM{E`=F1++@|j0!mA)8+7#BiA744_8J;QI=GEH!tt3-*f%{LWjz4{ z&s{Pj?O>jZZ=!dJKIY^_`gJB+W_|XIeC7HfOpijMutL=>J~6gwZmB63-t4$sv7*pb zS(#lVoZERZSR&totH{ldjniYPy1SH9uh*S_SxI=0{`gK9zpPgc5^e2Z&c84i!efUz zF7rqCk{vw@k)pN!EWM8Fc)tMrV^!$rNMUSFZ+KiC*X!Q!{W^Nr5zbs3(?M;@eqk!Q zEfG@>-|s8hZ@+54$t!&;);XG6-7ZyAf8VP8dZf>CIO8Gd0CSO+mj;Q(YgiYeF%4*R z!FI5@jl1m)C}GIQ6gppwe2B?z9CrHxQhfs_6@}~-`?9H6+W04lzB%%*h({ouJzdYG`nc!5N5ZRe$h$Q*K3?;_m zu{Q5BuJOp_so{3WD>NK~fD6m_amN#;t62Nu+(vj__a;;qQ(VGQMd0y;q{Ht=%N5@C z;QI>@rGNErPI3Ef*7nkX(n{-5(dz~<^4-Xo9n_wZ&R}5H2KKaa;DP&qGwB zC$(8ZOXaTdH<|5C!LCgi{#XUK$`Z>8<*Q1!z=@WH%F&DFgQf1P1aTf3;?3V?iP zGwAe(Yaj|UZjq3gW;e%TkZD6$@f)5C;%s!B&su*?Zu)vVVPQOBq5NA;SM|c@C-Gf> z#G1VWzvt>>mszlcTH)M+#Y)So!K#t&E~d$V&QJZ7-9%#R?|;ey`^BsNy{VHhUiY@!oLgvY zsY*h-Hu;z`X}jX<_OZNWu))g3ixt%}N_nZW2S($o4>KndGlNlwj&hj~J;sBOtB#f9 z_U@lWs>$~smb-dn1&~rmC?KTwKUc<1$W~X{i#G3%b$L&GkK6m$9Mcl0)E$^m)qL?C z#J*0FUvd6=KH_g>hg}bYZFQ6X0vln6LM(5AG4H3|hG6$FUGRIP;-Ui{P8*MlN8*a( zN0?dr>VFV$H54-?otvT7Gntqf;n&MKBBdX?+6HZIF6|cFVD{hTXs*axUCWiB2|KIV zaL<@GodKt=m4{y_8hzy& 
zf7VbZ0BUddt03qXd)~%tk!fiCbVdBbso;LOG4H!C)0=7!}K^_{UX z*MS7)=AWW#`9hxxVg{eS{vPVzwbX%iIQS7f{?l0GNRK7AP`;{<>1bEv&o9Iu9J=TC z)mQSoqw}Rd$!zDddijypt$FzcNlEYhbMU~sh_M^TWEdtb>p4oV6fUafwY6ic85_^2 z!vEQ7@IpT+Eet^8;f0*5l6j-gu^yH%8fV0nq9;vZrI% zo^WRC?I!+DZ+kIaNX)&p|JO13z29B=2kglGV&@)*O5o1o2kZCZw%tB|enZI4Z>_5o z`$yBfGWxYK@C7G!)GLGQ!vLP<=5x?{ca~*-#N2UBp-eC1@YlX?r3~yD`S5lF_9yT zPDk9!ktdP44c3>$n~o|@xHId?gOF+-qSm?FsL7{oRb0fF>q-hO!+L~L@iPSI@0I0x z=vN68I8f{J(>C@c# zgvd)4{*C#2MMRM0j0Z#z8^I!Cxv7#=ea=|4I@PU_WNtE^@ZY^o3a-C3U`!#J0~uz$ zA<#lhBfN~c=V5GxXD{DgS&<{z1Ad2Ai(5l*6g=Ruo`x#CIlu3C?n zw#3c@Tg39BYHfg6Sdl)fT5C#*ba=A5vlMukr{z|B#WS7eCz{p!R#hJ_K)AG+}Mt4$aZZo}~F{5er-N_`Hc)FYXI9tY0Zf48(Zj#Jv0Vd+n<9RwBI))Rbxz3&$LCIl}0 zvZX1jPMtX!ep!*L{6hl3u^M^@f!KDCP7TTj_V#HZhL1TlB!&w;q*8v_KjA$=eQ@4% zXxW?}hPHHk)*QKONNelrWm>U7_R}$2^6qHv_lj?f0{%*)**-N=O)I`sW^=t-uO?ne zpH4kZxF^ElcM!%dw<_SezYD4(Hi@h``^AZXXVyU-B5ffzOOsA@F-5N~{Wg{$N z)`<($=cv`H_qve)dl*-tL2%o}H4t{$E1;^p+hkIdcrIl@Zv^0kP@8Y<~!uUVh zXuMp;JVXZQCVJWuy<8uH9kk1d86Jr_CtMHdBLbN9T?a*Ng7e`*6VMktWdoC)YAq?|7YXjFK8QW8^;5)-ei1iEE ztW&V=1Wfx@W-md_)&j*8pfzHh)+_GQmdd~}_VyNChsEHXR^dKg%|`@ujDyJrWe~20 zgTmNIg|;|5h?AHPR`k{(G?O~3OH``s^v%=Xy~wAW;6OVR^XY_*H4z|POqAd;zi3OCKc^trBTj5e#A1MGuZw!wko|V=A{;A zZAapSO@)YiajhQTrDY(LLi{WvSr>ebi^do7{S2!fox(JfON(7(h~uSy7nZXQAW;&p z(IQ8%O|!S$pM0TYCLCj%N%?r8VC1k_O^RAMTwR#84=6;X_YNGeL#+MWWH39W9&x(&KwwlScyCOg2q+Eabl|Oq|J2ryPI) zd2})5+Yh-y`c$##c%NGXxp7=DHD~pL&*H(H1I2K6QgzX$mcZd$H=U2x!{>OGeEN@% z?f$ec*gj_2K%(|Lzmj|UNJ&w~m5X``2u~XFKxYaeWE(l<7OLOdQCZp)Whnw8k^Eme zj>c&9S|z?uYcsSR(T=~MdcCvxgFS-KyD%sk+fT02yg zsE9H5<&@;y7AlvQF|kk*GP_9(6z|C=nilF*%n-JN7i_v4AM~Qix=mo%jtYkff0>2) zG4Y5bU_OI*thF-&In-HGtZ~C^qU8j?ghAW{45H3Df2(iPH>>x=W z6Cl|E3!@w(vqY=;9umsv5D2v$h{abUDpn}!dA%{ka5ZMONi2$-G#TT*MY!Xzs_5Wc zEVAMfZ3m{Qz{Bh~C3gl2iRonV#11Geg#~VCn?T5qDnaH1weQA1yKafwtzW{?c*y<- zZchhJFzJ>ublYj-xLO57rK(^u9P_)R@C2RQ?aK14LT!u66T~tLl<{^&{*MJjj9v#H zIVF(`ss>;3aDs%75`S~jnOxL+Sa5{nh3HN&5KaJ^6=uvRA-P^6i3x-JZv-6!Vg&Gr 
zFo`T8@H`%b1juuV7}8Y5IqCI%lXVK^YWj#<@}0fZ7^c;p*`nC~G)f|$-D9FHI@ zSr{T+3|to=Rt2a4XrB;6?odiOfC)f$$T#;Z9K_0n)jTVXL!Vmq=SHBd z2m=i%Ad@kmb&%;1kw!ECgJ~ks01Sv2>Jw!sj*AFkb88^;ID*ohtI*w`UXnpeCIahV z5|$eb);hp36!}8(_J7Hp;0n-&3E5gZqeB|RF%804fq@H#P%&y#uhS|sg?y>*W?UDr zEQugRVTd?ERsgLKsUTlt`r7&p0xZIYanPX?1Lq8}LB0Q!75Ri!o5(Z7h4Ic_bciP_ zh^hiGdS0vKi%M=NM4u{yj<_DE1SC*s(DN-I}d<&Fvv*&j0-hJ5#2*Z->BAd>mYXLP~;8PlkWR8kJV=&|}NOxg4dLDpq zLQD&Z2_~2uuw}D641>H$lH!E0!!Yu)9V$msW6#SBY1pQpGcpMRbQZTc8UV!lhuL2s z5@ZN54)|}L>^T}lj0cF1Ko73C#@C_pp!69|(2vbKjF_&25jN|r2u2Ku42fY(hVnH+ ztSq$23I{E%Tis}BH@z#4tppilF;vkZ9&XBv>k0pjwYal@d|rIqD2zbJVhGZLuhMV8 z!t4)m5z+0CN9*h*yHF-ofDSLioTYdjkYpeI-oq2j47+>H66seBqI`*A2850|E0efM zygVDODwzK=@k-)CZz>cu6ArP%5E#Q6QdI9XfQ@`{NEjxAH9CqY%=jn@m?QwafqE*! z(G5bGR8#bZg+!2bP__aDG7oUZ!Spa~jKnkIe@U1J!nqV9WQ-yVaKzQArHQJeZN@QZ ze|6F77zSB@4kd3_p^nA3i|kBqv6FzXmCYQD+ouk8XxV1LhXe<`{GJ>M*eH0Gczxx|T@AxvW#L zb9RJZYO3fNKGFjj0zLwjF?|4MftF3;EjuGU&>#|&G1U=tRb7Py4WdBbe<_@|+()+1 zo0T9G0?CW~#teEf&L+@zo62JH*Ry+8Spb>E0|rdU(quv^fhqgH(N>omJ1n67B?SHE zVsIT30-kHpZHus}c<_q2)ujRAVmBXwMd$dj8}C9Bi7Efn{R}}+(!nsyNA)J4hR_2j zrTg6zJM4*b2o0Oi9d1+>H|`on6pji*?O*^ra~5$3fC;0aDV6k{hFD;tS%@;jhxuUu zJBElnK=|GBwrHfJ!KqqW$B22>Ogui^9KZylAn!;`;pjGVG$vT~Cf}@$U8GvaKSx_W z)BQQTHm8pm6XI|l!R$GzpTO{xvq`EtgxNyFE*U-$4_AShISf^IH*%8czs*t=<++$0 ze!EP=tq^$rhQ2sX4+eo_Fp~q+wtR{`79E5yr9?_-6Lw^+8g4+oFihSV1B>qH594AS zVU5@g`OmK04HzaBx6q{KgQco)Fsxy6=2?-bnO%J6Dw5MtOdSEyl;Zlep#ekgq zA=D@g6^6;#n%oNWg%MZrR0RF_br?KvUbAQ@rWE309$`XyKg7^y_Ynyi+Im9s8aR8i zsj`vOM7lL2FSm4pa!2XrL@&b7(EuD3p~6`u1|6l`{0 z9Pg?aQwjUFI#A?xBu&zQ@j$aZ2kz42zddul?TN!6j+&|S$(UqUooYk8kS#;bc`FWB zF93ahST4N0_RT8<&KP1TNU=2?VJV1yDoSqLwoNq@af6wAt;MYb%GXG#*nlM-)&gvW z5r3GUy7enN5^xbOZ`-YV-UAJd`Zag2THG?a=uXgQk7CA_P@$~Fp{GYs@)nRop`{1s z1Q>-OMutS*6|$ge7`AT6N?0&kfI7jFQbu@CN5tZsi^dihsIL|Audm1*%iquS;H(fz z7$71Gi*1sX<;3J(V?!$n_{1ZqvRT;UHq4@lIjkX|EH#>JV$*dsb6q(T*prsO0N^`P zJ>bY1z!I-I`ph_{<9Uo!+N|7<7{>WY=we|S%|*1$Cr 
zx-As{8{r~xzB{)N$qbtqIOpfc0@<)`BX%OaCZXRGw01-R2f?UO5pJ2HG&rCvkl1#WWknq0(Z9LhXO&wj3B!m$UyvCrpg%ERz6!i zUvKM)!mCt8H-tzG;>#tGX~mo#Zp^S^d`MkE{T%pgBRxkvjxG#a6m3SYhkzT#`>Tm3 z*~uQ8bh1oyc{XTmN(-Ed0JhzcUxY}kX}9lP^(_1bP33^@oqTrOhRzl-u13Z$2;CUh zA=hWN)|2-ZnjnrjkmC&Tz}CUvyrA0*tzf>8tqB@6OYSwQ{LCW3Z=-8|L3jSrg0O9C zyVSQ%Z`*y>wIHeh9^oTgcDzUnAwT9Jo{#gTNpIQA=U)>NWk?%<{}>}rfV*^zktxEJ zPD8qsbVmtVmBEGpGukOFfzl*q>X*`RX!;vf!%byi5mB?+OX;bfeTDflK&*`qH`RS2UB-*B z?^AqIn2(%A6ox$YgmPV|biaB1z5onuinZTUY!+Zp?PRg1a&2!EL*pd@0IYF+(la|Pg zfawSq%)N~X8v7ayt0#y-cl{4t#h3N2j2POO7RZZwfE0iGiQrQ&R%hpv@{os<+pj(m z;W75!A2%NTiNXqoRsHuSdp&6py3q{$i)3a(Vn$}_6C9z(N0HwnWO!m_wpnpUzPe>{ z|6q@>Ck>XlY!Gq_Ob+=l|ILrGw_=LKW6$_3$tQQna2qa&f$00-XGhYmi0pV~3D*N% zr!jh^3RVd(;>syLIJJqrS9oP&sC*WqUgK$naj99^PL>IuT5shWfMA(AAea$OSxKGp zY-@;@M>~^M&(*O~kjGrs^=N}I7so)3pvC->{OuM2(0v#J2RMmZ)-#X{{`UX;GP}uE zn%{=gAl}0A@nEQ!)crfC`X`;3E|lFpj;*0o0n@r?7n0j!SQ+mh2NzO1lf^n|;_mV% z%0rcBD~RNn)I83`e@Da0?p@7A{M0_#Q3Sn9vdn8iMtrl_(0T^}d`bmN=;hd@< z`WG4;p#$X5#k+p4k^+vUW2x}v0YOrijWaj*tkUNv$7;G zq*6e9>GW7?ld%8}NVLbOws$gc48)%8k>B8xJ+G|tBLX@_1#bjod-VO~wZl4>Fiu@{tNlp? zTvnY+jbCk&q$dO_V?%E~GmM}ClwTHX1l;wa!@(b$(-}c%leIFq!v8W&0?_$daDh4@ zK!R{J2@RkztPS<&Tn=n-jU2v;3XkLi2*>t zT+PU81WkZC>D+K5mgs9;YN}Z$@(b`Ui!cBZ31wOWV)i2r%Pq&PGvJ`XAdi$vlP#@f=3F34VV26Gj1co0Fd#F@1zikj+r?MbZAC3?{NDQTK z^*J>_^9Rj)XfL3FFgmnC*gWWZYEeyEF2A7sFCin1$%aFG2*P!p(zL|ET1vt^9?{

O`4#=0RZUB_~7mz?W@`C z#UG>~LNP?bk9>WO;cy&~!z~G%pbaiSZr)R%DN`Bd}uN$=wz|(XO zE9J;6wdlgJfKP0hh*idK00G>-j)hn~cLC+7_8VP&(c4elNeAk#Rgy{-JR0AZa8Qyi z4kUp|GS>a2Mz&w9X==#Hi_m5%O0=rG91~8D_d>eQLf}l;c-WLXL*0D8HBb?DT?_}t z0}S;rAPO|Pp9gbf1AhsCxouR_$qL)@^TstjGKi;+dd(wPDsbaUrVNs9OYW6^?Z@a7 z4xvcMItx%tI{wyl8IqC14eC5l=CCiJr`RPeRMw`CAo{X#GL1n^UTtF7H*{VcpPc2K zU(5bGYoWeZ6WNC1Y}6VPel)zF6GV9b#c-d#?B4HhkM#acZIHNIrN3*MlfaJm`5jEKq)zYGXgwzqe!8p+WBkEqg-CQ>V|) zoQ;}Tx?q-=e;(pqe^XeGN212^Lb3s#eY<`tAJ?SsbWQkVd$x%Go-2BD)`^6lM>lTSF#WTe}WUHUl=x}{6n+%_GSR6Uz zR_mzYN_i4^B-?O#qxjL^M*s;b`f*Jb5*14s&|Win?FlECNoOrEc`XBYEj?KPUC^sK zVx12m%IVSu5_{_2(F!opk~0Wa=|nnvT?;!cdV$Sh>nV0rD#8W1Yj3i%L9pleII>FI z5uVzk9YySw0;UQM7^`Wys%dLnZlPEBJGqYI(xR_l7annmo_@GQ+B{(e#ms#q-QJeV z9vhfTBk^#v@^UEVYa?~w=}Jy{irj~N?&;ASV}=W352&|S=?|0F^8kiFlDYp}e?Rpk zx4bd8XtoOTn38A4wV-5z_j^_S->rWrBH7S`|6%!~aY_Oyk0aI`=C@&2^1$vilNAqv z$Pqy?I;_@il-2J#32YQx1@OTdQ6^fVaH;4+jG@>ZUlkhh0x-xAMslUH3Oq!KRI-W> z_3dDLHw=j8zxlgqZ^XeHxAPo~r~ zH2Mnqg>OYwGm9)FLS{G;?amEz^Hx38YsS^rtPTuvIbzBVNqo8wGAfYv7-0s8>P;RjZa?}j-A0&gSh#{$=qV>b4)f6> zQLLAj_BK(hW{r|+(_w&@C^z^-ZN&5`!J;3rBXE_as2ve=xiM9WwXMbWT38Me+!u)! 
zf}h>7qv%jRTc3K{&4NSP6`oZWC7&_=Zb!!NT#UK3jBHv_4C^Yq9CZwKj@0Xk{I0Cj z+{we_BGovSOtib&IE@!jYHW|69L>lmP3X!A za<@;6KbeuhGXQKk0TEsT5q12(ANlD$5ac3U%TCZkLs*Mz;Lr+TJ%Ul_FiX5nt+QJ% ziC=ujnyxO(bNiyD>{IZBl6%dm*xF0nn#Mfm3!tJApk?HDPT3g5Z|(=CA5JiQ z>XjXl44VA%Y|yu+04=t&vvulgy6lx-vzGV$shJ=XzqOWyVxldE>pt6OvKyv8T>6?j zWYkTy*2?s2yL)z5P=ig(weld*mqE3YO?v79A9?aW3l)em4OUhhZgOK*BKjSx$k*dW zpY?G)N|JpvG(n&L#nW}US`Egpo;&lpg{N9V2m`9A&@ub5xbEY!H7@S)$1qPvV(<6C zpq^Y(UG^S-jF7?J*1rs|vAg^g*C)+^=5`|@dV~khVmv1Adg!{Hv^J9cN2VXtiaCW{51sg7yd!`q_E zKH^>F!XY0LB6X_mAAe?^l==PD~$C*=+bYL*6IM)}ARkWq#nn zfdbriw$ndY-5YiP32YgkaMhC%^t2T8GJ5YpRWqKl<>9b*!VyphqI@5BIO=oKnD}^f z`Tb~xh=IBh;Hgkn;%s%!PIm6nL`y(+A{u%KG+C~B{`+zMUenBP|C!A?>n28Xwp)Gt z%@!ifJAT<+4pV(sh^XE}cYev&{LhOcMmqeSEt^XI1g*TJz7f(R-22AQ&Qy!1>Q?lx z&4G=BzCq8^iDNk&IB^6-xNA3b{ZTsW7qydvpU1WCOpj|)x6b(Y&P*PA82M<+e|aqN z_!p%I58FwZ9m7AhA->Rn(Zr_49qv7K{L(Mn!AWM;8zFmdQWLpXL?<0Ws|o65I51fb zpxER;rsBf?ehlZMEo8ezgF#wzjh6Uaqgf_?pT|q_)>GRGTMIf$$--!DC25&YgAzmm zvp@kB=G8ZqN;S>)q0ccr&%RCj4gUHlJyL&K61=s0)5XX%gH}kb%l!|z-sNPx+Ln8p z}B(`=I*m6Vi(0k{{pNMvk2f4hw1zR6KrPRG{cImfQ z;pTq^z<%;CZg?o9k@w}FI}H@P0r1GH3J23qfepF^10M&0MJnu4&;pgbK`kEn>RQ%E zLK3N=_=xXQhrZX@M(9|di9DZcpTkPcl!|lLhbudd`U;EPLW(OK41Qt4u5?~cn^FML z9Su}o6y8^~L@_?}wwu{JdVMR-Uy6BwLeUGH#K3Y_j{7f_*CV=Ix5It5^W%F={+x7m zJ~ehbGi`tD1*dg=^cYXnl5Q8|bj^IO%Ih|Kx<@XaOA#4Q{Id6ZOV2+^Ftdbj;c1q{ znbtcIkyJ<O)1EU+#-3B75u#k9nDP#&f!EIQnvqx$gc3fj_yLd7tR{7>(VHjU9{B4N-Trkp`R!(6o4R)KV7OtJu-FC0o4bB6 zJi<^Vusu$g)}`53L(vz}5iKI)5aCGgsF+IrPxd%P|06ZGC0`edAU%_)oD#-~IF%S& zRF>1tqgk4J(A}-N?P>i;M!kl*HgoF&*n08vd44lxvcKSfPa{u|PjQsLU+V6^e@^4D zvy9M-M+TYDN0F>L`3K8_F=4!4eTKhMzj*5HCcGbcJ@J>vzC?b-cZ7n)?)7t{;B%jk z1lcvxcj;#F=bh}%e92o%56$#UBb$W`r7mAYyli`T8Bs0g8uofZG~G3c*~&E?<=db6 zBmKI___n0-wF_5e8or%2t*2p+_4jzCBWoSJ8o9-O)pt}n9X*-+`N#ynUs&FFxaAkK zXj-mtEnxVadU&9bbW4ZW^P9;zbcwHuhsoFR;m0E9vHM4P{+;)~MVgNqmx|~uN;`IP z>)n*8oM-avh~Pq9t7Mgeb$(XrPK)qAo1+u#km?X-68y2+9`TarMrF(~cij!e{9sFV zyPBJAgU6smTIxTEr{@FiKTr8m;@h@%+^xUz<^D|qh_7IpOhYq2#M@ z{Uf(i+2892BYqLdB{HNO4bW@VZp 
zWJY3Y@J*IMjqPKN^II93++25^6e;%3=c-jlipuM@hPmr91Rf8{AF8VO3~KwTXgu}N z%aasRdg_`t3L}nDwe#QoTT@p4-fyS)?H+hkHL(5h!Cgv=g_mcp#@FgE(f{#?V2w-4j) zTzuOeyRY%+Zrq=X_etDuzqyd(O_BvqwfljT{H-)Vn!mL;jo+BMelV*TDNo~fBYsew z3^(t$jIr~rNu+KcuTB=~-Q!7Qlm4PZD=ac5SR9@)QJe2&JuzCYsQjdk)8cTvmiy`x zvev!~lqp2W_V9mFAG78sa=#*mQmBz2YuWH1Tv+7Yv!EXDvXwfW_@D=rDA)-`=`yila0-io%`pU`>rk< z;^i#JaedW)B+=zn&jRvw+YjF5fu(ByPJoyq4Qi#~T>ocXpVo9bkC)ei22d-G!EHa8 zXhUd}E@-IZqS7lF?fHdj7(SISyD;&w-MY{1Jt55G?D0!#^sZ-Jo9kT~vhOG-kg05o zg@^i*SQVvZ`Hm1ClWF)!Vsl)mp(4ESnz*hWU1Pm}PfB^~A}EW)XOTm+sV&*Us;SN6 zO}W_pn-XDSK9kbM?7q8RJ2~Q0B*>g@dC=NH!-G99zeeWI$%l`pi8^^kECp93Zv7{{ zmWF-g-mlLmV2*{Y3oeewo$RN|$E}|PO9BkVigpV=#2fm&eEW~=Ncs$7UN0LgQri3G z$)FL7cP%<)SMDdfV*9ynO1*dbF|G7%{fGO&f+V*e#VVc zjI3MB$-@D@7!DiMHKjf(Q+!L(gO6@w$|F3U!HKj=;x(t)gHM1`=?M?m`T)SqUo-op zGJUm^g1iS=rGf^u2Ab4#3`w%PJhbHoy0G^A#IrP-Vc_$0WU`B3`wIVapjtvYpALAS zPG=WNn?8kLlgrnX3~0@ADQjnz`~nrR%9ykooriT;*P{YRlWcwMrO&Fo_3*zYtw29Y zWPQCQ-C7ZiCc3fu_Jw5gR4JdkNYBGRWtS#C{P~*R%%5jdY08M;Ks&yw4D44_9r-PuO-&ByYUEmZ1Vrd zW{2m{X`TA9>-uj}HIzZ#HNHe&X?Ocdv%Jjtxlv4YGQmFDce%YXCE%t2;=|;mXM6F3 zCKbi6xsyKUZTThodNsTBe@=(*3bGqo5u^nIERlv9tmUH-TJ6P4H87HSvp@M`h|{i! 
zLeGSAi%Qj>^)Ff%-3zx*ZkOC~;y!bOiGKk`zET07{O*6X%lIvN-y-L}Zb+6Ty#2cbz85{a1-N+m_CL9yd*HvWP@2`ZAoW&OI#mj=_T_-j zr54*Ri5eALHgrlPrD?9a)23Yxof0S*1*hJS~c=LJ0D)!f-9^c@K`V9A^oz&sYoj_R|%t zOz9Mj`)1ihm7R2l49Ah(SmidyzQQx@8GMlbgO8f~Dr3>_LwxjA$YkVA<>y13$r1W` zK_PFqECoDYZX8>;beiPvzV}q_2^?wXcgh*>@3u1-Y|v;<=Hu5~BnaN7(|eE{(OCwx{^3`^8 zszlYtO+RyOw(|$jxV$@C0F4WF!YnN1c{S=5UXgvRA12@AzC-;|afoLIpQ8}FL-(zF zWP?abl~mo~Qcz;7!p1+HFZ(PW`Rm<95e;}4b8T!kWjffDEakk;drkFi&-+^se>~XY z*q48)^gb{*yGm%|i?V)<5(x2A`jHZ^+r!^Wtd*!+@PX-QTMF8E9HbQtghvKZDBGfv zc{;&xR|=5w;>@{Ct=dT-^DE`UmRy^TIJn0(6*SyAH#@3H=#c(HKu37HP>zmB8LT|O zY&1%+EBW3l;#or*2|UdWq|_(I?yJ3zw*D{vhuxRI2Wt0X7Hc2=c&LB-i{m}W!Y%TO zx?QSF(0|A;M0J)Ke5zDz-X9`j>*9s*o73RDzdIEFop&pcNe||-S!a{BdG24bY27`| zy2*{Y3FtZRcz&9%;Itd$t9?H7A6~vMVcU0Hi+z(Om>l|26JejP@NbLe-X)@H-G3HD z-hWQsMQxr3|F;Z4%@R;kDAasBs=^I$0^H-rgM??p0!&m-GyzVy$zFcu_-&>LAZuq6 zb0-oKr}-{c6B1m_jKl9Mwi@^HL(H&{SNzbpgz@Aiu)BH5CiE-6$KfIpjzCP458uxQ z!5671Cr+lRP^4uZcp~A5fL|iuIpYabkFZqV0*+hIh;JsGHTa|N$k}yGz6zZe$mfQa z`@-sio`-3kim-W;Jx@a7o#Gi-HsV}w8m@N?&nB!~qL;|Ci|XR&e^|;(5={<{lBxhr zIrq~9#pM&fnw+Fq6Ub7fsaWOCl1xsbuN7y(7O+TLFsQX%s28kZ3+RPU>E&n{oI_aC z-C>&p>uhK$i6+#fIO^~5*4p%XyW9FIj`;&|E24mT$&KZ98#B}t_~AP6_9^r20xA}X z9fqLYrh>otknLGFHpATQdE7RC(6;;u`$!wxMy zi|Ydv$wRB^1uTs@Vi`GWqw}jmw2af=>GbE;6hkCZYU!UfuMVPEwg?D`DdvS~=6h|3 z)vFsMPCUF$JUeElSs+eb$3la)V%xCC%?P1fpqvCssAX5k6eT3VBGs|YhTTzI*dCYK zkuTc8F6Eof@8dfFC+Omc%K14yX=s#-MvyB5_3^W*Vccn$Vw z_k`*{Q!?cPbJbTxsw=s({+RBkl=FpCRE9WUZL$ykX?kkqh;T~oy z4tX>xlAZo z@(;_cAKw&95FZoZt0$D&p=z&+jufS1we+sF9o$7ew|TBFytiLbY8%<23mP05$sHItsEss=S@e;)tveGBfxW2a&x{w zGkz)cg}}!m-=t-#u^&`hQv$l}NYTBR#~J=5SI}?bi&SY`Gv$IJ$U9=kHtQ<|E{C=N zBP!4d=Rbq9Ff8*jEQN}%wA0bPm;V`l9B}#qo{iJ?MA5O*a>d58h)K~`H#69G>e5`` ztHnc53LzP_uswHFCvJynmi4}@!GT;@(j1FSq)SfQf4fWTYt07DopK??GDsJ@kzo!KBEd?IExOpc+?Rq=;W_9VT7x60l zBD75LGP&kug`t;MmJqF^p{0*klMb^BM)UUZ&)HI~59dJ(rNO5!XjmDv+?(TR%CtJ# zX;^6&2Mbwxmn7H~LX=i@R4#QIrbF)WFdZbYGNPqE9*0WZcPS4yj9lDuD|5MbDk*&5 z6>7Q@S}Ys<^0t(WoYaDhV_2F+k;%)swCGj#xw+e8f$SeJpO4^C)ry(TlOu-(o<%Pe 
z%}7y2H{L}gfH~*KE1F=z+tHe>%rSV2v21wrMa+Mb78%R@wIshKQ@9-+DKY+=oivrz ze6H^I+&@v2JS7P3lAF^K@2PXf%?0PqDCP-T1)8b2ILlaH(TWzbSL`{3P|)^wm-jzc zna<@If(}aUERF*vmf?4aL3g@>6ncJE(9vF;uMjjDzJ@ZgudFpOGT5Bys;#mL(K_8M z*U3OJHRPc-^O$(#Sv^@&yUtwpZCn}{(mtZ3WLBJW%3WTc0$!fFm8?e4%A4F-jd(X5 zZg?L^i#BE~1q#RmB=j>s^gQ@E&LuCeII-#p{5f{m1EH|w;hb0e86DGc!^g8U+P5_4 zzn+|pY`@p}$5+YNuZp>8%efD1RW}x@hv*Ua^Q}g1d|RvLa3LCTND8UJ?)B_n!zvk7 zfrPj3@8h?bJl@^Cl>npbEZJVx{Mo8S5we(Vqn*HbhNfL6ixk_H;Iu^<(2}bcUg0{; zIi)HE_su%R$G+>R&EeTE98=13MT<#jUSrMj@|IjdEyfaE^>@t)2 zPjtrABg$T%wiTbih#aUWM-vZcER)r?zk zxL^8)`TRm=zvrvGP~1Jjw#s#)td$Ni%%oP%*j}1@YYDpF8ZPpS{@@!SsJ}27u%=h=?oo{i$gE zD&Oz+-w_+tp2-B5ow8BYt`Jw=y_0J%zU~)L2G7-iZiQ8QWWM9ni!RP_3%+LE-c6-)HSvb1o=qX$m9h!*tn_=-pP9}9 zwSNoy+MX1zz|lq7u4pLxoi-(>CFYNV#!L_mbi1z8oo1OSy;$Y3DYlk`+;<%aN2g<* zP)Y-7{^$4+4ab`gY5Ngz^v~#FzqT#mD#Z$+*#!4QCoA!=|i#jwTAMSyq`hY>cnp-s!H=+%){A zEH@ppUHajl9eK-+YiRj2Lo;sAkF4rNJ?M_jCO7&>>u9>svwxf zPJ`aD5s{QR|EfQCjQnA#wyNyX{S&J%H?|_vsa?W%$Gw})3>AInn#^cjea1Dr7(*}7 zg*}DS7OiXek{c`MkGCU&)4CcgnwMrd18<9qHuupgnFW`;625PPr+CAlAVn`Fb*X&m zNY7j*Ss+HU$>2SQs#`?$Ep8F!oC~Bjjy}5n7i&n{@oC~TCAzJN{ruA(#s%SjY!iX* zS&Ggt=h!CB0RJitx)?6#`yy|>xzOZP)lti?52znwfmYs_q*B>WKrSU{zH`YR68Njo z6QvY9oBjM`*^l!M-$%kikl%$EycDPYh?8t-_+8c0QZ>!ChPyX6>%~#wGgy3+&=X!^ zj==g5(HE8cb7IruVOh3Cb=1I`QVFNt@DFs2w{D=Kj0!oFcCdzYd_uX$ZAW(mEVwMVjRa-reV3%dw*8~5F`v<0tTF9Q5JizXwLJHO1;8CR<3 zD0dw$H95Yzi&A-U^r6#nONO{pdP3gl3qJndWvtNR*c0??-np+^(sd+KK_TexW$6A` z@WER1vl~6Ph`mYgN~d34p6AS|B z{@Zs&|Dl8@CxEGe+;Qi?Mpu>3N9|eS;3X|{aTeaXtW?mpM>Xzctl}(=;zJ=K73vQ6 zxQZfr@JMuVBpc_5yaAZ$>9+3D=`q@SL3Fu#b_?=xT-Aj-LX6uMI?R5HUUWu(^OIH7 zXT26JOlZ44Uw7u}VlXv~JD4)nQA&AiDKk5y#~$p}Wy5YyaJOZT{2;et>rVjMvjrbN zDY5rUmt%5?Ubbm;iAOoGk{{hU{Lt_q^StDSOa5^=i~T+K!;;!tM?En1;DBZOz{d|W z{hvN0wQuD&hx9zZqCTQkBa3g$_hn^$&4e};u=qsKMNBk0}BPaGXZDe-D7w@F$ z=)>lHXT{Yr{bRXh56>$dK%0JG)Q;VcO=Zd!8CR7rjoMr2UnrjBxYI6hb>gpo7ZM3% z+H~$-0BnE|N8R#?It82~#HR^qwyVwNj8NPe5LJ)m%#x!r&I*zVQbeai#TRpRsBiDi zX`8W0&FgA(>(o!MG{-`iu$ufG6Rht1^ZL}R+qv?xfgc2{hLLJhvp(NtP3CS=e_F6~ 
z{jQTO;|KfUjk5mri@f9z{{+h99QtO7@o^AP?}fKR!4j;hL@=hoI)6cCxDG!^XOKqGgYJGwlb2AnfsxBRZFaXO%50P9U;2!rhHq{k8f>WASWh^-fiFpGK~ zso*~E?+RdDPm4CVbQPP{@wXoMf4Q;u;+o;{0`?wa$>#_- znx7e7$1g7Icf)7{&?&v?Kom;j zdgZxz5{rm@?wKdo#3N}SP2;N7Z}wyNCpY|GL)3X9v%n8NL%4gM`o~XP=j*`i9p7|! z83(&RY64a4UGBf!8u_NI4@xVxpW55S5%w zCN$}z%PmV<>Sm~!yjgd=GG)pH0g1s|pu!SA#eH2`&4Zh?#&&mi|T%SS}Yp41TL zF=~56e)dOAt?!k*nWfVa z{c0vuo21OhzGqL@SwcjrItTl+{m-D!(?8EUVzimZ6!i`XGWyKoH|wRGOU)8QxPHb{ zsD9}gjen(0+(_AXAhtc=sjbfiLp9;0*7xtN>Jol22x%N@QP8I66O1HiILSPxXNpS8 zaQ9HoLm|5{tzBcsS&*KDbh?CPF~T9v#&i1M(rh(mB={iB-@{)8v8~cl@%Dx?ieH8PMTTagR{7 zcWPy|9>X+Q$A5@G<<25=t$)$nGT6U{X+6&SAfNxE4XyVkA36i6dIvGYPcV*g5p#dG^QTe_OZR7B?pXQ92@PNvge zYUCy_-V8huV9RqC+lN$!&$|n^#{XE*+?-*92=ad`X6w^frv5nKNz-`*egs1ShX79x z&t?L-BgNd;zu4_2f3jM-Z7uig#|cgEWTqPBgd4M*1UK176~$ubOJ+MaE=UcKLM1Q2 z$6(bd+0)(2yzkQW{D)`7z4~|ggf!#X{nF{U{-F8nSosAImfmXW1^f&VRO|cF#VS9yF_8saL4(7!m*@$-Gl(Fk3?5c4kaVII}H>0hz ztD{P?D|DqjP6NG=Da^tTw^Pa~ZCsC-hTmW{$9p#sOmJGxACtl+^26-JZ}*ARC&$Jc7Wova zRhT>>-$FNwc>U9&t=Q`l?{Q?x)wte$YS#Yw`3=`Hn{|*&K20q<;xSSjVyH2PTJ+0s->}oMgxr&Z0_~uMKkwO)sDW_k zWG5f@HXkjm02)YhqaDesgB$@MWpUtsb7Y^JpwD+iKPdrYhkW0fu*HBBHUNL#O*GqP zsE0C+nuBFX)P4kLQoC9A2rSeMYD*G~`T?u{4%1dlJ|!gEVkqPnlidbbBD9#@BF#N% zMdW0!ne!)WcbmmZU%R0RH5yMgDr4~_nraJCkb`%av$R+(!&ze%oVPwl-u6w4{GwR8 zz}}5RU@;&D>=n}v#sZYzY$LkR;CokaC^kJ;U+iuafa0o2CLe*d;baKq5Vdiwc^M;; z51_;Yn$Gkh5U1NXVottsWL?H4CD- z;?PL|H4LpqKM;unK>Eo5F*Fov5XuA8w6LVLrIU-c1>b{b$^(#G)<`)Lh#3c@!~uED z!G;z{Swf-M6Oi_)rs|yRwwIOzA5*tCl#7&ieO5RC044KNAPfOi#B69kAeaaXngy~E zve<~pbhpi-J%r$LLeVnGxE=s<7Gg{C^TS=`@e8IEvyt)6rmH}@9ssAC6EHrfaA3V_)0)K@CnQj4-jz!GsOd>iOO>x;_D%(&;1~%Ie>#0^V}Z5?9 z?PP81FPf?V5cdf$4aXHt)DOBM0$yJlLfWrJ0eB_+;Do?!f_Wg8pARNNjxe^)0@x9m zu(4LD?Ib*7JfK7gC;{+7c9bz-%cb&pAq5C2xALowCPpG2W}$K6dylgN&Am^{11x|L zf#R3~H2_UPNvIKkh6@W|Ypom?h8{i!a?Aq7Fxecf@r!3T)0zUtf`s8&B>NY7Y06BI z)Uk8tNcxAmiV4 z5z>m)TJ_dz5b8sqZaGYxRBvGqz)!%`x}9@DtdhHob@TVrG8;2~v-Sw;^;9eO5JANm 
zcMA=HUvFfL7u=PldeluozQKavSP=Fqei#iB4>y$URI6M~O2AtQI+%Lq?4>^MZDriFAza6EU0L*tTSGnMGGNC0Q2Gj2i;OV zc;LYkFeyo0c*mF~%$P9}&C-y^b`zpz4iF`h?^yKb3l)W0?zD;mXlwvbt5#`|i+g(J zP#d=@Q%vZ8KWGb<__RDA$2cENW~8Kfg)9yw8xLT>GK{n$Iqi`0m=3N+5R3qlGe=6g z0Yu{yr85AsgqxaFa-mE}YfZ=7+mN`Y$*#hsG18BNzr#Xjfu@;S)_ABjng)R@3G`qr zoWJH^k&+jxi0XpXN(02P$u^`C>sE`1A6=mYnA6ufTL9D+_d<(>##p4(xAMi~RJmVX zjhU0(Iaz6Ya#1rpAXxt0sp{uhX`%xOi1FE+>_)N7$etk}k`M5_AEyzz$SyI+{u&*) zVriXT2Z9p8T~^d>#>lY>)EYSg^@#wYZUv7W=o5)3C?05P4&KC2R|~5GmsQq(aesnA zj4|2$*C59ES?Yu!M}gOC2app0&C;a17b8R*3z%)R`QXW2FROO3O)b~YOQC=gzynTK zJc~r}D)J#amrtZHUT;DGEZ8So6nso!oU$HyoQWa#p!7zwc@TVoE*eVR3rS(Lzp^ab z;$wm7kQs)stEI~17YO4iSc{HeuoH@&laURfe`k)AWlI{E9fX^MUlWn?2@E3ya6KiG zr@Yg64j~HYl+Td$mNuQhA#WA>h4NBxp#dS~Fy}kaZ~V-5?u?G21*=eKk2kaeTWPkM z&FZYv2O5^l^21^`AfVGVZ+b>d$EY>6-{Ho$dr*3f)Kag0tND77@f>`vlX z%zk&0$@q)MX@ejzBM#V4V(&&j;Ulm=bFc!RJ~>$7!QwNOu^ewmEzIT_J#PACpV-$z z0ynl%Ohmvv2Ci0-%okxOxqN^awvTrf)x}5M{wt$&cHq`F=bk!LlSFgCH8oBQ-*(}8`7vIX zGP&J^=LdFtountc_}ql_=aL>89bU#5UOqV@HB}R>1p!E74pk|HsH0z6&D!Z<$ZgEC zKt%bJC01Iuf7WCc#9$A_&|XImgy?Xb=A(-4c^ac=;IH06C@^F!!e+{X9 z=ivwzDPe*fGza6vKw|!aUhVX})<^@6)DeM_(R!%=RiElC*w)R=`wIS5190M#Q_f+h zH~<@Qq*Y)vDv{ZFPUw~!6iFzpc>%kQ1*nl=p@nAFb=|;OAl=BY4ZhCS9KhwK5JAV{ zuhw|WEwVC!CIz{gaID({V@pV5fA6lxaHW>f*O0&h`2a|$yQ|L`ZxQ0rO2P93%6lws z00#4nz|N;pot90GlN(ahO!#NSS^d^YP}dRXjyW|m9@umrz(Gb=^xOeH&ZucST6?+A zxtdQ+9K~q1LWZ20*ABr{Zg2QsP`}Tp67L5BiGWoV%;`25fd{_vReP6YxK$+q>1a7t zMlTRnlkNBv08hVrqNVgS>MC>J$fD}90E}*r=;Hx@vo!n}q&O2b=x-vZ3PN(^pfn_a zb*qg1B2Xv{XGMez*}YLC0Y0-sHL!zf*n;_Eh!v5V1I6!}Dh*(u<*5)DHAkK(F{l7L z)H8kbq-1dxNalQiGzlRt29U%gO8m$C%)188O=$N}+0JV`JsQS08FQFA<_i4#qMV6qjL z`Hhz4Sa9i;Rdye8;}zF>di6dR=mVw8u#E| zz$XB=E!_iSfX8xg^9hDVoRocHzHvgx7Vq~s-Q(Wsu@N1mKEZ*ZSsLjL<3Pt;3M}YV z|J`3Ga24wPjt~fB{#1zo{w4zYWQYEnt-)Q$)0=;fmq);=KObaAYl_*n93?`~07Xpv zsav*9>wp1%YIY;v0wcp)OPlkPjFw9pl=MTYo?g93fuCiOQc4RN?hGc|9dJBQ0r8IV z%E8|Xf|8!~wF<2B*O`P3|A^OeIeqDM3+kMIb)wK<`{XPsAq#Rz&rha;QJC#9aMVV~ z0-@mWLHN!wEZ*^jpA|Id_~m205h=-KTQ}`w1dHdF@AppSU6WEY46bEm?Yq*PHw`ul 
zk63l%p7<1hk-$G>wc@PujH-Bw9Lq=l_?1I}1>*Wv-&HF=6A|m*v5KkdtKSFo`@5}R z!S&HiT|=mbSYU?;O$Zpe%d{s*?4OfS#Ka&tU3VfGg=S&`mU5Q6R9z1;CXCT)+>#~45 zLWl))`dKeRZ%>M9!KiEYgvWx`0!Or%<+FMRGAGg^XR>Y3GAJVe|3~Q);vL)pB)%)e za+yxD0Ahy?+rTqO`gd3s&A)aioawKz2cbt?_`3l|_N^YTUCeRjm;ov4`uS^G zDWI%n$$rDI?uPf@T>BHCG=fg6B=_MAMoFLb)l<*$0s*tf>#Gle%soLHlS@6I6u-%c zUy0qkD-ttZ@}K5;265R=H zVv^8_qem*x!LEig{5t}a4$Z*bq@>`o^BA~YisuDTkA9$m|62okr2kQe(CSdHgf!fD z2za8)>=aP`IFagco9=xl18jVOQStx;vY5c(peKtRqyYM!`CCj+Iz${mAMzIp?ZEkq2|EeUn*5z~-WWI`O{iCumof@2gn7u9o?epziZR%M ztG5Y(xVWj-=`;BAvBhTUG{@2z5ybL@vZQq3$1DgTz-WytCZ?JtGxL{sxi>^B9>kXT zOQ6ih1iF{P>>I^1;UTyq#FwSmo6dWa@GBivo16Iw`0R-78HHk$C7-11TH|06uqDc~ zp?8*$37~HA_vDwaF!H|^l=oSlU#`XhocXk6elSxI{za2ozQ&u+NkmqNkQuQQ1s3nx zX|Dm$Y^M05D*=q$sUeA_s@U@%7a=+Pn4U2b$qi%4Y{4-82saWz!9_A{@NiDsFw(KmHxz*r+eH!K&c1N>)VWF58Qv3{Uxd5 zPbW}>E5uq9mmHXH#fM>^ex~PLN0l1`00Knu$^Hl66wM%O12p*#vBKZhWZ57{+%MyjKx?|IE1qsc}@|#>guew|6ni5)u;c zL2$A`V93{NORC(H;yG}juIw%5fC#EYKEq-zFca|{8 zN{)n>+iymn1}=bDXsK!ZhBooxk-Eq+8YVyGRz4RULu&1xocK zy`_OLJc8{ySt?~JSMGxlBX0qldU*rE@azBP_sb?;iW7Yf2mW%;40SymRd3xz&5R=KY#&h6QK!+Gz8lq^iwA8y~&Lq-jxbO%aRv& z6UFMMz;|$3%#MZ{!snPMD>MpVPoE?@o)C0LTI(m#0+JevFnb^y&zvbOs>3dB^1vuw zEfz_A$&kQSYl-^jjRaCLC$K%w1?t7f+=BIs^HqXANV!5X3v4jt{i6nd6+`n*RqDv) z0BII0(qlbCi7-@8I!k9T#3^YCrhx#^t_&39;rOtZq~1}^_jvLyBXcpg->lG&!E8A9o+nHJfitamLD$G}WFLHcnr-4SDz0`@(Ge=z5rYFP0-|faA z9Ui$P^@koq6*Wi&zve#j!-0^C*JUsh1l!voT~Ga701%DoQ=QU<=yZb^1OY%9B>P*% zJt~bxi6_sYt8{|5%aW6XbP}_qVVh zeA7J962w|a%+^l|=YO7h1hGb9$^6sV*h&FSzDO(xjfPwU3t&$840rW0kkc6l$TwI# z)1OR$4a=*G9A6Abx!#ms%Ku0b~9FSeExBb&%K~XM|kk zzuP%_b*t}o6mxb;XBE0bvI5DJfRs@fnrHo<)MfGe{P5A+X0Uf=r^FUMCy!G*?y%#VQrqQJw?;DR621I+^RzOkqx zYZ}r_7JzCQ7kAJIRiMWJfa>c2=n$YOJ>>ZRT+t)7A~S`pMZ4$U1q{_;*3DbMXL2|S z8$erY%?Wvtf9`Mncg7sF+!!D;8bEb`8bZ=&B|OaNHW#}xo6)*=<9>kWy-ey8kA}K0ieWAkC`YXm3xrY9?_qQc5#W zGK{%U+=pf}Z3_#0sr4D+qD~lYu1m@#B>6;X<(852_%!)M)lFF>3vxB_|H!-lwUWIB z5_6#{c%HAr#}icwf*Z&4RDkXi3pUH*#ia|jQ1QI<&lD;_N`HA?g!O7Y70o^sJ?%+N 
zFJ(rHl5-qDG7!mHmCy=b-l8q_M9gDX(Q&PDmMl2=zm`-7hUfRu36i4E>kn|9F;w}0 zw$6A^Q55(_E?AV92DQ``$EB0C$_G;_SPH;56R4Uo8JzZfH&Ve*^84>3f}EmpO3D+% zwqT{wntTIS~7un8U{=hh!HZ~`;JGr|B znbwC)q@qadOHRLmnqY5q=~d<>i)ZVnb7`gjE6YXaLoqS=)XkEM+_2JVi@tb=9&wm7 zv&?XBIgfQRsresQ)K=w8Zl1^?zq2{R{gu4;aPIP*O3zltPm>Ie%aE>ZQJosm2k|o7 zV$}lpc-~YVb%}WX+#dd+=Jerafw^Y_=h)29-tT{Tgw7d-yi?c_kDZAvR(#10hB~f* zqJ&hPr_|);Sl*|x$?kJHsFBC%rKy?ca&B<)TB93I(Z!@{IDas()Qw%y-^HY?yQC@? z=MTK${fRY!eV}KHRUkiY_=MHl9efi<#eb^ttUS$|kKd||s!Te4C879nug4#}3L3+1HzUJ~_E}&Z zJspsN>>no+c9J!uiTTrHMET_7utxAZRo)OSHoG47c?O3AE-81Js8Pmy-bT`&j5I%0 z4}_&0@l8qc8Ov{0Oa4&1b@*&erdmgaxhv5^=P60FmyDS}o$O_soKCdxI-eRH;pN$# zPar0l5mGay$$73G|LM^Ij1~c7Qi8D=Vl*X$(^h$m9+wfTs6lZs79>SKASlcF{5*|z zB~9^U`rS_PO2xZp^rg=XSc_7v9vdtYVix)G7puJ4;?puXUEkFWPgl>qlIMQ;;^4N< zZEM{m_~b&$#)t&ZCw7f;(=7x2na#4z>AL$&*$$;yA@WG)6ZTLXT}|GbKi$cD^yR;x ztUXo8^h&{tsiTr!ZNihpX?2FaB`1lv#spG#u ztw|ouwF2Rv+Q;+Fnz6mT^>=gluhmjeC-3AxKFP!WrWX+MDMI_gXrA@@_R06R?nQ?8`5%e|Z<(EiQMsLRdR&P6m=jP)2ibD_7g zpB$$g^{=XodOO)sQTaHbN_^6Wbbr(^u!_OUx5{^S*s~VPR`-#PTl-jMo<##B24;Fk zVB&++-~gVv8PGw#NG=8?X>SK3+P+lywyEJZ))qzvfT{gBi6Cs$owl!MQhvQC#_ z(ae9oSl_L?x>M|L0qe0`EPbWhfY)u2F7?K)dsD6uI3!MfzEBQ$SK6^YQffyyW$h6K z1en;L-rP9N8dSBMx$pO~@k3oBH-7P&6%o5p9g@7RmDBak*`8+gSnET-hYI4&P=ANR zGZ(H3IcX;iF0dT&Qq<<3bw$N$?muOQDDk(;77VNvlXZnlzD3-#n@e6>sw_V%83Gwt zU3wf&RJB41P$5Pd)`j#pNz59*7&(e%rWB^=c6s33V@f?DRSC(%l|}#M*{(ikNq+a) zxE`8JgF9s>cY?~th^7m|N?Zh`*z8M1>^)c?kXZ)u<*N@s7+In%kj z>(g@h4dp=i7%n9Ue9}vhPi7T28l3WXhVG$3ayFGW@(iodlc%zvME(f-RQo7gJ_oZ?shOn1|Bkz3n?#HiG==41+zIB3qX zx6Gy|7m8dspYK~GvFNNm-&6Nn5RSVS=#-2su&jf-ZTvP-60)*PD5ig_%fMJuOv9Fd z;+IQyI|knpfB4Sbe|uiyQ#|mNV%+T@nNQD=5*nu7NoNz@VH?d28)w}5tqS_)yIf|U zlRA>F&kH2!Xhr|n{(+nWu+L$@V--XzZ7%EsvdJY^v~e$5b-7dNoW&I!(a67<#|pfA zqh^xj?`S2DqwnoJ+>#CL)#^-*O^olJ%$iZQtFM*}5BrcU*#2ax&2 zqmLe?pG`(QtMiw+20r&LrLJ&)%R=nm>v0W`8I+QAiY84*1@22rBBB)C9(8&t=?y3f z+ez(d%CCPT@eS(sEy=;qv%9t4=RT zOyg&Z%korCbDI2f*KT-C)P$=<+VWs(5^|nKbrsYMd(wW 
ze}s8id)2)cO)}xV9oF-GnX>c&%~F4#H8+Z~ABBoN`^ZLa?88d#TfHo(_f14MYJ#y^fz{^bLCj zsT31l=nc&kD@1|SvV(7Lgp0;lPkC;%aPPET{hFMyPu=!U{dGw1wSyg@V+3+}Ffu&5 zxdaGXb=9pjR1`9Ku?lgS9KP8w4%Fg6xZtlwD>OF+OSsGA9m~}n?W#Stvqfla^4Nk? zR!N(?Zjzqe4r(`E`_BB%4|uXR1G&F9!_)o1Q;X0$%seg>xts|Q7n+$$7k#YxfOpp6 zXMnI*Z7XH{vTYe=zjx)QW2M)5@_vv^t?ZtkV)6QnPOVI}+e^vA??0W$o@6|?^*5c3 z$|}j!^t|;qz#e^WP2F^mYi4TkQlR?j<9|0ptl3s)<92@0r}Q5+^cz<8_i#RIpoiI2 z_D2VBDEX9cM_g;?$Hgevmmy4hXm2Us==?|<#lQ)n(Q3uWwF$=P6-dm=u7;TByvAOX z#*|Co*N*DbpsqDk%c)Myv93>H&VuLYT3_@&luz$xaA4R<#PUhpXJBoLV0uOm2b?rf zAEZH{snFmSy|l$petyh-@~=2bO0QN{(G9X6CRdF3&NNv2IqKa;Y{SF%zplayUJD!~ z+S8;wk$B7Rd{B328_{#;cd-bkwSq%Q+;70utn2R1s}Le2J7CxYUD)}u~6GykWW} zOk}oN^AU(fJY00XR`1cC9FhNHN8^*o&!@Y7C0)&?U9Ru!KYYE@YF82X!DB%0I^{W2 z{VqB3=IUVJ#;8Xd>zab!;S$7-!d)siA}$5Mz94C^K7yqJQ!da3Yjf7W-_qxUjU(qw)l%s+`T@1t;?k=F;?ooUR`H5HQTYG}sCx@9}sSPoPtj zAW3_jq|K)UZN$Z%|NQrl0*ASx>7$mBn_JXl4d~u9PatdgfkQIM7D7l`%&PslC=Q-N zdJfpVgL!=)ot->=R^3Dk4k4dkH99Ol@AY&98}lG~e)8^{oVTe@7A$Uo*z0q(8SU!x z^l3eEENwPV?^rom?0&GZy#rw>vT;)IuyVEgK&2A!Zr4W9C7GQ~*`v{8&u$qV#^!?T zuPbwYK!de)OB2$wbIoH|g1ZIlHJ0GQB?mvc9u9%;ct5W4bnqFekJG`&^7~4E3wp_iB87LzhBCxbN>h-PV5a7qU$) zb9t5W_~97H);ad_Gn+r=R3l>}_VvYyDJsB#tR@Tmuyig7?6OR9N#V6DolD$}iJH$4 z43uBcXR8UxOXso>7042+E!|4-9-6n*GyrjRTP&C>p+0^W`6{uG1K=e8A#rnSy@ zSy`W>cOUMHuRI;LuL@fc+oY8ro4G52nUH-Z5l6VZQXwtlY>VB4MAE zOfr>2awj0AsGvgS38I`Th=cPL72^xej=T;d-VQ#uCoEMY_;6A{ZfbJ&%}-(7M^Cn^ zpB;mws24R2HAXTfO!>5h?%KO1dAxe<^+8aYJIrmV%qN+P_TFNhVQ=Y2^mfUt_`eTw zGr?PsK2Hpg#@kb2%rVS;XO?}r@(67+BUSTaqQbvmJ#y8z#J=NEeyz3tseZ+>e5>LB zG+S*{SZQk~UG)ivS@2J= zz3Ii$JabGo#oB+WIG1f6UGw)yyELAQTVneaO$YtDY$Bz;SiX3$XL0eYd|ol>y4=y+ zs_&_Fppig%6QqgznUQI`)9D~4LPXI#_`yw zC#7i8%$AuvLn>FKJ1;v>c_LN+WhzA1oIw+rI3{-!|r`zubRW zKGCma`;GUbbu{;Ws#nPtgG02@ZmVT9Me)<{-In68N~+q{yHR7C{ftc}o;BUK&i>B! z=R&ku_^pyPaTJ{XiTO0TGoeM><~!0}$L}brFq0Tz=JIF=&`7M~1o9_brd(==k#!NItaSu*{8B`_}ig z+5VOzc2Daz`? 
z)HZ#SvR^{r{gFK`+m&>c=3zivU5-VTgKxf-&UWG4nwCWFFeL4UgKJOoP@@#jJ5%ih>XtRvnvZWL6krZpflLtaP2xLc^Z5SmWF^cLpH6$FR%cm~k}{drX@4FEMuU%Y7drY$qK)+| zgE3uHbXC?qH;$#=`4jd5s`qE)Ae(jI&G*nX`_zy7D~LLUgAyTdxBhpe|Mv&mtD?Vc zY{wi=M)q&-Mn8iONqL($Or4DyfpZuMQffts&Vz0iUm@dCRMeSATM+p-Nv|~rU+4Io zy$%BYb!h+N?mr{|HHLy1vgs3LrBF~EIA}WQ1k68v2~}p(Vu*S{CQOflBF_o*afmQJ z=oM7P8MMfm;DIAa)j(}Yz(qgl;KiMIvAaP=lIC^49ZL7aZ2e$M!%mT`ANkovwX;rw zd#K#Qua3a4Art<@>}Op^enyxE2v*F=^7oY*#w?!kERN@At`Vhy=w;_{UnMzf&E5L~ zzzi*vMr^U1C`*P0IuKrFx$IoLA)Vlum3_lQJbkWwW6;&ap(Wwwmo7>lHdM8z@ zQ_(=hudsThy;c%PV~?g+_(UK3fpg=Wa6jSeXXLvDq5saOZ(VccXD|0l0xxceX>vk2 zF)O=jms`9qP0zCE!0kwK#by+Z`&`^LM}quY%1i36v!sjd|E|t9UjF;B@!x-cHm*J( zrE(!Eg?KmCqp8l2l&3hTB=&*H|CoXFSPJTL zCC2yPz=kBV%M~!;zaCx&j!j8_$E#0xYEdAAAWCfr%*wnD&#sAj3TDr)QM;@XmvOr0 zKXAlta<=ky&E=@!wdtmjJYQ>gF7P}To4g=b1k9D^tcLdtpgxztiNtbByK+iz3D`g8 ztVI1rZLu5h3)u4uI&Cqb`2}qr3t83*t8OumXp5ECiY<(ZRlADUZmD(GijTUI#9nNP zO=wG~qJV2-FY-1dPq$KpG9|^+m`1?r)B-Z2V=}DUG8}HQ9NX8|U^!MddGT#&**aMs zH<~pXdG~9sslcv>fQ-qyQnG+Nb%Ij7fHDlco0lLbfl~~vR$jtWj{~_fM@0)!>`_|m zsUY^$QH>=x_BW_oqXdn!I?V|bd*&!|6m@GwK$~#~wIaZNu&uLFcWY@|)YeVUcSkSO zT|Z_=ANJXajC<++F<*!Gd3iPWB#yG=Yv8_(6=~LlKej9B1W8+Pz~hboYX29Vukv zx=SaGVqo4v)Oy&rHXu3y0LFLs?ljm)B+&Zc`#!h6g^1iDHf3x15ceol(sR0q>@!o^Nek3iaeor5AurK3`jQW_2vbL#!>R7CXf*aH$jP(8LxwK-v~MOzh$QbjbYR*oeY z9mLCuCg_PKDIO$xdM7n%r$inkz_5wc+G>{=&Sn%_IDxZXH2pg07==xL0nBXl&g?wM zoH@vdA*2sBr>}^nPl#rbywiCqxaheVm{FDEX=PQcRdbb$b6jZU7W%ohX$`T$V!I-n z7Bv$ev;>yZll-b1{GeYrHd9P$t+8yv(|>)hyjo0(Uc0>bOBuOH`9z~kyN^F;j0y_S zJlJx)`0}`ResoP6AP>AAj2eF5TJct9QQtb%uk2_-oo1^ikGT4Kq;%GJhL3XZphEXhJpLe_9p6s=CWN3@ z_f~7imN*X-!~^{nz6I=E_w58}@tgpAK=Ftj@!l=0h&Pbc`&(1Mw?TfwU@f{S$q!ad zc;f5%r1r*>Y(JRqjp6)nk+F8y9hayonq=9Wvh^)~y)9ws#*4@Um0y0(`43XGMB_c1 zRi&|9u|T$9EL=ijO!IJ5Q-Up)z?O;~(`q00Y#%rEeq-aA?m7YqwwQHNgVrLVcUWJDONK zf*L&+(#0291LiwkT_QGw&+X7_7@!A6Zya6zqa9pzb7nxQ!&U-kVSDCNaWNu zv+W$DpB`{pUyGm**lfGfUAr<~pw9Ax{?;~Ul>1*S9dNpP&fX*xS&B`&_@J%7Olh|h?kx|2NIjys%&fvj^0 
zG^yo_gN;5Ml3=#LO)crF-j?!mv7pmN83{0R#h0q}M!9K>sKkCqcAyBC*Fo~}K}-m| zE)X6TFVcM6RI~r6yy;Qzk0X0e&N>m7-Vl!c&PT&PqVq*u7G$Dbv1;-Ms)9kPI$f#& zAVF5@rTF(Bs^3pJr1+I&)tblXx>0PUhdK7A=f&SMe1mdozo)+t&3x-E3Kq+%4pL;e zS7sqbnyEQ0Am=U~4xM@tTBIuW4uUOkwPQg0Ont7}eI+b|=&l&ttKjb>w>qJJDF&gF zlP>upMGuzaJ+4?6mT?c38NoQjpD4KX;ZuW|Oi=ZEH)*;$&@f@;F=^$BFq%g{RSM*2 zI2-9cVyYMe+8Jg1viH1gj=fnWnOA)@7Ug`5f^V(%kvZJFmE$SHyuWwW+Q~1{Dfx>V z;n%Ag-mUmco9bH!`L|BVZ{5D~?CyR~2{knl_Yu{?}Jf9S>WlNkB% zq;T%XK*UB;BERg9EqQ^Be$&-&C+ls`BSqDqzZr+HY_V7ix9=8RQt38_7m^D9v?8qU zlKicWe_QW+XOaMwWaLveV#Tj|Z_Iu#jQdRzZn-<_ILCncd>%Ttw6`ICa}(C-L)r^! z+}kKOpJ(jY82-I+8s9Y7*en*{^R>mrBg}w8$$MW6SoPXO&N?~igmIcEVNZJJE2FyUX^{_%Pu zZ3Dvl+77=0|0xFTM~1FPc7G*ww@N@PSxk9|_qVVVdBZ2)3~oFL!QW(5 zM@0`OB=jcWIYNI*%ATIJ{RH*iu2YH!`KwOfP-Dh&jsV%vT^76w|4awQ?As@|?#Ge# zmExL$tiJzw1&VrP7Qx0$Hb0+}&l+qj4wQVP0K6X|LGfAQqrZ5C0>hBDin)`sndG8) zfZxi1bJ)=P>b;E%f9Kb}O_F1?pblTNfNBsX$N$J>F)Hg{_TMaN7I>q_x^&%W2i4uP zn%+M5)!?;jIkW2?tRi5&8pwQjlQrCPjw7IqR)#nuRw*ghcnS&@4y$sC-nNmJg3EU> zwguF6>LeU%g^dPuJuPce5g`)FH_n9To(8N&YHQ06#@QS1cXE|@aX*oN{O9kti|A;^ zi<(FBb;Vxw!`l3HV@do$sY^EyY`y%Cyzz>?DU7`d$p`Uz-&LsxxPpTwU4uw}B_qya zf-bVZ|8Ded^^$GtQvDZs`ul~qC+inLm!qpF>hWP$fhT#j`WMDRx@&JYa37NNlB*}7 zMoPP{^ogg;ulU9VVSlS&@+U3DbU({ra!lI6auuy;hrX%EU zhQIAP?*-)XHoA_gJr@Ax{fqK^+|4}Hhh)21Zgad<>*)&?DKa}IG>J8a{tfW_=PfR| zMH;2fj2EGPQl{O{Rwr3i5E8!dw{j|aPuPz=1^4LG!-YfNxq8GyZ;#VKc=fDd>UGFu z!9J(<6VJF6c0u5enKnYUQnZ{@DoiTAb8I%@e$%Z36$E$UAlM?T%VLUjKQm4?+xNT3 z1KEqfu8bI|Oeck0-`;brtBhXkyo;Uv<6`tl3~9;mn$05NH9P0k<*{wMHM`vdLeG7aN zvh(8)I40sz8}H+K73Qu}#^(Zb?TsS6QC%>su}N3=Re zw1IWlq@2hYna_1`c%^&rNQ=Hi1GTiHB?u*`{xf|Rhfv_O@{WK(9;becASL4)dZBO0 z^nCff_f!JL^BipT>lLaaPca zcpLW5^muLPd+=5v=WLK^#mT|a73X=?)00GmZvu%QLK!Kz`o!%~KDT<* z_pQlCuj4dFB9Coz1zmE;jPvERW+)9yOg~@!`}<+{5Xfb&YD>{|Pm_*Bb3X zz`YBtH-@Lxncm5Na4dMI_*tjT^cVj>@%wMgtvox# zC7u?BF1D(xv4|Un4Hx=%`B_FUvg$-1Kiy1T-B}{-d%n8!anCoP^W(O1%?D*u^{2JV ze+K^nN4}Ho)2fX=bIbX0{6c$+;w(K$>eG0m0skj~7rOf|f){U|gn_la?uO49>i^*) 
z|LriE75lT{zB40Rc`lI(_5R5^z1(>B25cjDI|X6Ouay>A4EXXozp%N~ElUpMTD5y~ z#ydJ)q19V}FZF&(sX<*jyGWb%m8D19ZPx$%W_75_6a)*Xe^yuTK7LHPIJ&0m0utBz zy0s~j;-wR{GF=#VOWq)kD9ukE1G0qS)8_BkCvtHzfm$5qKIvjM7lj{$qXv5)eia*m zzFRUF$gE545cd5x4pZCY+<;Ti4FOrHf1I?glMWh_hvyU4T|X%8k8~?}Ac~_&i7KZf zA|aO2@sZO75@&*Y_9uLm%=yIjV*_KgX`^_0I2qZ0IMaKV78Gh6^|ay5fsRMARbR)G zI>WnZb#An>JqHx?C(Qkzf*Q^rGv@0rC%w@EFK(yUGw1P1xCn_ER_ob}cJFlP2(tPZ zjtY9_W!lxpAoJnIn0m6zAuz`})26pzgrdlubSwKdqk64@0;+tNwWW;nQlwUSvn}Mt zpT%9t=ugJZ@eL+_a8@<)i*OAFX*0~iw9My|L@fqSm^nQ&C}Xc5VA?^>7(P)lB8a;E zun}$vdrYRidpjQ`UM%&@T2)tc(C~^>3-AANTV%!a$u8m=TUlPwQTX#l?1iU_%J_bG z_BQ1=dPzzv94jMpO#7iA0UBRsZcSOoT6cp>S%s2?l@kRKi_!f3>n5#oo=8ST=6rhykY<$?GTKpt`Y_qnt~`+DM5m z{aafMxs1gOhOLPO^65_Wg~a@W+nKH(+Mo=}E`qs+5Rn~u-r{aWO$m!!YjZvpv-C{@ zrG2Z|V@8o?41;j&C1W?1J|R;a zVtWh(Jg4rK%56As<#%ho_0**-x)vnc_rU~A>nn5rbV|)vnz}psMC|kUd#FR$;4@7~ zwPmlhkJVY3?NTC~)aL6F!hsj2gCBE069HD)7N%d6yKjk#MM_za&HAnvEl-*P|9F}kg^aNk08;TwAN zHhZrcJv$lSjoxi@FMaFEy%4@Z&eTMs+cm2+f;ql`^=#H?^-POW*BL6s#15r;-l95R zR{tY1T{$5qonU#kX?v%apFB)8>ck8fL|W2iC{LJw23S}Xy-j(4_Ue+AK6cJ^R3to zN<-10`cI{_6a#@ zQq41imHV|O2b2s8wyi*{-_#HLJ!!Y#P$oj*8Rge(N zZB02A$wl|^xxJGVt1%n%+mqsebH%4(&e2nSlRN-=j^UeB5QyR)CfVpcJ2jUm#MQ;@ z=tX7?tD1th|4%l2bNXT5M3h0<*Zu}wBh?J-QMq}yVb4|J5V6gFaMFv8Q}Qw5*9n}RwIpL(^Ffiw)Ke~nrD#E4-J-`Xq5If=&W|AoDO&Cbv51!8 zV*#g=UIx^^kU2(!=hn*SC446th;g)*_mR`Jj*P-*X*ByV_VrTt%%NT__j|qqMSdZ8 zrXX62Ae@;e+73;`xow#WO1D2S5<9b=^ea4Klh5pb;Xj_a39lsh4*JAbYgsA3DsJUB znHcYuoTYd~55ylHl(*(Io4&rBAO^f^1N7Y6oWZjerv@Z}k!K|rn0KGcTa z{uJ|ZmAQ%qe(BF2e`p}|cBHW?(2(6mH}GO_+UX7Ls)X{{oU)Vd(5aykX@y*n2KKx0 zRcblpcO_N&;5KFM!QPN|jkvfSy#V8fv1d@STNuE>N!{~OQ{+rW_zaLqKLwSDX4=$+ zrsHV(@hmTCU%trB;n>n~oM)xj`j0eA_3I7rcZPOU6F{I_h+hk;-V%e5aE+sJD@Krs zr4^shv`+qRG1xeeO~;(OZIjuy9oWEhN~w~rO{aYP_au#^6bM!EKe+R&r8*{o zx}QjqZ>-66B)q*1cm2Oups*a!$y%LDv8V{oQI=rAQ|6fDj$^o+o)yJFh<-j03D+r zIcMpLghBa1$E`*J%qB;U@$io*14mbb)GXIjq@kLPMA7wZKes(=e$xw0Gqfc?&sABmVlH=Udq#4LFdhP2;t&k^?mK zjU-Q1XI_J72QS~TBR$1wf66VSUsF8E7h%?^AZrtnw@75Xdpr>Se(%od-gbB!Q*}Z0 
z!6aQ#m`WYf(u%UG}lm?&>uUR z5xZO4SICa5H{RU8!ZPKLxK?JxW~pvPN-nHW#juW#ECUY?1R@ZLOM>;?5cBn?t z+O9`z0VyAdxNqc*5@1982m&N#2>KQXU6LhyHHEHsMkO{yz5UI9!bnJ`5tQ_YA`KHA z4T-fN_k8T_SDxazo9s3%;{2(`mo!$S@ay90;yDWnp)Nxv8<+1*6u(_~Dx@jfkgjcj z-g^1TavxYT1}4l3OFpE8y18xF!0OY?Nt%PD$nV}n7fx}A*(N328^hs^q3RR#i~+(i ze4f}E@>c$ixJ`B^8wz*UJEy-Y>s`(Kw8(wIPpY?>)Co*F1SUlnm4~|T6l;8Rfqtl( zq}VZ^o+Y0T{whKdfqR)djCKFU<=m78VZ@r*864Cz6xK@QZbiHCWf+N3@DTZ6;@j%o zP|YYywK&-?mik;11j<+>PbuJ@X9V}sHKPZ~Q;Fo+ak+*hEpXkZoWOE(m5gW}-FZ~R zeV*gwsZTuuplv6HcVIq%C|Ns@Ozn7>9v`jw$4FPU$Zd?h#g0yt7k>2gJ}tH04K2!I zJa;VuM#RFE^w^7#LA#rzs465JHOF0yr_BAxU2H)K_k^=e>aWZQW$rwX*7a<`Qt67a zV_ckF=GeS34?S$o8n^n>YaRxjjWupj5kz8_=egO6AN?!w+N-C8HORH$!K`Q?97mxS zDOFFTsAq=RV}MXJP##C28Ud@sVJI*_W%RmcF~>A6HCn4hM=}EBE-inEOPcmJ~MW|CZJnde{mr%F(qN4PfN%iervOg{ZqGa1 z69fw3PZX&v4HGvOh73e!lPH%F&RoC3)$ka09A)!wJFlDZlxP494JclQk!ykB=pr=$ zZDZ&CvsbLT^88CKF&|3-jM0>dc!78{r82VE?T_xv0CHoJg zGQY>5Dh(UicG!~WFd*A=3&B?q_ox)Vp{h9SLyIVoFp|ujZbkIeY?8jjx6$Q_4F?|X z?g#Z`p18H)%QSG{Ys|z~vft8*ZW?HSJhJAx*17&YCI2$m7JvOodIqUL`V0C^uWbz3 zrp(9icq0A6x2UB6Kpo`u2fQ}fB5R&#qEw=B01gCM)V07DK&Df}6ommOVkml*0p-ec-n&Yv zel}|+$_ClNGw8YcHkdpbppJv^T%%T&?RakPH{!3KRKd0wh%$~M9cPR9#$6Z%s6~cI zyw2wCu*Dg`*04~~+XWwOA-0$gvXsh z=#h|>5|B6m9=q%tvk6(g%kF(mLO?<=SkZG%a&$4(L)#AfWa&H(T0b}6?-J-O5__RZ$~C1fysd60^~q4Yho$nQSJM&Kym={c`|K-+0bB^TBkNFn;8Fd z7*>lL=f%K!$7)-lId=g(&1i4@Xl8T?5Ke>% zlhO7AmWb%c4gkdg8Exfnnsy>pJB~*k1F*i^d?Csz_MVS+T5qX_f*c8{#f08o`)KbP zrGo!hho(d{MirwM9dOaSXkaZ8x^8*z;W$5k{DT<6pYRia9@5hX0Tczmvt8K7%cv@1 zsjz%+b#a}fbQBpQ@huj`9>+JjA)cV;XV_WmsaUz|@Io@#{1MqXWi!6)OXS{l>0>QX zT0Q~heL2yFzuvu}Gnnh03}BCR-;MmNmGf&T+=pW~HZG>gg-CtBoZNrO>tmrQ8g z!vuqLnUM$p68kuA>q|f+Am%lJ`%vJP<-_Y)oc|DkvZQEO&?S9Q z4q@3KvN3>Y)o}oMz9m&OpdCTo{Ew>nq4tZUZ8J8w@TVW2q5 zeW(Fjd$Al~^pg97n;Z~I#oo6PHss*DC&KeXBQ`F?&I(W(Ks-B=lYs$;}LL)U5&t<_+N zWi`W9VzrXq##3>#(nqOzz=~+W!Rl~xWCD}8<&0?Y>CUu$??aZy1Q4k!d9vb_w>b`w zX)BGEq#rua{mCZf^*W9Ne8w--?TpfQEX80(sTMSSwoE)t{@ivNx6cF(v@CL)R;nwY zi9U#5aXvIzXQ4c7fTShKtF7_c;Pj;=GvtZEtCy{{GlDVHC8Q 
z(ZrPO7%+=~`=yhZ4bXXs_c+>-03cO2Am`!VkZz>91U+$_*%o1eOJ-**tw|>1&r78L z+8k!8`Zcw0985a|PE`HUFO8=@u1}az`PM#$0#ZVW01C|^t6J6V;1i5G4>J+0_O-oW zra+jsg_hpDTy&f+aAZ@LEocPzI@D%lUYG4Q*&G>HaO@`O6`OzLrY`(0*&MdiIs8Ww z%l)lxJjlkDuv?57p4{b($o`bXb&J|1xAsK+1kY$p-IJIpF(j9$sw3J%Mg>051yGof z^(3m(Ng!rbkvaq|fCRke0Hq~bPtTy;{?lCT_`P|73_;Lp+7o}$OD%mP^^98TrO=qM z^mEpqqh$QCo(X_nJ`4sTHFuIBqw%-jUsmmhJBL&|5N&q%pt0?E_;B#s@v{@(A~QgW zm`(9LI`jEuW>fk(6iXH=kJkkfH8%!sx3qvz0I6{(DsJiK@9yyG%?UU6JcT8uB9-lF zvyn@!+aMNiP7|q)7C>h2bLO$t+p~Z$lpdj#gunxt+YwqI<~a;g?{i8&d+F8kvy9*h zVjhJ^RTStgh%;(5Ev!6t-%uoaZQgKkp9Y5s$Hls?#Z-CA;>mHDSzJ6HHbW?Q{JUOH zvYHDfkJFKFaV9a_o~Qw+9{L>o0X*u%5jL#gGP+D;zi?jxZ2TeP$+n3@d{{Egyzva} z#<3aY?Cic76|HG=0M&NA_U^t!FdjrDPEEwn)wXSqWu7)8@r)a}qGaiorj%}~g4K1j zOabj*bW3cIcnk~1X*1}59GzuURNvQt@AS|^Hw@h{gp{B&bTh$uRAcL)eL zq=1050qBqd0@4D~4FXasD9-QC`>u7@UFXwX_pW>Phkf?m&y#Ckt-oE!Lmi-jNgko5%dl}NCpB)!(v=Jyk-R8r)bbO@RmMvL|1 z+9!O1U1NsvNgCLo>_4IY=q9CK=*Y9ay`;Se0L+9$){gCL(@_2<$S8?l9E0_>B7*16 zl5X+#P&9>faRzk&fToy1dvJd@&!59s>}j!inq1GL+#L#A(L_5QVz_vJv=dE!1ZN)@ zc8NPcnAK~j{j!2dV1_`}X0}w|QSduG9ZIt}!#)nn9I^INcwYeA?+J*S(iUxdy|@#k zL!?eM`TR76zF1WbbDa0HaSF`eN_ zl!!(>h-VoMrd-C&HTNl)V(>s-+VlBE%femzEy|zg}E<7C8r_Nc7_te8gu+rIk* z%2SXJGm+|g%{v4P04e*h)6%WGCq~~SSY@N0`GNEIP zj)Dvz*0cd{AC_;gz<2DDE_U@o?`JL!5i$bqmt#eWoOm|6Rgq1m2>Yzb1YKCE6atF54YzTT`34d~Z@+}Y;x#mXDpi3ci2x;=0K)#1 z6bY4GAY~I9-a)cikZtpe`c+1AW_^90D~rNTw8FyURd}1=l4FxXpFro z6EWT~xOG!phx&70Jr+izFwvCs9pu7vdlqIDKAGmLp{9n_5&P5Y93)%8F~AQ~Qs|Sl z$klMmJz{jrc9qu@U;*2pB%f#fVih&KCmphu$eeY>wyqotOUhw%HD6cy%poy8zgD0o ziU9Y#gNTNZiSyrGlvE%QL4>PzZ@_-`#c|>U? 
zCR*T70#HZg#uU}hfJqRlEU)nB>cc8=W#=B2n_UFgLSvZ!$r?ZZ*dc*L`#^aa^c@<3 zx=AFk*GCp^qD12f0kpi>5UcBR;dB||1okt67TEXjGiO1{T@=Vrb~=MOCsSD3wo}UE zID#e1V@%M2kJ@|iJxdQU+T7k5@L-tC>A4mx<-0@UbA~IU9Z$KRMahG!^rBi^O^GeA zq(}^MVlw#`zq4PM9=P1QYL_z zVq`E9?Trl{^<4_D%wX)b1WNF&Jxp;xLkrW%bi96&i#Vk5^|1z2os;5o|%BH`ETijcR3dij`#l=*HERjVJ`bFtow*LyARu!$LFe zBD^9Wn({K`-~|985fOr>L=yo%6y;*sN5c);JYMthY(kUYIi`?pz8`v$*^ zuG$U>dIS+%3vh>xM#C;iafBdERx^y85Ma&U*9}GX!fKbinsWJHv zW;f>IgqHCKTeMh6zQiewjAFyTdXwo)$IwaY&^9Jkj4bxG`B^=Y)3Pwc& zKAb{gWnt#akB3B3)+;g83@Nv>0^TFSQe;{BE~`A858&D9zSA*s2sC&S3(4`Nv%OiJS03~P%Kpd_^_12;amqt6wA^6{sKI^IQW(}9@eP5qzC)(ASjF;W`u_6 zqhPa5V441uLo=BAC`1aM&34HbyL@{&v?YEzSHNAJv&J z0BXj1eq_Ky0#lDFTLs8I>;$BUPSn@{t1?+mNO1;;PM*O2d7M)&@~HqGly`$cUXvj| z9AK*l3lPE5hv4@N)DjpF69#;BLVh8h(Yb|j02jG`xK!0Z;VdlhX1(=x`k!!ge z0#smTSgkR}<`@{-#ptmm`f;f-hX$QK(nLmymbKnW0uSO7hVC5#Ff)?_Gl2C_HS7;y zgs!{&A`4T)D5%}jQQE|zR+q`S0R1T}wBjNdBEhVX)i0D;XxZLqf?X~MVVs-bDs z-Wui)&8xW&OKG)5jbkjL*{W}kTTtN=V5ee|X5#87s8#D#O@NMSS+!OZNIkAJBmv?m zEH!_8!m(9j7s{O3n%*2I2zvPBA&4I%FN5b`3IHlu(*>%*LVv)6{T#@88yRo1+>j^Z zyqvT;y^WGWvMThlmi`)`lF?a*`kX>p8Fl_liIoV$3a5?kfM8Umthz7^-8V4oea(k5 zJ2zQvHb_8xI@IG(XF)`AT_cf+RFbdDutixiJ>JMjc*@sYME?+A1t3cr41W-EJF{V( z7)U9BOZnX41we_e7oov$yF|Vi9-~-e0eJ*B-)N$kr3bA3UV)Gx0^3~I^bLhQ9j^!Q z&YVKb%&?ETfJ@BnyOA9A6b{ zg>o$e&NJXJ1eDbb_})XSGagMx1Yb*|-8AEOdq*o?4aboE#wS4+h?2?f*Z^p%5H|Ok<<(BWhc`5$CmRs~0Maa} z9K=bV$Ixo~@U7cxWkb#Dow(nq`af+SFWQ*T0pO>`oj_d0eREcjCaWtYJIpBH4m5zO zCY8qM&aVtv3DL%rMD{l!iEWp|$;gK!6eF)>TE&>uJ*KVzZ3RV~!do}r99;!2ie}6n zSBXORK{}U({1v&2jG{w|{xO9jG^I&t_upTN!u>pC;eHBN3~!(7r6?jIEFJorFDk&> z$fufR-&*CJ4*pyg{dJu$^j4JfI~6xuvqH^hYZ%&yDq4+zK9is-k9O$&0YOhuv&}4* zy&e_Jc%Q{FKdA|d>4Vu@y!RXzaX!55L=gVmq(-ze-?S8QglifdXofSWZa@w)Z)%92)lqT4_I6B#JW=lBhF+ChAH2J(KUZ=%VxD$U904 z{?$2&N*nNG5Wqnu0}^Ya8S%_=*Trp}!nwhSE0j8AG> zx(;V)81R+V7R_GBJd%_y+gp};MYEV>jP)~Q=f?7eVtC#o%&#cha z$vDU*w<@Q9%hFK!T#@fAe3ia=_-8nVI^v6Lgw5RO?^geTK*xP+Ji4)Bzno78Xthh? 
zHLc-8wg&Jw1}-!k!z1vv)Ik-V{MHOY5ggDx*LULXG2hc%LX&8%a-GGuR6N<#JVYnl zDr26j2)kfJhm`ZY0x;nW@0 zc^z>S-}kDv%5%D)e;SL0V$%c)&Rzv_TQ~W%k7B4LF1}R3Y?5+9kJK1AhB%XgzoaVC zw*@tIz(0)jzn)N=&H7h#{E#V+EX=ZN)CqhVzEj4g-#p8q$&xF?&Kb!T-_XGQ%I*gS zBJjLR=OjeVmE$T%u23Wiy02Tvce8dw3M{42)}OAd+M`aP^pL2alhjQerZ1d(Zeaw^m!1yk|G7<+%PsMP^#>Ovz;{MxR49CgL@)QJz00}RbH2-S2u#f-c13Mqw0B{2s$rC-wq-ipVW@n| z?oQ7%NGVW&#|^`7Xs_~BFzRkw&o?W)?J_N2Muc+Zn+IzP!Gl@5+&OTaoKktU?oww9 zu$dCr%xYEe(wXck3U|Yeu82+MZIItARXy`2FnS7J}lNr zCJ%dBM~4oPU5YB-$h~cmI``(dL~LjH{vh)H3CZ>f0u`q>u%*zmR&+dKZCX#_!}u!9 z!C(Ao{5ZlC+V$T{4o(Gz5woN7>aJJMj-WJa&O(w(&JX)!J}S^FfIQ2Ec~62I$1F3q zJ{gapE3Xuy>{06fCOiBcsZOtddGibsq%qUG^e-@1;N@&#L?RDAS29^zPB3C;M{Bt1 z6|D(W`>s`PpT>fU$Mz18E?uY^zDr&5m+#Z(XVMPs?dJ?fiL4U-iy`4_|6;NB)zhP(%=P(HF|g2L53z2tf% zif@&cd=ukn{q!HdP0Nk?SM3q(w|>2vdF|R>$06^Yb7R`+^N@wL;huNB>p*2i$DQA- zuUWx+I}>l{4)I;lG`uY0BIe8`vJ^xH4Irr*O zq+9JDu0KuN?%fB&CeOU7%nJ|ZXQQ^%UY#j9=|^&*+Sb~)PyU&I zPl^8Oro=Ox!X-+dpF%7R>w=Sfg2!k`-iyR^AuJySB*5<*c6FindQcRU`a_4Ef>FvI z=m_$p^A=YG9NdMDq->=InH<_~KN(k(G1i36;b+GlR;!tTaoJ7x@e&VoK58 zFS%SQtEJiB%<#Xe!6SV?=Qb5o)v0V%M;J;u<^+z^Hga9bV#WXI5|KC86>71vG}XRh zZ7tP=APDQ-?be(w!b)9P3i@lTuwv7e+$#SBc|fh?Q=5HbF)H$9%xU00mZ!d9Gk;RdJh@Y=AnteB<4gN!c-VH< z;(N=DGv- zjJ0}wU!pT@rKPg5a`K&2a>&)SmXP!$mZTEK=i5*oz=}vd^hd6sGywL0%SwahBc^UL`7gxo3Zl%4 zAW!`&8UFLOVR-IbMWAagXCt(dWTr97&*;=VXNAtduGNc+oPd1IMUx*5V1=s1|z@O@Io5g?d>+~qZ+Jk&N+O!+=!Vt%oa zUeQj!o9S^^{sCG{Rj8*nf$$p2!z-3Fo;&%3AUZ(P_d^sV!QJ*ok}RaH@(+ z`4#is%hfFO8pbkh=9}{T$s7k(K3$g{P9?Y|@Gf>LJd7$!6&(qPA|ve@U#b_5M(~`< zM1orI*9k>YX_Ozn^D%2=SX~fCcQ|))R zpfIFN-ehvDx5TF5^Bb+|f}wGkq&tXNUHj_I@toj*q`P2dtCKG?M&T`uvowBx@`&NT zEGc0N)_$ymqcOstW1c+Xe1o|%yOGr^?VLW%{=JKtJ)<`<0O!Yfw90(k{5kD+_0Ih% zp%;&a5!V)bZ#=enUnJ*=t9-M-!%+5{sH4%SwO@{k(K;c-RQRB3NvG30eKl2Sltt<9f)gPN#mq-m}`ecxd;XNe633oJiWD#5u>WtGO! 
z*hHW#^C}!x$R+(r#i44_?OjG!^_j6{)ZivZHv^0R&wcS1;7cjhRabu2+5YGT5qyF& z=K!u#=?(jEjqQo6_(Zir6W{P1q8f*iQTmm4c_seeSKs)U59X)4)M!f$X0w(&l$MEV zdOmfL$UZ|)7t;SV^q{{k^xgOCk4?V5Ul!mE?#*iZzS0v;9CeJQ7EezcBg+Z4-ebrWw9mL@5g%?zc@r{) z6~1zGKi}rFr5Rb=hnnH4C>Gah9)mKzUKpRx9!A^L>So7YqFp9l#-!tWgL&Pex>jKZd&2Q1VcoYE9H={?C-m~Xqd#!DZ_fRHs9UHe57f6VQ<|$th&3f}h_RX?; zYr$O=!=ObLd!2aQ=ph&Bu1It0%-{(1kc-qOk{@_*B~(tZ{GDD1h2HsO^R-)VBTg>G zar!pM(BJ!FWfGe~xi6GD<|J}Ou5;rd-}G)5A1I(IC{ORDC2?!uBeV;k={Zcp9;C;i zAoF9v{C`1D-{p)0-@PkJu*1eNz4*px^Y-{SqcSQigp`f_DjN9NBEQWQV@q0tCq=2|{wJnpQX_srUu zH@d&5;*>60Fca9c~=VvliHBi8#+KyuCq`yZU)D{b}eRI1ZIhrzu zC978cvJ#V4%j$z;sZpCJ(uCxOyAj(AJ1YhZ)Pr2qeKb1wBs=uk)zL!4+2C4S`U$0CC+#Fx_a&% zn3I9OD*d+@dxHz4UQDC@o1+}myT zX7H9m=<^7%`Zx0gn*N3oiNrVk;w+u zWECw_yl*yn%_h#vrmB&e)KBVu>pgp1#fsJZ^dBMh;fnkzo5E$nqHfsiq#tpf&o71n z|GN2F><`ZZNkM2=H=HY(;F>!Yz!S-&M+bVrtRH(U`@ZzKUMV9o<6VErx8?)|RpW08 z$?JLv&3a@xRl#!xjQM+^hb=;ZXykh#aN`w8yN_9qTa?%`U2ZCt+k#)raFij7U2Y-@ zs1u0y@`W%7FMJR$w&T&jVCh?m7Z}M>YQBfL;WcS|?X#6~5DiL&m)y5m`YB(wJ+JfM z<XO$8=*ZilU+YA{g+PXmUimOpMR#C#Pcn7$VZg^dgtpl9Kc+ z|LT$7>+#L%8AAq2ns}9=`bqN&XjBFV1tQoMoL4oFqmU0*mB8(l=W!8FlUjb-tI7mm zWb`0%IAM^QB8rMq2sK!QP%sy+w`_0(Qhnws>(_-l0upZ& z2qE#xpnrVZ8$JEK&bn0eRg4`C_{1!rPAGttK@)5#G`uQ z**M9#FS*zXa%O%lhy>J7SQuB(S5v^6(f9kc^7SP5)mr(?V!f&j4AS{nu5$C_CRS>*%iBS5DC=pr?3ZE6os+x04@e~xU&xBGJ%;+V_}j5KI(5YhbZ0O+-DO|9`#1L zaMMT+unYmH<(YjWr#6rn^%ij#JQ>g+1n6Uj;R7kLQyNd`iEPDVxF zMnbE!CGTgJ{@*6{n7A)(8WkP8@%Kd)4K6;a%3`k;Q*;hebd0}31Dt%=%$hNY2f5*7 z4q|xQ0H?Qe>G(IQ2?+tih6G;&+Ky%Ow<;o|V1ldLG>O^Xnop^-9{~hJgj9liTP`UG zVDW%?@p^E7^!%q}`%mTIeqZ}fh4T;M=7&4x2g2+Jd*_FS=Px_`@BrMK7(Q*iFzM!i z*K?Q(bKoGtXL}dEW-rWkEL<+WO)X6QT$n5m=VpUhpkNjN%pC7%ZtG}f>$nMnnT9xS zWjmS%IPSJAZf`j5IXLe1I$DqxH=`XlxuZY#+Mil4ohHwV+Aa+`EPcMebavl9Bo6$L zEmo8*HZTqG&jI|N${5c4roY+ZjWrfDp@Vi6J=85CqNd8@< zwj-Q;DXk6k4IS6Kg3awN?xb#R0)QrfLk-5?t~4E=_@hY zajn!rcbEZy0MvUX_H8FEdZvtQryh8w!gtbzy)sw`Ni2k{YdfiqURi!#K!{hOA|bV6 z=b7V9?zCst2haT0?c6@EBKR(mWw)4iCxv#WfW@mwdbf<0P)h3sB6(IWdJ%u^6kgjY 
zt?)`(+^#+Fs^kGTV_7bDI|L0qC)^=a;UOy?eIA?F5!l#(LGuXE`G`&|e zvRCcCGkmb~8NNRzyg#nEKcTZfY2!Pk2teyy(0d7v1f#aX{khit>TN1g*xtgXZ+GQ3 zk;S)&)^C`{Z^Y4e!p5t5dS|U-Z`IvzDEMG&aqmT|*XA#uoxX$7U|%PI&S@F;<1#R; z&0GQh09V>g)Q^PfJ}0wLuk`&(Lxn4`&~b)u;b8x--`?=qe%XlMs)^Tf&fez_ek(CM zUq1NM9Q`}LzVoBq{%>2kx#3lUxA<3ZqeaJomX~__glCxLS|>E?)w+V06L|> zmm}Vdecm8tfTLgZj=foci~rGw1V^*cWS1X@eMg6i{CVheV*8CY@R-FM<&w<&nt&2XA^>r>!wS`dcm&RbpVwfKrOm;_u3YG^d#WG9h>^o z*YT$c!oz1|{SMjP{p#XR5BIIhyUXVNhvfU}ty`~0wxLAOEcw878}_4)w&`tPOYSf4 z_QKzFqMW=w8;5@GvHoaY>LAYJK)QMUSYRW1@!pLG_jP~YfAC=2_d&?h2OBiep(3yi zA()7#gSTkRlOGSBZ2fjYhIk`G!hb&?{Qmt^Cd3~d5~&mtrF-V$xxf(@YhHiG6az{8 za>lfImiz;sj5tsJa+dt@OnLc?R`_8i?|I^XAzRtML!Lf-h={h(d+_+dgL|ocGVvzu)Cbq2Gi;E1rgOD2LAahClRlPW>2?qI6#O z1E1~hocVj1Y`z`JN)FBX@h8dOxl!a%%hJ3^@G4gw`0bY!w9V=}#41+|qMgpABlWK+ z?W*V#mrwUE!;j$h)ZYoE?0y~Yq2&J?{^>W5&%m?bpLIvofd3zNk)W-|ani>~0EPvk zaOi*IB4Jo0bb|HYIPX84=fAJl!=_Wi#(4kD@kW_$L(R9tUD#rrJ>kv(d5IUkECLVO z{^tigS>}D>jE?d1j9K{+wi)}xIfT6J@x+-xcHWNJ{_$_|H=H*Pf&n1%$VjFKklr*x3tN>5Lhz-OjUuy>m znRpc`MCpB5sqRgRP_;JFOD`efkS9KZQxUce_|D6*{GG;)!5lea56Xh14L#lS0z1T2TwFa=YUN5$K>vvjGx2C>xs$5SI z^?KIRpWdRI!X$g#id<~h&RV{*`c;qqy}`_{D_yKVmOn<$iXYwT#Q^{SH6xBCvc7i8 zlWB6Dm0*AE_%Sqav&K$-bF#|3)I=(@>Gf!y6C3H}55o;&;cA*;U-h#~c2o zGDf!YLBqLIFHB8xAOF76G#mynoV~yx{p8V4DHI{e;axrx$#g*k**-QG9~?Y}+m8>< z?&2A@66ccC0E^-C_T-PhMF921iKHCyvx@o!ma?ntBF%Z`Kw!OKEO_U3+GFG@H?M}U z%6gji@z`4M^=lNaJcV)N8nPqMrXjfzYWXCCo3y3nInLG1NyhjhFUvgFxhQvx+g#bW z{4G_@A?lNRN6xJL2}jYa_HlfbFQOQ3Zoi035UnJ-#hpdJ5Y`FxjIj?Gi)Jw!%I>k( z>X{OAHlSviaVcS0*>QY4tbGH27pAjYts62Y#$c|7;s%&UM=jhN&dIB@ZtWNOcJ42e zh4VcdC~oa}4htJf8dkAKvSVZb{PVlB-fdEE7DE3@yxqUq;;2!ItPa54*bC$J(4KihDL7Mh0lIo2{vB$(wb4OFKTj10xRDi}@OUoi!L zmFw*JJT2AV8O`nx<-B9#MsW6GyA_DLy=3pBUAp|pQs|5=ha5yNYUhHvf42PSW1tSh zK6SBoO(>|36(2S=RlOC<4Vu}!|8GOw;U2s5oqJhhW=5W4E){u=pW0cZyx)9=*ZO>1 zrIl*@Ao@44WhUW~+VOzIN41mT6Zg7%#2yL%_DHLz9q*?;w}t(hzY*5)>l>X5k{2+H zbCQSgKRya$Q!+9N-(`#o`1|v2O+5xl$M|2^>0Ntg8(Wr~*UdxPa`LmoF2DF86*X>3 
zkLIIEKu&#{GZmL?zGekWMk3OepQk5IkGcd`>U9`NLotwA^E+Nby;OJrdkZtE__A=Q zEXJOS)97tq;_KSvI}+@aGVZ8YjPe1E+jf`5V7;;sk2F~_;cCHhWl2w^UZ7;^UjyQY zE~R#Jn2tX|gMQw*y1l*{6MJs&d~J(WA;>$qP;-s*NbbF;GFlZ*B+wG*+nfjdf?qx~ zZiwrxx!$UySVU3QjG3UiGYe5#58@FN5F609I1WjTD!Qa;7XD-^aU#4O$(Un~Gk?;U z^+K!XEcaPtW;e#q{yAzzh4uOMO$6=^wv5L&vVz+@pBYZgDL* z?ZZm3r)_4j7^pG3vAnzd?x;eZ*f3SYw&v{sEA~D-+yuR^t1GdUnibqEu+Uk6#BGhu zi760O_H~(7G_R(!2;QgAV3H5fDfG!mVims4PHwmDn~pf5R_Hi1@^q=3M!iqEY^f`@ zulB^fVM+?W*3E|OolsL~W|k#wQzh?qEi%1fGOV?daM_MvK-?eih!+jK?35}k>aUck z+&~x{PC73$NZ3^wSm`iN*(Xt2nbJFBjE$?!Ph7;`*GsEr+ZoF}S$Q=zPy3*Ab;w<7 zqGqO{!j|AOEK_a$+@P_g)-O!L{?T1Jdk`AD15Uladu7nv(c^W6Bvj&v($=ErST*`= zOq2>|aKHAY`d>zr{I618K)dxBSioH#Zexe{4sJRbs3D1eqgESOc|OLaY1`#BcSq-L z{>v~a#Rf0A?DT%~ly4E(L%iVK>cpZ8GWA>rs_`4GO|AC_shih+5WZZUcjlAu38#D4 zQ!XW?DT=J!zQ?L+wqwF%Oi?>9%@TTPTw-kt_~Ti$h>3X#6*o`Rv?^;Siqik|GMCGi zDRnep)~2X7J+6IS@}5{_U7U^#DzJWeCMVvC0#ybXRoB@3v@HMNo%E_L+tpq--|>=! z+7(KJMo>&Qa!}cI?pk|>(m!{!`hGJoOg8DZMNbs z50ZnT4`!F1{fhl~D$9P|#IKPA?jsI?;|7gKd@xl*)E7p2aR$v}NnPNW*;XTe$>-^d zW>*DR`B3hDbwnGUH!u_IdSSaYnN>flVx$!I##{H!T$^gH;L5Mr!W)#gsBVw95Q2S6 zWt^K!94ubOjO>hw4^$f`fyR}ny~`tR*|WT|$nJJCr#_~E7^S~;y71HfQ0G{(92e4) z9gLIG<1RF(DUIR_pp>n;Wwh(qTkYINdGC?eE=ME>4BlKP*ZBt~r_)q}zt5)l{G9uT z$;H*%EyC=BHwA&cqki_xw)xF#5B$7+zV7W@iH_ZOoSL@m2&Dl#ovjwNJBj&cGDTv! 
z5|^?;AJ8CX(J+%ur%r_HVT8oc@~vMuE`|pJEN()Yg3symXxL(&2hNAnE69)J=*tD< zgj2sWMvfb7k5uVH1%KRAAya7Ec0Fsm>-EnESLm&gWbg$OtK!4?MZ&k6ozkIHD)$p- zYrS_a!&u<{!jA>677L-Sdx3_6C9jGkQB(?!a=u?@>FeItm?pY zZ^l6j-q`Vj#lZJw47Wd(S<^RS+ungdRkKgk3_OwPnUo5zJNOAtf+lUst?HIcv zV!}`B5U0tPA?#y*deePc@9Z|byC*NG^zodX!@ioH|1A6b-Oan69oBP$*%I3&`aAE$ zb~1F9vNCs9wMr2*eGkap|BW6}35j?Zc%E-HQ>TUUX~wBCDLV~1U0?Y+8P`}_#XUwG z%_75y#Qv{__rvX}r%=t^Y|PKOsvI1$(&x(6OGls>!5G)cImu!AfKc^I;?N}cIkORJipCc z$k@_<_`N5mNiU2A`9oARsfJ1O7c?faBMef#tb~ah_d3gS z$sC5~g4Koj<@at*{-j`Rn0)Eh&uVS*ip{pJH$5WHz>2?*t%IArAsWV$&0y6D(Sjpx z>NFfKG!w0tz2H>A@B_9)v~yp*{d2p61=&L|rrCDwN~yzQIrSXc&&ct^_P0=XVvYH;a6@W)_B5=Z6>ivu@0k$*a)YMY&iKUM`a{wAS;+ zM5sw=NH`ZZoSr)ToBD?1RA-|aRD5)qitlD(ujJ5_?S>SAX#yKNyF0Nx|T|mEmY!KGoHcj!_x4 zZWE&$=l>$|0|r7=BIPHez6|g_hj!n8g0zd9o5T$or`%jZ5z9&839I$eC(yoT%v|&lm`H0xn-Gx z!xPD&c`&LY;8s~r`~98q;;F5Gi!t4mRl_r*FCCcpi8G}5K;kIZO;>Je_uVg#sHwxT z{g2m8!r*k&H z!zeub)6LX-xzZ%HhVG;EL!!o(EuYsh^4RO=Hup4->Kk6{JU5%0UEVQWAHAplGRIYC zlwIUcxRtWl2*@V-1xEYP&w-cey{xYNPvZM;pkc)|9P4;;p`~)l(=hX>;RMNDm_b_Ht9MtMu}5h)jxW62{&u^a{E>~#of`}Q-d2+ zNNhdBtGPNU`>I>-&FHpQZc$Ri|Kxh5H|e|3XYuhg+r?%vbiUvTlUJAzM1>k6OAXmI zPI>}mlow@pEB~U%X0)#1?jHT!-OpV%B#$*eiTz?NadHW8Dc(vb_~2;o#ju{3h1>Ej zzQ!WRGR~e?w4=RVjGMIMJ?_M1fpv~Ek=d3MEtdImtQ0M=n#}%&_Ev$bTH3t>y2oAe zcn;2G?ZnRrfTn$Hz-*8x+1|pxG$q5-cXOrL#Cx>lbbnLn%#p@WuVCW%oK+ z_hGwxuxbV!=m+gNm<~ga%NE8vJ79S(WH<30&PS&t%qSv7 z19^25q3XgeS7^#W7VW$E=1yzx5j3l4&lk_N6mPbAXC+7BU-E8>ou{ygji`B+ckdE@ zA~Bbm-E(Ci0Lq)W2xwg;+sY)w3hcc7EfO=7;}flWIl=* zb8%g6NO#!UdmW}@`&4);vT&?Eeo*vV$tw?8G-Q^;GkhA5(ejNew}v(KKv}6I z|E5QZGt@`a%kthm_VIk*Y%(9G^^%FMDft zndPO!r4@W2X&K#5NxJ`4%FCl5vI?`Euq2YTo{v=8VIFcJD!Q$LJs~P{AU?A#Cj3WU z9DBPFvlH?ldG<;m4(1C&z+R?_e#PNmjP)c24pl9NEQo@u;pgS3mLHIGMTaru1F>r z;|xr!<-Uwdem#!|$C!`i44V2HY@w#=dMWAApqKXdEr+=!z?{}o=@S((jc4hyR#J)F zMVGsynN8FF30xA- zpOOPSdxnpEUUe2XxynJJqLQR3BIV=mbTJjIkP)k0u~%umt}5oKzc%tL+RXKx9}H-S zQ-EESyg3eOb~rChaOKP2WTM%3N*}I#g=iYGh7;a@|wuKw5X^6Yp0-{ z6@NRL9ImXt=LKf{`UT{Eiany1F&n@B!932Frr<4wK%JHxfoYhZxIF5!gr+x1(6gNk 
zHh}idrowt}sNAMJ=1t26_^Y^_YqtGC%G6-_41cB1s2hGsIYs1COa%C+ z4_GuU;&dpnW8kw$6j&@uLv+uOkGd;0Ga@sxOKyo%l1BeSvcLt=k$g{6*5^E@`{Zfa zm!1F>nn|qep6QCBW-oQOe3nXQ@AlR4yIqrx1+X;swEst>lR($Ig-7P!F)ynSmy&Iw zItxGHiUeMeSsHV;T;jX~<6y&<{?fxDgs#-5U|mtx1)H9-L~!{d4c_Ak5hGR65VK!X z!rDpAf1~&00SL_~aE0WJv4``nY0-?5?7^C{r(L|)A+pV?H7pR>d+|B0R`=brj#(i5 zx(V^9_bXN2O7uzoWCKWyGUg?zn~9{!WQAjD z=Z7~MFbLw9dLA*Ug)m$)(fhBewYP;|D!R%CIq47UtfcB%RcY3{YE$7g`Q@+v{%J^p zcQRf;n{02b?S8G(Ia*il^H2+TyN`}iMM{z`!4=$(Bhr54{yZeXQP}yMH8SH`=xyK6$U}lMD%dh!sQAs7D`(5Y-@(mo8rbS7Rx5hy_Vv3)%MykLr42 zmZq6BY2pAFngE*tFf%(cA?xy$B$ycqr2OwY;4KqJhMEy5+i7Kej17Hm7%)yg$GVj1 z{hg5kAPOKZ+f}Ao^~?qZ`3@n91&G-0udkg*-lHho z3IO9IJz?JKP5dg&$tr9_M1$cykxbNRWt0U}D}gEeNWA}!i<~RdqYeOX`r<>_mB7u( zcbOihFRq8fnN_nq%}aIDg{|Lbd0CeH0<@AjRwhJDIgy0rF#@(i?J@WWq_EAH3Z@Ss z#9;ufN*obR+>lkuL|=0O!8n*azbvQ|fFrT88&;{#Lnr|rn>{d?S)AkJjPT_Fsq?ou z+>@FJFpPacl1e~lPXh#%Yo+;SV`qEkYYtiS)-OJUkuS!rCn`L$MKcZ#s*9>}aHoET zxvMd>Gg_uepeY^9YG@?j5Z^hvm}KrgGI}6~e;Hn z3V1H1q6H2L6(wxv(bJJo0?eMdquox}f>Cz%Q@Kmyt}*VTrzU&-N;fXo?xX34c~@ zM5KgW2$8^(p}ZQ$pP{EgK@L%4BS2VemoFPm67bF-8dMkoU?~$_Me<+tOny;L+*U|H zlriAZPD#!bxqVtFJ~^k*oekF8N$2HFR;FBWtat)05Jn%oQ2xxq^iMQ@B;EjC&Pq-C zROOcEJ3Z@BoNP1a^(?|b9H{&wm4#HT)3^W5{f}znW@ci?072-9>!6bS0T$5_ppyh>(+kc z$Np@^O;NTL5HM1%V?%G|eT(jNPil|nBn(pX_k zvNU8R^680%%G#bYz*17#6%XUdT}28cn3Zsc6gYLpi&kLyFZgq9!H+`%gdD?u2ECfxu>-O0s+)ocmjcHbQw)Ln|d7*e%)f_2Wy>% z4jJYz#OP+87|DIQjKHyJ(EsYWX%Tos^ct36`u5T}fq(w6M26n#T_dMO)!E|x|whaEaeU>k~KQ_0dxau@V*zA_Wjqu3ZR z;aYQLJ=aLSe}%){UigY=rsiZ1Nlreh*i2`_7?GinFxL~9a+f+%n57<2k14*#r_90C zMUX$N{~twX;n&pLhv99&sL{2J8r?9uag5R3Au$^15-D|zPALHi9Vy*i(%m6Q_@hJ= z0~Hlf=ga#ioaghLbH2~#x$o-=goJq5M-Fx*N`zOSg2qXNQAiw6+9%*Cl=S}91*MrB z4*33>R0Q4%G^>3sN_{Z;L5;Ggo+ZWQ9WViZfuQ1Mb`N9nJp%e%WMJu!G4gskNYGn| zcWeq)jvhxrOao>xNoPZEsu@%XlZG(kGq#DdNrf=E_$X2-BZqo-=*lY&0ZM)2(QjzB zUO+O6Hq;PS^ki|2IZ3iA)yQ?k7&=3_!aaLPUijXsObLY(#!69f2?Ln+%oBKoH7WnE z9Y6Y#^;!F-a8FogOk2s)6xg8D$zG>Z@f5jH2~g?U%BneS)<_3xU1##R-a6Bl@_EHL4QfQ5?OVSBMA2xNuTIweqk(Z7_o5 
zET2_njf_Czii~GDr3Y~MBY>nUxZDv-@F6a%vnz@}l3!n+^f5IvgAYI$JW8&4fdzaS z)w=Hhl``=dOZ-wt6=TrX-M#-NkNPV-dWnyM{7Wc|vpzeiwk?DGT@V>he@?5)^945q zwQFX%rgRbz^4Fa6ds`!U4 zQZNR0A+FlNOCXb%#*M|^>Ytx1-+xVM)21y&Q5!0So59?^6~Z>;Bi{@>itC4V#?dcS z1NWkBd6x2|M)Li{v9R>`{?+1H@gxx2xRIjA0Td32=a|5LXB9yLfd~}oYhkdLZ#IB^ z1w{tR2I4fwUoGPoI?6M(?s4cRnIj3n%3wyGW3psv=AT{rFzxbYg04~|0zl>ic$}n0 zb(qo&@yC0tja+cb3bWDwWe_0cMYX6=pqVzC=3-xi2t)t?1+caAmJ~RLqrHS8pa|c&qqe0`a<` z7kJnjY4t{kEA{{B2M)jagcC)K2kb{!Hb|@A%N^>GrND}}J=BgpmCpWL;^BlFW0c48 z99<+pjntErTdjraNqeZH&?bn684#`0`;;-al~f2SN`W!?rh#@?_z ztN^wQTmMn0ftXS{-&>I*9A&Cz8xmJ`_?vwZZE4o^E(pK)2ihI3t zly2Ap)?qKjl^N$TvJ}`82M|8+u^H3k)h6#7JXTHN%bIcbZ8j{>IQ(WKl26-oN$T3T z7Js0CmE=E2B#l$I5@DY3W4a>T0vys-ofmkd`ch2RAcw0j~(8Ois!s=0#Q zA%iHd3~)9pAZM-0D1oSJwD5UiZZXn%!Bl**x|W0xRfpAn%9-Mv94)mickxT~B0iUkcgndi?+Ej*FORl&{8 zAFD-SNTy&S|B^zX+x5@VjC||?&7Zoxse?zwD_)rA;SSVlE2)qS?=kEYlYyV2kC=~+ zD?kC&Ma-5^TWM0Ns8RE;+kfr00uR0%Y`7&8OajL-*C#nWZ4d_&0u(d{H8e>-1CU3j z0fSR0Xkba}D#(Ht>NW;&JSfSyC=m<-;pxK!AAvBu3%G3XXF=H0QD~qCbVLLijiEee zpe-^sI+~>d6DGQ`kbrY)J#w$eEaSmkY8~LCW)xLfR^(LhGk~b^>jQ|Awn@Rr`wA4L zgYUW82wk23M=*k`^LaVN2<1t_Tm|++G=d6~M)3eac_2AuB}&DC0<>m;LJw^pj6Re^ zk>f?df=dWN^u#|@71OvdxOwwZrYan%j@cW`<8&!OS|8vOjvFJROxs_MR z*^k-l?Z1hGagAj!z4IJ|{+&P)qLOBy$2(Cd&iM+#d?Ug-szhPvnA`Ep8rjSm2*Bx^ zZ*n|LYCNO>97dCp4wGHLc!Jt!AWJJhLZ8@3BD9~D7t>FJt&o_=J1^j93L2x4o3Ptu zGE01Dp|T;T4MqV&Y4zqZ??w>g0nLcW2bjq218V+8 zjW{sDB%?HeTrrsB=~@CoNg$>MASqH$QTm*`23K97J9AXdnS!9Q!vb!obb_K(jP+y{ z2(uH5IjWHN8y9o$m*zFb9_&FvoDrY7^aLL8fPZAgnRT#ntioh!84aa*v7oP3M|Olc zLAP(#uJSgOWGn5HiD%W&9CMAY=-X5PQoV91J)lz{lgnHpiLT7$T(#7SU*i-NJ<*N6 zBCkWdGL;eFKEngu2-!3NOAnv9^4Cb$mV<8&n=>rHD`Nm70%|wju`wef=+2HG)$h1a zSo>UxNp5NnfK>4L8qHHtU`bX)uwf0|pdSV+BKt&V!`xCG&F2BRwNriuKvD`UYGD$y zTO+C=;-|z+kADo%i(>|jNK+$FfCF)r!)A*$&E?8KF8(*!b*c2Ic}&%o4+4f2fJIV5 zu>@J4gA#uav|DSi&Bz7m^a#FOrK@)JMR9`GYa8f6V2HUmLf0nk_SeV|5=(xNhF zBYY@&8@*s6dE6X)UhoNm1Jwed28rN5Vgk%)02X2O5cmZ$Wl>0FBh2gSj0M>Mf;och zOz_qj+9LphyyxA!_C0r>xv>VB6K?|uw=t=&G~(!GH0jdhv-RTW4S>w7xCV5KD26hU 
z4afE*kM0Mm<$6vTSdJW)l56i^>G@hJ`*WKlv0klN>Dw2-eLb*XM5C;FcT)AF9DN$`$xik zXMA+8YnTE*DQb-}`Tu0PJaCI+17XTV0q^=yagwLJBme?sB!T$~;mN?Cs2_ZYy$t;7 z_0>wEm=OnZenaL<(26bxyU~D&>AWfBVU_P-;bwwSz!$MHn2ZabaS=-Pcdr7QNi-Iy ziWcuR1J088pB57U!9+mh16?r&%Db$Do9vnYyi8>NC*yLQN$yBh!3{@}AbF_CRjW() z1V?$`N|}K*W09vi;K@EX4CI_6sRzWWytin5OK&@8vEa=>KIXOGs3)f5YEgA_4AgS) z^k%>X<1~360Ks3IgCZh8Zm9W49ve}gIt5v)oJ1CJwvs6}}bK7^|>e zL{bI<6rglf=dRXDiJN}le3_u&Y-z~B%?9IlMiogO00hD8GyoDC6xoAo9^oSp(t}43 z1x~m)$!I;v4M+PIKpha2yZl*^&(&29^nA#36n5^}k#|v{m_`&-6bLGf?m#g12EFT- z>}|YLWjJdlLG4SCkx>CCBvC_IL?1j7@T6)N3Zh9QyX+&+z*2#&ryUMj?QxW(Z|^@#3yI{sSRe%fIPqR73QLj_1LQ?8nFLAw;7u6; zNhJwORph}M>HhSWgKK{Q#7DrsC}=Akkc#@Y2m(ltb$wu5Itxs1-38N+fJKklOUgN4 z^>p;d-C-9Af0{r&gJM#_^}=xE;CB_H!H_@Sl?%L-s@!Q)e!Cn3RP4ZiId*ze`1bkh zG~a5aVDx)pUiE9liRna(Q4pNMnMi+DBvN3RSTXb#GMdfQnsx<4`0^4orWoa>WpM<< zwL|TAfe-{g936){OwPPUD`AT8aaMi$vrfHJg=)}fELW8o#G68oA+>`c!_QtW1i;^EJWK zpKlB!G{&H>6zbK!yl;A^XI8jUq&Rp5wlhCn=;}sM>js!E4EA(86-SpI z!U`PjxJ2p}x9{a&_Id9^ukxNnvv1WbsYD2~}J5 zfhOH3x%5vpvv%+0&yiF$Bb+;~6c^j?e{Ih#WKqD&D6TzB{$VW0MnIuFFNMZBeoxto zQrOCM1VhMe!7a}b&-v7jWTWa4vE_(_L@P(tZa>g+@X!}d7X|NCC%j$@m2pep=Su~N z{vWuOuSa>M)=}R)8|5U{(Je3_33D`-s1kpOG|(9w;(@kOzSq zO5}w^DBY*^_1kN8zAQc0qb^Bv3CVV*@s@)rvjv53e%wg zw<=-!HkYLWiLfa#`_>d%PRU*9hvST>ps8Sy@x=KeX|KUp z5k5gr11Z~)Vf}s@Ftf0;Q)VN3r+n@4fOrWN))H(asQcom{p1!>dle-Qi5t zo=^LOF~5m4YP}bS6Y1>#0$PxV6y*S#XpI?V0y-A(5lEUs_YAex5cY^mZ-56C2V&uU z#2I_}@y&C0a{hX9>CnX_8h1heU4=i3#gy|eGLb(vb!n9sR>&70?r!%G@Sq>MF1UY` z>lq1DHfJOpZaAP0l;enW3VJ)dcAeED@{9-S=r4W5>Y2a*hB?lp@FojqYED5FQ`-`u z9>GdFDH>_pln6WpF_%0-_b9|e1iv?$j4&6BIzi}iMNL26`7(NfxGnnY^9TbS zVu>Sl!2XJU%%Fn^A#ECoPAp(FS28OCGW)!=_oy*ec;z!Z^UBcc@%J@*ni;6Ok<6At z#EMT2&qR7@M$-=vpoNn>Xz=TjVbIWz*Xf^wXGPe6!b>^791&Xqy00U4hNjEH4>(Ja z(cAagILDlc4ltJo%-7QpxgF>>M0S^coJN{D*Zn^nIZ-zYKdz_r70E<3Zj?xL8Z>_R z?-xQho3keJ___MN+)=Vv-2_htNAcBRGFNfi;i?#!y-v1Ff#wuz84@P=1c!VgP_PNi zO@3^{Rk#1x>~vH&cT@d|Q-O{~>U2!#vsY;8!4L0NN;#GDAtj7o7Sm-XAob6mMhTv* zK4rD;RMh3+foDj|Um@vCO|*;iErKD57RI7AqEqZOUR)y2>&KWyEb0t5C+l1@cyz2= 
zf5@}ix#x7*GDeBCZCUr@wO`u0c4-%U8m@TE(r#-lZRdIeV-4^+*XV9?-!x#|@rmFL zTaY5o{^50Se*(;N@F=){%R#DXXux#%(+j$Xf4(l8-U|55`Z6ejEUZUhbo=9I@;OQf zyCuo9UN*$1uf3)tEv@-+)CZiA47;>@R9=0Zd)5*bMvcPMIl1wP6lpUNwUNn z$&)S=^i|{6tm7dqGG^AgaRxtkeE&_J_h>pc|C`#~C_31S6-c*-Nb#!Gc}$m8raR); zaL&iDu-e|pTjN`L#9`%w1^*h^XOzZXuOA_4F18BmE6XmFgxp@mnIN8iYYn2UP}{l# zRVZ738lVF)7K;ai6;rsWhQ8rhFQL+coiw3J?g^JqUd>^Dv_C1kxV0DwUW&cqW6-9G zxCPQehjr$bM%I>df*2nqTy`R=^#ov>uk>eHF`5$rer87%xdx)zt*$}_cIR$;&7|wS z(v)!8_T6y?03Jx@(MvKaNZj!!TUWm=D*fV3G@LC&;?Fp!~MgNKDT_+sc<_gcUKlE72$-cS;Fs^EOI+-8Q zCgyY)&DuG*7PVr?CY5atb78iWe_{5)?LkO}bGWJFi8}-t6W8ofH81l>o=qxk_9hk} zTU;g4bjLJ(XUB*Mu<+`?sJU^j#A@B{rB#y&R^gs_CvqGFvy3=eZ~KeQema(YKqdw? zeZ-zjtxu2#ugc_R#pGtr&72^F@ww+~2# zET~e%H}j25~B|fyFLf<&sjd9KXyd7Jj=hSrmt9MQ)~ig~;MO)P`N9ax^} zhHqnMt+x7jvDCr=I>}4k&HY=gb7Sj?y~ zO<}J@g(8Y_B4npPE&iQww)SGYuJSt$Yn?8h$I!&r306)_Wn_zaivHOIH7dLL`M zdpgbO%asT8Exe#3UTB4N(fF8*y7|4mxF~f!ls*{WUgtXF5ML&{q>YU=%fNk|Ub`L@ zsb#g#mMHGD@pqW+D7oKIVqBwtK^w8cbRoWFx_I* z`X~@-7D{f`?k865;>df?f?2>kU%tP9pBVYp4?D)M%EB1XE~F$(A1EUnJ>mZ-OgJim z{zyUh;FO^vPLVFhb;69~86dO!^g+7iqB(0e zv!QAZK&W=G>dcyI@4XmptVb-5_%u_pW`u3{877yy zwC7TUrzCm-4(=b3r{)N!e&_W*Ws}|na{Z@?q(2rQclVAvz5VuQ>W(?Zf8m1afIF;V zGNM-8tRUQ1jM?ZUWaol=ip8m|NM?Ccy65apC;*Rg!<^q`Z2iIGg^jBUb=GB)ZxWO< zCvkqyB5%!f&*2z~guTw0NPT$fy8I?|?9sD`r;;K~EW$ z0RbuP(C8{GCy!8|Okl=WaB2dR@~Uh82J;^s72KtDE0g=93AdJ?B54@C7B-O^tX!8{ zFxi&sU>HkaIm>KVOr$hvLxMqDMa(qFR-2H#+zx*oFL6o&=kD>$_@chI279gvsT;L_ z6yiCr2>jNs*Jlq9C+{4TxH2nAfqQ z?zXzuVHMtvE&fFvIw8n68eMOzB4Zx(8!T#?qvEBlk})A_t)${6oYct1^4H7Vt2_qr zE~z@41y+|6o#-*cr1rv6jB8eH&K&r-%hT=@x|$1nf$~aqg+YI+L&@`!9!tEm^xDdk zJo*6@|C#qT_31kpd*}~JTFbP5?ws1XT&KWiWS6ODqnsq}oRlQDQEc4v*)n48JZu>5 z{0PMOUBu#wDf>xUqcDWJKON>#VE^_`2nuX5YR-HX;g;;o(N)MP5XV&;aqz~hu|SST zAksO^LJaOgbmvvH#T13*6>TULiPT#ftnWX}Sw+!AT3VD3_8X-j4#Mh0Q!MV@^H)8BEAyvs@|6#HVG zEza8DsTCVdk9*eplKHMr z-&5A*z#n`Jqtk=|pCO)6PjG&|^NeErS!~j?qbrA}*Ieg^97!*^$8UQ5FPB9}d5CDzSS-kQ_wlK)<-W%RL9l5G{trb-DK5YAZaE29es1{f#CFI_++RvO$9 
zDvuMG2VhJ^>ngI{*7~a|RH=zhbE1wc`ns+qm)*?XqGETb3_`ANY%T`6Q*3vkx#Ov- zx3HuOOn?Ldp%W$v5g{W4o9DZBi4wA>DYdotMQgnjVEaOK(qbLYyS#q5*PXsa_9Y^( z*XwA;J5EoK8ZlgIsuTnSw@)8;Pi;YQ0ZxP&K~s#oItgcSR)xuRovh4%z#TvwZ0tQ8T&*2#Tya;qbZn?$J`G+CopUw##7vV z_f3p!Nj%2+w+hYoxVN5#7CCZX49%GZ3b6xnpINeA7h6Vp^N0MdSfLW!p%RP}@h3}q zT&djq$)L(8F{=1ok7DhU=-&+gS@y`EI*df#Xxc*xBTk7?~6gXS$ zBXj!=G>H@f@Dsp3UovRlIQiK!Rn_m|H<-z`bmx^a1^O*gW2^3IF*5#wQs-Qz#fMwi zT~-*w-Co}yZl&4lAS)lQ*$sE%+JFqDI2~-72P($76gs>Al$)k9=Ma@Q)slC`g4Zap z$S#H7kMG^cYR69TKPLU6o?A+onX#Q>M@OoR<`iL5l|Ovu7Jo#>@Fmpz5z?Nsgh(^~ zuGP9Y)pPP>EbrmrP_ub-Vl4yIS3b0^XN>W@tKY{VqEKKfm%%bUPGs@cYnzMeR}yav zYV%mKx4ZpA;z(vN-(2E-1w7?CL|82JO{{wErriw)`%&-Y7xCpwJe}q%fR=J6O`eHz zj0xdvSwn2`6Ix~LS{vNKMYV}XtG`$35OIts(mwrqW7A|~i4@)LiryLn+yuayi>QW>fT+NhsMAY%a`%THLrk8YaiU6IeVitb$5Oo51a-sH z6*{J`JkUn-cLeig8fj#KjMJPO1dR){8fcawkq?FLen)0LU+X;fes{jtX{nY^I)P98 ztFsyUp2bbeVo1RpJt7Bp31MHCkq}7Gckuy1?pF%wUXQRq=4XowR7|`cbGml5=^+Ruq`DNaC zaSr{|lYOsZ?ioOr-MkLa62pkV!E-~w-2tU{WyF%lOCqWCJtZ*di>2(3M^d!;fAAf} zi(#NLq?ggLLKfHc6JzkWH>G9%^In-7Qa&GPS5p8_=i*KfN0&(njy1GPe3UQnol}F;myda2KT)qQwpMKt*x&zVH{fGvGCKfM0eo4~rxEzT@kyRRuX zuGs^>YxFj)C%S=ZOcx={J9D;+zAR^Rm|;?iv!mTp;lrqWjcfD*mjL!x7P~)xu?VuX z)LFwzN82)YrT~GTXsilRY`6deM6rbq}vzQ8_AAeqpYuVe-6DHqGa>>vQTegTOnurJfU=kPmEz&KSrBXK%eHt z<$JEJSfx}?!9Ym?zX9H?bAtJjbAi{j*@yAw@7cy-et(Sb7EGzC4n63$2+iv#_jKd)`5l+(iQ!(MOW#;C~+}iO^lxBJQGDR-4ps;G>ye&y3Y^-1yM$$r~n)b{^*u z$}*(F-d~sP-u(oTUwx%yyTDbQt^ttHH4+8jUdMdPvJb5kn_P&sZ% z5z4bUG{|Fi<@(#EI)yY*CaEOv$dxV{!}9~XZyT;qJIK zCxmq1AZh8P!n%p~xT)GU~msElX)viHhZe&*~; ztorrM=Si7tkjxwVsZ(K@rz-n>d=x8XE%;9dzYJI#?b>2{qd7}-{q34#FAiqo-)1?W zT!)q>`3n4m%q4s-sBE^8Qr=0_OsCp*A+4T|1a!UpjlR0OC6qCgNH3=sA%is1zktY1 zo+!Faj6_)`eLqtS9?WNmFIKqoGT3U_=6-*;uT-74C{3lq5=Dv9M?|i!r?w`4aQ&a^$)hYp2xBz6FL(%dDrVoez{2F|J{`oug!~1_O z`HY5LE>&w#zi>eC3*y3Mj3yxVGA9^KCDwU?hRALNYtgD+UTD#4F^6Cn4duRIm{s?@ z(J^~BIRFbxjybcLS81CLyXS=TG-uf57hU$43s*3AlK7e)e=5JsEMK~##jIeZ|JPaJ z!crkx@s5mR1Brpluk%Vevlg_-`45bGa_d%Mi*nwGQ`ijtm4C+=V_t`e+nJnro7!9c 
znkDr&{HLaqS0}R2e5HoxPt8x;c(bBAGjtcC8|h`U^m)2ESM`LNyq_BNBpk2nNtb!& zN1x2dS%}$`kyG8<3Gseu=T~iQ;}CGyo6|99vCGyaY;%({k}Eg|1Sh;|Owx>rnt7$k zz?l#AVMy#I+WS^6D(u1ueG#{VT>v9bndjwyI)<-X-*bxCm6zsb0|pZkxuSjIoTEQ> zvANv;GSuVz@cYND*RZR_oj` z9|jJ596k(Wvw@a>=J*7@@A3ID3AfY)CA&&n>8FWiSCxFi* zLwb3fuR@E+-$uSI6nYT-cK%Q>^1t~$Fh^)?3lA=KkL6+P*LlT%v6s?&*zw_YVevsKDy^bqUaRhV&2Q6dBWaGF3Q)rmUIi(!1_aih{ zQD$;36ec9{cA7L1ad^ZZ1ji{43H;9(o2XT`lp_U%rGABNB$!9uQAvxfJv&jVsvP1v zGtRkYrb*pzsyMhx6Uc8)v4iDh>|4zY#L}kef6!5AUZn{@gs__$RVXBkXbT6Lrymgk zXocLF%*8vE>05rg+X5ahD`s~IU7##xKoGsq$az2=;z ztg)*~88;N33A09;r#8y3j|J)5Fy<9CdI$;4>g(gb=H>7U2%4K28295zrOl?0gu|RP zHB$Hj;Te*!nw^HiK$mc365ROtfEzMUM@F?y!e~y?2rc|3#z*Gp0px~Cacqs~D743A z2IENB*CxOUU428r;g%dP&%6}czR(0Ge1pG4yj30@H3@ZIFSc0#EA>mq1NQjt|32}) z_4k+QWmC%iPcbgi^xt1(@LS>5>;Lg3MNk) zrFJ?XkQhf>FjQRP1^T%#4Tlq(DlKj*=8HQ`Gm;v^dsQ*ye%{B_2?1 zRQ9*BNiL8f16M7b5v!@D8%faR$jt_okg(e)Zj5=!)`2PJnRWu?Ha(hIsr2sbE4WJC zX7G$M6-fch7Yc;LSBCVByUi>-9y$~vwkVJV`&cAc+-@|@^ugEGcP15F})=wh-!G zT>vXoSE-$0>88?O@={MoVdu`?F;!vN*rHD8K$Nk+nPui&D1-6s=rZYyiExRb`H`LI@f@Uec%~01NoJ)n$KX2M0fdFrL^)V{@{Va<8g<1d=868`a+Am9>*XMl z$zed7RJQ8|(#F?*G}^s2@EERi3RbR9)8YEIJr%o`q-*v&=Wi4#U+ZA;?Kj09 z{W`y9L#rEfeq1y7ls3T&Jrf1#cGD&U3s|1fggu#F@LZJX?r?OL^@OQ z&*HM%9vA$9=A#vYJ|b+AWn8V&&mj@_hZ%ogVcM{!e);*%WAH zvS!OL+=ah0+=ssO#i->3&};G5Rf{3$`f}WlSi0Eb`20T>2MvFPWiy#ii95I1O=rOt zoB|Um;TrZSx_gI|$-N%-ZNVvdzgjmVr-(?rN7~u1tVqtNS$$^*jdk{({zO5gM%a>{ z!M)xsA4#U84#tsj`OQkO}55MCv;kFL|Ok@gxo&rmCS(K_}0 zFJsz&Ho9`AErBU5LVErX>ffWYHdJ_f1Am$Uvt3TwpWW(vAIl6>KkZIM>_$;RLOX8n z`*{)Tup1t&0FjdDsV zSD%NSIFR(u-~SZcR2yq3^pkyxJ9_o;gZm%6|BF5BT8Z3024FKV=b?V`zsduNs%vyx z|C+ud<)TNam~Iol3H^?HF#=bfA?%obC+qx=NL&x9ROsIof&W(MTPN!~e?dIvYh6_oU*)ru-ykD*4hQlkB1DDV0hq?!A*} zbyi35^mHW{MwW?QBD1$PZ=iB#wG__dZ?5IuQ$Oy|9Nx#I!B zGZK|!6^)a;PJQ?1#p#_V{gQxIjgRbFGU8fRmhxoLElu$%3VYSx4TZnUP}+5L)w?6w zEL#g5S_{*V1?cWyTCJ@ygyL(mkZ%fPq>8?>Bff^H-j42n)Fb5vBSiKda!5PHL>q;X zVnA?D0InkaJ>NsWre@{ASG%YU_~b)k$&i+T_B=aSw|Eqa5l4nK!Uub&$CT3j5|eND 
zCgwH58?|ST2DjWP_jhG?ziGO2jIGJkY~hCjk#ay(r@)F}aFNDakmRZR@Y`HD(Y-NW zhv#qcLQmp5{Qz3ha$4a=x_AH3fA)K1?XE?Xc~Oupd|BD0xS(>i18*Twp!hwmcB$JT zCgLvEs+f`t_ff!v5BQd-1{&4_{U@G(Mf$KS-W#82sGl*|8}Msw6HXh%t)p;Zd;{>3 zp*3061_~NAnTg*$nxSAuy&>C~$<4gcjgm&TPNiv(Y}vwWN=zb0m8^Sy$=>`BH>q-7 zdNscz8n7)9)LIi1j6O+kCf>6cj&KtyK1SE8DB+K#s&`Cx zRc`mc+cQ3X7#)Ar?%leO5>fI&Ou*GZ#-1v>dqsOYMGVO-Bk9e7O>M(b+pb*<9a_oT z(ZEpJLQMQrz>6q1x`X+pX zeo43{G+!THIxDHAWIx!O+%PG5tj0*__-o!|C8v+rE3R;yA{Z%eTcW*lr#>)GpHfTu ze(ejXGB6$(d$W#ixYW74>?^0{zH+owFjlz^*X?$EQMYaylhOaTle_OwN@X8jR7AdZ zfLAO+7EmcbRE@x#-9;cHvfC@5hC1!i1%TWHmG#VQl>+>CoeOqkX+lpfRB>o`LY`M_ zDVA55SSj&n4>eqz5;HJjWh{ZlFHVV##<1&8xF@-%#I1%k##689SLkzAix-QlPmdXw zBy{*5^t#<#5V6l~IoR-PO}&fd6Ht@8DF;$tBsC;F4+5y@%Qk=fvlMuL*f@RoKt|(@ z#c(L|e3-vwM8MWZ3q$f+AZlm1&qFJ^aeW{`s=-oARc=7*uPsIbsr7y9SJ;ME9;Q2f zK+kESKOTc=G$I>ZFyq`b-fL?!=vm@7H{w@|scag-Z{lS2=kkWAJ@jMU7biP98fMLT z1S~keuR1qsJ8!Heg(+=>iCfDgX@>E+f%*oh#xk%b?o~@7cO|@vYzp$o3)!E3Eo~+S zujv%4ZXKVm=~m9w8f)m4XoOhU-uvj#@?EGaR=56GC8a_uvEDWvx|^Oh{nsGX>!Rz* zuQeN_hx#C#XR+a?T8_z|nb{M8+_ls8$0Wwu>2WAF!)01S6`5$v=`&Hx^5!|HS!=RJ z*1Kw==_zBXo5JkuLYaecmiA-O`paJjxyjbtx-<_<8(-&-77I8j|AuF)?NqQR?WEff z-pE#qOL1?T9OETLN;~VD3?#)hq{N47Gn)PNmXA9sf?9zUza7ppox^jMDWleZrdT?x z%Xi%$|7lTBH}I@CT!46z5ry5j(MCOB-oDmy$ve_oUAawr*T|r0B{9HG?*mbxOmwe( z&5rBXg?_TFld5snayI*q*m6LV<-*?VvO;m=)b@JA^4K(L|0oy+h!3Zfs8zG%2b;hIp~i5zCQs8gt971XPe)dU>x?PulDvg_w?!VsOW=esMQ#3NBgYZWZlUifPw)Yn;F#0$x{q69z z{(6Z*Os{?@Sw{NFGLt$t%^JjH=q87vm?!HQ*`8WVVR(At`A6|R|CwE}?OvLWnKM3= ziaY|}7xsNFNMij2WNEi#^YM?3FV%e8f@e1Eo%@8>zpUgNI&ABySMqe;xnVn>Dj?PF-Il z%=e|Ewjv|HLoSws-=f4?eSPyFWx(5|bMn6%$fo65*pho*yot}`$=eNeZV%V}zL&mh zH6`(9p>7)AGCBj*CExl!Z~D_0SUeo_i)@f~-cv3BYVrmO= z`Te^8!nzi_Qd>B-E6Ey<;Z*g!p#~_3P_%7Fj?(w^%+?VV+uvgt3*;Y)fX>!Z3JD7;+BHSyJBvcXhRDv09wK}oYNyMM!^ z!Mh3}(G%BtQp4s>*Vl_C&5whW+1$>Z%qK(`tG+X%TcBjP*qLhCL620TZpVSD%Zp;m+1 zdC$z<%GujV@Q3ZU>YFSR_-y5R<0}s)d4Wp?y{;b{C&m_+ulbjKKiM(AERl=zMJ7ef zrxfD&ENN;`vYS?yVoM(>+}FNTG_9GwmiX{TB3#rq>N+Wa<@IT_(6I`+Xo61^{M$$6 
zpwl&nn}O=~)3A)64qb9Be{;5O_0`@nAAeN%%uO#z^BmrhT9Bbs^{<=zZ(4s^ZJbgo zVof-nMC*cDwpHc|R3ep`ZD3Y#q(yjsi`-y<_>}i<&5lTUMLBIUhUMEG4?Af>+G&Cj zszyp|LBC?Of@P?A@R+RGD zv-XD;v-ep>4xEQXmS!Et@yvV}rm)@;|~UR@vH}`NUJ10}oqPHdJ+ z6uBqoZde{o0!D$mgDjQ$9k=HT{AJ_)jsl5YW&e|=RQ9*- zj=jO#FRGu5EbYkqzgfH;W6G`iFhrL0pz;xpgwc)=N6uSBz*Ea?6XNO6(DDQ(3%l|} zHn*Y!4f5BrW#P>9TaPK2jm+X`Mo2BUkPH5v<7CVT)F_z@vzC#}MC}DH8EdVXJJu?o zi$%KkFk%Z5I=Gq$bsjvNWDAZw90idgS4i`Xqk&0mHb}D(04UZTKoXR4LoE#+SxqRk zja;oM2^xFl#TK%GV$@`1AdRPBLYT!-_tEFCN=zg8#!@J)5NPcl`jfE~ax*}@42kk( zHmY~~$(q3lF?&2;;ben=22t8!NzAu=jyDwcde25`W!oo=6=iEtz&g?V@r`;D>CHPv zWhP{Hj#@`WBcyEGW*OEK8QNL0l4CxjWb$h3TP_RYeB-qW*>Co&_T{!9c6f$LBS>D( zODLs)k=YSNma%ZCLOI_*peVY`kT`NB zIrH4>%ycxlxq*5nq z=`t`L5)v}z6-iJP@}xax2x)_(uNIMblv`64LA7r3`YN`YEc`8%y18=w2)>qLrZrPR z!|sL`qAWF24Jau0piVOCrSb!9Iwgr7R#w*%}&e7 zBl$RvU(pi#Z8c!AB)-33@Tf7@cn~wdXB(*dt6#B4JeQpPzuu(CQdTaNYjVJkD>(P> zv*}wPnQZ*VbP~jajt&UGOk|I~UIJxMW=J5Ha5_%WqttFIh&-}hRHiSAq{U1tNY2hg z7Vx%bczY5yF;kADeVZs5TMoJ14+5$nsf_Jhfsj@-wErSWDu0$LIEAD*5P%a*7!7r$ z&?2V@jt5XM8P>(D=eI*J#>C4nA;l2pOea z2%b5T-bGlLVOYH5e+->vTvKfrhZk(XHjv&(X=z4-fDT4?cZ?3{P95FdrL=%3AfYJY z=rQEmX8lwc0?8a(WPhK+M{1Tu77C0F*{yu&yNX%SLCO~M5 zkp+r()^TO@lLG*7Fo0=1^@;=UAL{(TaRcWDTscPc+WT% zVGr#GYYUmuUJF1&it&rFodyhmWG_v^kv83x1dNi>0NnYKTa6=x#Fzj@>_D#y`dD8H zsQ+|9;yk7hH{Zj43&e8Q;4YukD;@j8*u1SuZ9b1GM71@5@-AQstrJ-4TbzWzSEYHa zHF}8^tEpTJQrV++-+{YT)$*&rxdQr(y_G+uRHo z;*sUK?sA@|)#tXC5FpxAXYe3bd8RT=GCWW$Xa~&9z6BD_X}IS^PtW`Sf=*>)ZhzAT z0wSzOOv_Iaps~ru z5FHz0dfp^a?to|jn)wCqM%@0IXkl-F08<{-O*H^T^WVw1h&3Nch4r0F7do@6`Z-Tk z4%^iYy!=8K0WDEy$vXR~+*Q6{IxSHGS!_`N8|Lmq{0IoE?|?!@1{V1NO4cJ(XQL^_ zhc}-Tzel!6*|u~EzW<_i6SD=-E5s3nBN>fA6nOmgQiI=nU;}MZWZ4FyQ8bc#*xrU; z_q#Zb&O2g&o@5p%-t%(fnRPiS_wVk(xtHWMtuKx)p3Cx%Rq651xyBqt!jm7+Mf1dA zg;>M^aWiNVl}kMccV*p59#%45Q+B1PaT9ki!14YV;qP#A`%WD8!K82AsbNe!>F;sJaBXDTv=Ap9kEKB1 zVHy;RqjR!01aR{RZ^B53ds-14-d@V_Sw>R0tnR|z9NU$hK+}#uy(0(x=sm!vB(f*8 zkb&&t0$3$k*Am3MhXA|GCQG09CeOLW1^|k|x2ec|G>lSN$X^>$vY_$M)A}*|>Dt_QfS!9%qLWKm~Lm)}VtDeM)>cAdx 
z42Y8HlkdgWUG@(dVur~Fhs9DWk_Ue_#H$>U$Qpe5b2#YJs4tTr2d7D(7{%Xe#Hf5_ z*o;pFH4`gk4q-B9B#`6rER|XNOPufdkgyrt%o*ahN`NP8W%+4=ED4sAB{B%?EKiAb zPrltsad%LnOECtrk>=^efzVWQ4WlZm0KDzdE{>*xXFy&2h8c59Tw0Z-__pnZ;_S{D zK%0(YTSKhMSg}dx=YD|p6II(OZsKKyHOz3#WEKq=sJ478lRhGINdVj6gE6`kHfFLm z*t?hv*%b_E5);9`=%l1O*@)lpGLqxo3FYQ|PXm8DP^kg_Z^X{yHQs6o8z)f1=kQG?8kzqv49+6 z)M^K7ZN@cw{PTBE3cse0Yk=q_8M3oDV~>h#&UKz15uLaz&7Hj-$G~ zVaE}w#C7YJNFUjNzv&)-{OSG4F4=R8bC4S#HR-xKJG#aG&G)OS4<~?P10pq8NT!o6 zAgsRCI+cZE{_Vm1ia#Z{n4Bey0I*OhH2Dx*Ma~lS2_Q-6sUT-TV1Oie5DDP~N_ZWk z*DdFE+&mo~KpU%zT@^|dO~wNW1W9587lGBLmX5oQuNY&*@<{B~t5_S7i}L2n_yz-Q zxnfVC7sBskLEEO77UwCGJC&g1`u&GRNs6j84XW{l2ajO25~b% zs-WvCG8q0B4`GCICwZsAk0s8*xkQkiKI^U;IT;?DH*8?)P-M$Ero2Qpd*V>eNa{OF zFc}bHiIZq8L3fyeY@TcxO|XkkSTC~hULh#_v}8!IB*6n&{<)@ubh$sWILy9Eaq}jG zV;iePK@DH0{Fs=+s{uDpUct<1SjpCcRtjIxD|8uFJqx_^%Q6!8NY_aml!zvai~ zM=FsRON{V1M!QZ!G?|XbC6{A4xdiE95EUA*USyE4SQof*^%5eLWSI_`V`edNB6aT% z%P2$I^wIMjTuK3fTYwU@W~j6h&n}_O?B_wceMGcU^W!_3LM$~b8)OCpYS)ws{TKul ziF}_1;>-u@Y%F3^9#f}s#_Jq+-XC{G53fXY+jvXk5Ca%yc>Q0RlXD`>a+Rzc&&Y&uon#$fqay!4q}Q>3M^VDR zSltyK_CF0ukVyPaBlN@iaX=hDAtA8_+J1fv-x9% z-Tv&BPV^+{%p?Gy&}6)Rneq3r30T;?XBB9HM=TP&`$oNskI#GZ$a)Cg!UHyj&wr&< z!pL^B7nifH>)U|HW~00StC)O)lxro;-RyN_aSS~b9-1DTJJ^(0oI)%K0|0H582O$d zM}v<$^gL@wqK;C?{*YH*f>JidfG9lhnxpbg7Uo2=QvL}r8E{YSyW0oxw=c6^ToTH1 zQV|xFGgJ_{%k2N|5<0wCWpSvd`e`>ULdjR<8RQcwxhu(l9d8owf8P>rDLzSO5y541 zQ$!kTn;M``P@@8@d13<+hbB%Pv4$_w-qqq^-X)lYw0Ftb*!R!`W=2Y5e3RS4@h;8KE3)5E`~+D z?wTqdk&8-_Jv$1HD!k=~e^xsaVZ$4Zb6pV9{Wu+l+`Sw$CiZwJb>hqSSG;bUpDLeG z==m>luG5i@eApQdOfaV^^KUvI|4_JZG>MpulYEzQSpK4HHmZ!9vQ%igS9qFv@~V8| zN^bm|xr0)PFD_O%OGQIP%ZpY?sB7Mr#K~>k(bsrkUHld3@Rv%$pAeWKy9DG#mVSqz zIGFT7l0`ykZ6nq~6GUG0h9c*anhZeAe1mTzZpi#>2FGF^fU+WSfDC9Dn3*KBru}w_ zhq{qT7DfBH`mMTScy?v@%_2@2tE&vdKPL|lXH_crD&a#x(l1%r z&6i`1eCuUj_kMXx8g&^tJv;K{ej!WuL-NAXsU9XbBI~iMbcWG~up!z8^9vE%GJhlAv^p^xOgStDD`uc* z%dtt1Pc47lR51tQDZW7cQL6Q1%w|%SZ4Md^PkQT=kJMl3VRbGTv&FN#!zj z#WJBEui?MYGZpGj?Ka;8%6@n!l*Kh*9%er}`uCW=_d4vy&cZYGM7Dc>e;==oAZOBS 
zUd8M|J((|W69B+HeFBJr#IFqSBp{h1pusbd8#+?m)R@@7IZ~S@o|~-)N_boX z;in1V9Otb}G|J(jwW;z@Ey`5>5nmM~zD-Z75ti84Ah6M-?l|nI(aiDuIA^rAi6&PF zJ_{qn_!Gu>S^U5C@0Sh{Y0_JSK8=eLTORy*Lo?BbSoqR9#0F=-y|fWUJ1zUUJa78$ zSA{9I4gl57WFkHgTDqAC{3fjgKR29JiA*17{FH6{O-`gVFaNF9vq&E~(U*TY-<%Yu zy-53xw`@`TyH(b5&-e@W{N2U;U!I}wq#CNAGu%G-+i{BrbKpi1w=D_X5A!pf%6y}RB%*EIB3E7CH3@h} zYdf}LbJ-EgR^>sCpXo#OgNlX%Y$JXQ(c(Hhczs-G#{J6eHrhrLi@6g^q8{nIWNhQU zKRP-b=~dsXIFm}_;1B&3Aolv>UDn`U6gGNN64?&l?Hz36c$w?{p7r-e@b9qOlX_}t zAiLzUuLe6V%xC-Ck@1${dyjs3Lp2f-zscz}{uln|Nl7F&lW8*VR!LoB{n~M3U47QQ zhTXXH;}+_m+eGlyx@+}IE@A}Vn(NzADvpGYA|c#Jxv^I6jtobaTE<~^>NnGMc_E)w z)Uf_TL=z4UMQi2zCw~@I)*9V@1aIJJVG^1c4$ggR6ZT?3kzWLtFuQG_IR{3?#)mA zXlRglYzT`~+10xcaH%+9q)^!NC|H^`e_YS;)#E^ZiD@D5MwqCM`LJdBXy=1E_$Y^& z>Pn-HZBGc-nPKU>4$~lUr_aUlamLZjY1ueb_l1Aq!Ehk;NPcrNxlpNAoiX=l5M&e4 z{5dq9Fks$Z3%BY#Q9!xL=3thF=RKawX<63dsFw~U`MFNpx!S^h(iuB+2w{=fDyA)2 zy~*m+O1&Gfh3!CRM0R7_SN_D8w4=UM`JC zX-+>C_M$*Ux>L+a)uX)|i|s-zcsaU6#ClmAi<^P+AD%Zl%`P6Bb+4IDTz?blb25<$ z21|UB%nBpE;b)^~3|4R>+cmV^TCHs=dKc!XAL)K=CpsU9xT+M5_6jP!_oL*oQOVOv z$hp%s-CGNXATSq0Ndxg4WFk>WP;l0Eyr3U?+PvMQXnK3@h^4S!v+ zL88_!t0wW9QCn4y(%!VKStV<6y&mAzaF1+6j!=cR4LNE+{>zN=CBpQ@z0|xJnQH zKmnYWo~MgZ$ zL+)?Cd!H*%*%-TqH-=c8Z}mU^gl+zdg58e^)5SzK7i1UGH#_j8dBeqO&C~0;+UZG@ z(}{Ut`)(J; zNh3HEVefH+CJ&=8_zyQadw=yn@bo9c(hOm(c+=Z46hhB@6f7_lc3#U=ce7)N>i4~v z3peOkmFV@dA*u{MZ{OLk$+FIMStif3Rs|&+u*4Ph?9dn3v^{eRknK-+ z;3v7moo4)%hK3s}pfL3{3x7;+v#`OfP%)0qzV7#@LlpTsOgPkPElF^!C*F$tX`G{b z*3GxTwo>?Ku~CoF+Zcd464on&oHLhs{QA<*==)(o@za+c=8uZ&`E|3SAL=$^6?T~Y z-hbD;{l0%B^ShxMGx+17jlb?()P~g~CT8~kxUc(cJQz~`$_V4Wc418#$%yLW#|6`= z=)wLlHL25O;o9Pjx~ku0+m{!|NZO%JzIlIGN)Tw&x(f-F_y z1e!lF8F#n7jkbM&jnj2iJbprkGEbY!?$z;J(L?ih+9hI76HuP@A1V{U?e*??hMw>C z={*X4M0Bz?=Tl^Y@0u#gR(_hE5-a_^Kiw2{VB8wh#T07|`Tcv**bJrj5XOD_W_XLp zr38)VME5_s+mxdPQW4O%5{DP^)q>>Gx$`*|QG9tmwVlc-cT$z(tj0S|puYQcgWn)8 z-tcP%QNGG|eq^Ny6|Oz7!@l0aWI7&R-{uE}D zcfy)U*j*C0H)3mKBAi@3>zk*wTxVgj)k$vqFynYvc}bV)deW~S$!FeRDGA`HKSJNG 
zDQ2NlpU8A?HVXw1`XS7(EdWx*miZE`GGNl!ZqlganL#B;$)mHw2vhzg>pL`Ex2N-s z7o!=gH{ombm4wfqFV~TuO|W3NdY^&kmFq1xo7NAHoqtF09VSws+4qAeYZ_k+c0&io zVNT;3>SD3pYo*!9$effHqQH1JL!$G$hS|x(q14_M*NsWieWlIgX5(c_iYXEKX#?XM zoAbk}Papxp=+kc(&%Z5>W)6b0dR(^|yxFJyPyHZD1V<&J3Ypa&FE?kcg+TTW`o%)2 zsudzxgT@f7iR*AzwnnGy@#gr6>`^s=YHV5TATv&`}_?A6ipwP!M= zoN~oZFT_7HZj#Tu#!Pp(QLIyCR@Xm)!k<(pX7+>4G9B(kR58;~vUbuuycjjNno*{6 z^cntn(^eKLs}hQH@kwE>?L=APcYhNKEWW;zjGLF# z|E2dr)FpNswe6Xj>3`CN7RXFutgM3WVg8k4r*nUnPfnkN{c<11BC~;J`3|y;;{U-; zT6=Q7Uu$}dw_#rYd_W0fWfg8$1NkftTY`SfmqXg$EMP^N?v@2K*ZS6LMH6=3|K|31@-Dhu6Uc!da37u;I)gGYKVNsIK7S_egX zRYDpB*HT5vbI7Db>9EGT$1f>2R#JO>0#J>I*otl0YEsHAK>C|4iZCc5s9nb1nP}L2zaLJn~{%;7L0RO}se`IvcPz zM^(%Ik<^P!#funcjuPP3<1FYsyZbxPo6k>BLs_R2#8Y}hr3bQG_T_%7w9F9rfv+_sfbZZQ)zBrX+kwLRCF98Ay%9vmOEaQJ3dyeWAtbYjh_|m2eLQ= z07CgD`er82TEt)Ixxj{`hsjmy&;oWLjBYd1@qMNqTqv(NsZg^*wmP}JW3wT4^T>91 z_Ppv$qj9F0(cU;b^};-58J>BOCd*@9QYg0{Ls6Ib-=)y($2tTI}XD%nC(7q9GSXUl2FPB!dU+p*P-|1G7?e(0l?!Gm{e=jyTU}_=} z|F#_xNkqBG1fdFJ%$~M3Q*z2=CxwQZnR%GX6e^i%nuPhzhgJZ?KWU+2f2zFQHG9i$ zzKxUXxlH^1#cZTFyd*Sz=+bNeC;t(UJ`$Gx4wa!F#O97O8N@~bbQl*kvc6U>@5J|d%t!Y=iX|J;11?T+Ujp~lcp`_Pn` zfuIiGs}sm0-7pJ2<$>aD^Te34Tn7dXe5^xlzC{%iAeTh^N1^W(iEN9FS?n*f>OvPr zy$t0--|#zTJ0y5ZbuUJ}f@)@i%c-a>lLt2vmMZ9$z~AN)R8I`5_h(rGN7>t}HqWgx z;!HSab+>x&&$b>1jP(F8ilEgG3{goER_WsUZs~BmBfJ-zFw`e}7OoS*zeeRcwmhh8 zx}1buv zR_!i(oheh@E=QfERh?UD-7W#P53KWGdTJQKw$ld-SgUinTbJ;rE^rOD6O-xBk(Jir zIf_L_nP?>CE2Psh25rl>+|BZLkR6L_Optul~5A`5n_WU z;go-Qr|c!aJXGNCOD_3#p2&8&x9y)lzf_g)5L<_sRYI&P$^9^$mTyV@wmOaFJIipL z_Um1ik)3YLU7>F~o#bCVVkQk?CN+}>G8watD>+X9S;m!E2vx81-*yiubyt+VDJy%^ z^0&LOEVqTZvaRgRL=}rC1`t+UHSt%{G!HuM6jkK;X^`mOk=w0<{eR8P&(g(oe6@Ps`E|xh4cwF^F$41aS=b zb0EF?82qXje6|@pfddjfgT8$X0USfB*9TQ787%0-MpC>(P{+vAB(~q9|lB%XrQ;%6Y5yK#B5A&%~#`i2;Sl zM#99H&E&ua#q`F+f(_+L)YNDE#9IZnt!>t=zUgg;=`S18zsje-m(TpWo<5J7A&s6r zzh>PppN6o`v=D9^^v&J2r8nrKW2>0wkDg~$oEP1kXSH3BdpCbaab8Rj#FPiERATOk zL3zTN%%Yc!E0(JImK-XUoD~1sRV=%0F4`(C8CIa2F#zSIp160&7D^~6eCEPS(WeS| zY@v|;flC;?v+uR6r 
zU>b~GfBSE9q=F#`upx21{(f_FsDeSfXSD6j#uDow>AevNmLc<~LAc$3)b*h6#x{lB zRBsenXa?7+4G-Y1=tqYB3WnFw>q@;0u1XBf4oEFK2I+@e+QeNq2c*X1b?t}i zn#4U1wo!WmY$HrHlt{6NOONh77_m^@TseqgJA5TeQOI=^Of*}?9d61VRbgQD_~XWh z$4}YG>)B!T7+BY1SQFbxAMr~c@pz#3_*L(h=Z}v!0c;yU*b4x?0FXu~=GzgG=A`%A z5gYB{xx`e@jyzA^UE=qV9FO=J4F3b$8d{;tMPOO>UCKB=P$ z3}2DCbzbFfoInuxwfKs&n(u!E0MO~U)w+uFj7{XY^xAvrZ0jc%ZttzC-Z0#?+Yi3K zF~vIQ60_mNb~zAz8EOCf)4$)r+nd1-zmslnWZ3WDR~QKdK2x&!^FRqHt?-+iID)kq zaM&1me}8b~$=@a0LAi~MSN06!il5!Ke=9{X;n+UkzxVmR-L90~hBn)-;p6`dd)GhQ zW_Kp1lYLOxjP&p6`7vKtrJQ7oQg+s8a~MpeGMR#T*{fK>Juu;@V&avky~Cq82>9egbgMVv zfCkKxz5)kco@qYT>3;cnqFCa=$I0&YgPBTf8k_E$j-!PJ?7Gw2LS+&AS5bWz2|Cgz zD{XG;k2A_beOJ3A5P1n|d`mOtKqAz~`sCjRHKjOLdX`^|X$ZrbCG-1eC?p7>xOc^dr z!9-FO)2XAO9ZM~Z`R{$!I3+4;IcOX=#* z#a1C#qlI=6-JeUX?->=ES$46$1s7x0JUANF-BF#(wn(E-MAytaUpWggBC*+6%jR&ivK`> zi?HMK54a@Zixp?LE+%V`4hAez_sx~X=iQR4;IoFVDNmo_r3}xekNdF4!4niKHl33@ zZcl=bX;z*^9LIz{XqQl#;!^icZ#05tA7eJ7&}1@RPslq~EnUQ*JT(UL);`x%2eYc|80Lq#-2;U$6kEj<)Q!9Mr}LKCWO$_T$UOZC1So=WKy;d7hWnC% zwodi6cyTXTjYyy-V&OU$sYafN>MJ(+aM70a((*JGHCYKAfeI|YhBCIr(l@S|u~>cH zkMB*UCr@ZEqw4qe8M7>E%dfe6bSR*_+PzINz%%e&mQrCM+3XSD0hG$S9X9%TMtlV2^MmaC`rkbR(p3$apc4{@ljJb1Y#nTKz-+nkx9e+z2;hk zi$#hGPv^^*FN(9GjjHC$6_W2Lie-xgUEKzkail9VW{Z+JGu@tA23hWiG+?# z*xA*`c~uc4V|SZAQRPX$&oPA=zHZuD5%6v0&uYD{7i9h;VE#6@>eI_#LyMY$NwY^; zkJhla|ERd#D~NPFSOY)zO?VJ+5OshLqK2PTPNcPy)gUSQRYmWO$T=(2sOfa%&{E&Z zU}Q8zk{wsy`lqb|zlmd3;%m4ODG>?HkJZFNYBKdE)$s8vwAIc9g;G0$Cr&CwW^457 zDzVyV-MkWF!+f%O?PR@$Bl#b>3t#wYVCq30|M{_U(K#WL_Z0Y&8Z&z2$$lt|cX70})J&t>Lw4Yt%)9^pX~9 z>lK7QO#j2oS$7{YM1sm#0ETeISsE|7tGI4p@Y6J_ji=6}AP%y*Q`^qb3PPs20^M_W zd!Hz<(tk_U(cvVjo(j*C7_4X%r}q#Hoec)9fUGBoF-?qTN=_}i*d3RX>Q*Udn$}bB zDZeU>Wt*jzxf=cR;_rF5Hjlu-2qX1!m;62w=xj4X`Ws18F=K6<5L~s`jQ7-2>)%8u zgou)21h9)+X0${dXxmSCbxJ*8o`(5zEW344l=9JKHX0o+t)~}AFBw&YfQp*Cp)V|= z7q$@@OU*sv>H**YuK>OzmJ9Zz)|&O1&T{(O166s+1z*f!zbvpv=#I$$DsAq*mR|in z3*vZ>^?t9MKb(I3EsU&_EQ=nRd;0^my>Dg1xk*3GDu^8JCOdW}#5;=lL(Rkb&xt*+ 
z5|YaqN+UW@tMe+Tpz-xsx2JgCpW58sVyh1{HOsfLJWb9IH-x1|e(;}}jc**q9vU4 z#a&p4P5k>XEYl+iMV?-p*Ba&B*X|QR2kD%V*QL`zFWs!>{C}iL&{?7;E?2q;Vh=*y z)HxoJjoDK@&(RNBOD5B|r&;x9%469mQdwoS=(WgK8sEb#b7z};WMl6<@a9L!=o)I4 zXnkNQEEIV)gCD2OvZ^s=1dU$I)A%7uJ0NE`$ESl;lV4uvPq-K4+ zgZf|s;R&<{zIHWj*`e@GUSKBH)0?_3iAv}vmoL7fx7%d7)#k|8Y++Ko|9pD?(@!qB zNl1udVchxWNk$9Kosm|?7uDfi^k19=M~`wS?Gwf>6CsefEhp9%s^&ZRh`ibV{0>aE?eN1~)j3ZjYbt;>=O9YPUj+bYi zUIuET<2^g%evb%FQ(w@^n&(wV zY?L%Rw`qNvWmB)%rTL_%ZLp8|Hk9H4`)hkMn>_lHuzO#uc5L(=Rm&}g7$4cp%?}-_ z99T>!Z6*yp7W?w@KN*oV?NIXI!@j!#-uQQXUsiknT$x%MNTpVB(CNmiI%xm5B3+U# zR-&2h7m&+tyE^Xc=JIV>BgMej{s;Z+=X;JXZKQtP6a0l2F*dF;At$c5-0@YSyj5fu zEQZQ^ih86ZnV!oR5hDdA31>|dzTa$%ws;Ee|rG~SD|B9D6-H=f}6R^zBN<%wa2Dp;;7~< zd?_ZnK@$A8R(}8*b?PW;hEsDGQFzLuAim=j+TjG`;vCFVSTgf=G@zlQcR7DWb1#6V zwa!Jh-`e%J81aBhsh>vNFx2w{S16Ka*_58O0)+U0H1{h5ZWTT@g9wi zW>W46fmj`Ip}^=Yt2B4KN5lg{Ai&=jhllXV2K);|gbyVT;Ty9&G9-*|z zz(;E})}S!j+reV$u`c*zhLLT%OA)fpLd@QL$S`z zrJ>=I&-bR$wdviT~O_Pu;M`B1ZoQRZhwaS=}GPnDTR zN2NQfg}=Pi6?Kc38xfRFN|gC6GZ;`X?w`4lo4G}l)%&F+SB1U%pV)0B45<^HlYU511*Q8;nT4{6A#wSg zL?fj{2fQ(QYljIvfF#YjAk5$bgGAY!rNB~n=j%2wK*6DJ=S(k zco3c_tfrs4Bzqo@UFORIS0(ykoQaNTqAD*}Ljfc}_Msd@2rMsnZ9kEe-$e;%Ev5X( zKw42G=MlA&m>f+tJv}$~n%Gxf7j?kB26pmYb46Vn7p_mLnBogh{PJ9=S+;8ge_S)7qv-NTW?>y6bF8l~`qfi|pwmXc_JUc(+0N zzu~yuyI{8alJO^U@+5zP7Iz+xQ^Wlra`SsK1Ii+cv2ycIA53$yf1t70 zGmJi!ds2mkFC5%n%XK()5Xs?TwH>?LYp&lAD1ZDjbK?#x3S{sG%oQ$+cFk`BA5r@! 
z2V3Gfm!uyKEl`yesAZEUKa?eEJj~B_chiVpq-BVM$Wx0ke=Z3j72`C9rQ>acv6`i$ zu2P?QAIH-h#uy2n*UXn{+Hh+6hLq7^z?||54%y(@|k+Ft^Pg}u*6nu*lTq0)~2Rv>5X?h*3vrZmx!%01S&GPdloZ>?6KvTB!2Kj)7L$%Dq zq^LG>9NtPheobbJm(qWd$401c$k29$>~JOHP_VyZWNv0=MrB3x1~J}1KVqO= z#vI>GR@Vs3AF_=gdCC_rh%y*(OPb>=Q}?&hYRP8qE(*wRiGe>urgv2{7sy8n1+loK z*4wG-xF#Up;1QDQ8U0DDT{5f?LpX$kRSXTM78IJND?E|I&c(3IKb4(+C7j-#U^)6$ z$y|0pyy07X=Jy{K?*cgVk??sx`4CIdmwuHk&)CVt;p9Ip+%H9B3aR+}6YWnQJ_$y< zcQo0awh#Uibv04w!8#)ipZT7W_-s95OyDv;<+>%I_%GDQ-~xOAy`|>ob6Ds@Ys{se zO_wzonr}i+{59a}Xvq6Lj^12}_jz}yJt)=0-?py!ekje089zxqEYi@GscEC9e&d2O%J6mKxY#}30MKQgpb8jp%O z=5g=_uJ)w|aN3_N4S|)NaqnidE6;*hO9U$65uywnXm(DhBUP^@Rd3QYCrKT|acfj8 zpf~fnJ>DyuK}? z#0pP@<;l{+UpGr?)%d;*b#0rFKfm`@rzQACWJUpqA2#V#&vXWr1m<5CEu;xusRS-OU_H-o{FY)_f1&%SxXnR9^;STFYLK3h zWV=$((2gX<0qXWAcfI+i3A5GFaH}HZyoh+D{IVEDOBs7Z+}#CSAnBhG$HgKwUMP1J zmDMc7iJ*|J&;bosONy%c?v{-ewDK9GmS>`CB_eQiQ~&*HXdIXP?ww^zfb%@ zs}F#zIJsJVR%CVo*9EV5{6Ynz^fWsZ7S{slt z(bKblG_DiO0U|sdl-8Dy)a3FXy8=zDPUqXMvLFR$PnpiV2J}kcB@bBsT@OiCg1cMc zxrvDGO0XmbE=gu4ME8a@(Q}yu+>w#dEe=PY4xy>QAv%>5-tdlpL)sQY-JqZ)b$EBi z1{%<)ETh`3yFTG9Yb;4o^Dq8Wu|8qh!hQ92C#{-23%KJewv7t@`rXC?VLjMVl$`=B zm%ZMQ$3bQ*8f=-R@P#@H0rdlrno*IOl9En>pZVe-H8{vQ4g#_ZCb|Y~nE|-`44x_M zJJJ1s22eeUOV;>q070>Ja{W;-Hh>_Bl!mRehWKq1+bt6VhzUC(b+6iB-{rYSX%U{h z(GT_%PhJF&;Qvfkgx-Cc4GVy4$?NS(Ws;xmy7JM|f9RZKz)<+bAk&rI4D8${adw-g z2uTBk6#jFt>kA7?M9>8yQr_gLb9j>Lsw-Q!V8g8|Dgj}SdNezT77aw+O!5LE>!T4| zen2`jkc=H9hBqthf(se|NezIU0PrG4rpqzFp@8|g>dDlcC)xnyZwKT8fZyGLqcI>( z4DjaF#R#)8O>k~BkQ4(#6Ef%@wCbG=`P~E3ZxKQ528|a=a4~ieJ=H;5q@M8}s72&} zl7^kYaC?o_3u+WNkEYZs+2_$v;p-v7ZsE=CE0RRU-* zq$(fre%7Qk**|Y%$O9rJ3UIL7**|GcfdP@4g(kLlq_l)I!G7DAq1hu^3{VtbJ3EU2 ztAnL059lyJ&epS(dAI=kIUTVQ6y)j_-*2MpBB_+(?q+v!28&zBhD-VZN!dZ1e!v@I zVLGKCJ?@Z;9mH0Y-`ffz#R0=VUfe}96Yz+_<(7u0aRhPQyO={xG^jiCxI>VNi|T+J z2N1!qE`sLiaR4X=Bsqa=f)-98XE+rp(!XE&-&Qe_9PUJ4`BNUVv$A$%gXw1vxVA3Q zfCnyPL08lq!Ipn{@PD`f9D8D-TzCK(?iiiT-iG^=aUaBEa42{B(e;qEJzJiF$-5as z%7g*1;=xR4fbQ0(;7e%WEb!nF5x50how-r#f>~REBwN4-Vd}Q0O$}1&s)f$?{i>ym 
zP60{;Y#J#MT&klPOESjDZQKyXPKZ$@rBr;V(#S*1V|<$qe|bjjy?!IlClS-CWLBpol0$_s!|q(kcfLqwu{AhTa$36LYNa@ zm$bf~PE<_1kbQvwTwELtlw1CCovu)P1ZA~4bDyu%eWF%jb?&+R+_tO>X?8*4*=vKM zs)~cAlhf62{)B9Nc!4C~&SJ&-31lvB@rK0!a3Uo&nd9ZpgozJr)@uo*Bj4a9B9e za4ovXs@{JaKwZ0_)awCi7A4p?&N{?{ZAOO}Y`m)K?js#%6Bw-TEvmk=@@$3_(SI?{ z>c9TE0D)epB|8+d>>0QASU8qIu|!Jt?f2KXVpS$992KyStWGGa=k`R|K18tRbSF(@ zI;w^oba!?TCdl9ppcLx~i<=gGi64N;?i>=>Wj`MFrzxnX48#GN!?5Y1JybPmcfq## zaLIRmbTFYzvZLhC2&%&hz9x&U7(OGW6qufQLp4mGKFkr84m!n{@-@j4!EkPkH=VUM z{))Q@34`!4ni6ys82`vfZb*<^r0rP1m@a#ZPDif_8}lf?e~{N$r?1vp9o^Wp+WgS- z2IOj#0{{RQS<_Gs$&C9D5|heACMLet%Hoky@_KtmPrz`Aeb7x1q1&?|CJ~p~Edyjq zbZ|vQCpaKJj<5u#7b6j^2U1!0p>obS>b%{~d6sFq(g40$VDWL#=unYOQyj3^I35w*Y$`~>FP4akO~@9i<%Xzi zG3Eji1oM@7Gf@m=bhFPja67jFm4hZI4su6eC1qpsAdepKxv`2P{qkK^0UZwEfQ&So zsU6rGXD$E?5QmzjQWH}_1DWXJ>;*6wijB|3LF1u!9|) zCmCgXP}CKD#SWu*r#oOL)R3&2c;Q_7lkTK(GiaQR!Hp>hn<(xRS8dJC<)*t15y$&8 zQ{(Pn#a2}P9vcJ8Of%&Bt|-G|!PRftMl){Q>8rfxSjc&@m(5$&2rrI|JskpsHy|cD zio2>~DMRV;-*RD`0kE*~4w9EiY1=WVhJ}0@kZzolO@h8zI?O$hN{EPiP7TY~Waaa= zHOcWBdCS?@f>+E`LItZl9}56p6sbe8uGC`D+VZbLj&X6pLPhs~Mham^M10KMji zyBHYLma6mKh0MUs$uOiu?t7}l=X#U-GT zf6FL=dhjI!nXs5LMB^DHU{nQ4XOM5IQzu|?HD*@tD8e|-Yd?(}wiGcKa71HKf@nNN z?2mMZQ#jMKEWBUD06;$pN$U4Yln7cW61WF_Va)E8@Pi{Ry9Q8;hFLxxmaH-0ba~ei*SQ!xzQ7#O1XtG-EW|Ze9F#nYT+b)cAHbzd*g#tAV13x%gc3Yj~;PD;Fji((!VLg*t(UpV7llBwNkbO$m;?{GXw-aBJ$1 z!uVYPW7LSzF}g-~h@%^6X-9{El!}PDjgT$@DPg3vlnMyA(On{fqEaFt3Ih5A#QEcS zzJI{I=ef_l_nh;-pLZT$Md@xtJbizDE*|nG3seE$rINk~j1MO+@HC@J3kw!+z z1Arq846s!*3ZgtkMDWOBowpR`>#9T40KyLtxd>e!4N^F`SuoOr0!f`1$`>uAHL}Ek zX)qWty*|e(hhd9_bODa}{Caz2M#;NyY9WiT(kDWkqwYxt3CASHo1q>-c?o!HS2 zR0fj9yyY@#N_2!E=Bq0ikt+psb6&>AOM%=il6OyLyexEs)I6@S z!JlsbHefYly2B1F|9mK6__dDe1{#P%&IwVEztv4rURLkCH>&5XiD8T)#?j|tFt2s} zGJDJ?k7CEz+cCFR7V;rqrvEbrnN7=FUbxnt4nVqwW`4vJtIF@7_@N;loa{spNZJW> zX0gCAcpxqpugglI!(uM6ynR3?SW2pOtE{R*qOQO!FV01l z(Qm?~ff#26#irQD0ssr~k7RU;Z*AW!3nqL}a$b~&Mq+O+<^`_5NFwfz%?6i$Ij4)y z0Ric>3TCRL^GYPA(s2T^hT7Ma@@*cQUMb>KTs^LJcpbC&veYNgo{P$g;l1Yj8u^c{ 
zmC-GbxHCr_wPmHzO|J_gpbPHZdbUeghVJsp*T!%bsmyRC)6c&}>e&BvWiOMquEo6k zY9@SvFQ64Nc-i%fy|ZwjL?*Tz0`G{x{BmzjKd$-UTB8~^PR2yihzM}0$UaADGF^xQ zX-=7Om8vcWIqwJ}r!Hd6g80CSgil3@`vY>Q?9RA2(C1|hT}gQc9E9l=7)%1~D?Hjh z1xp8jd8Ol&%300UF!Vf+R%zKd8d))9jpDm;rNsn+lP+uBq*R;&yLueE7B~&Sft-+% z(Mg~%utaB)B#9ti3uZ?Fsfx*N&UKJoO%WVH1Cvm6rd5t>3-wG1I;HWOQ?wvLi=FD3 zX7kNkk`W|DA4zz1biH4=QayK1i%C!DDhIZuXTYiF&Yi@rQE4%$!S+rLBt<>V7fa8l zi9>cO4HG$&xe_}SE6gVw-k4u{cQkkfI%K_>W(&dD-(`g3KnoEdXc+#@JmdK8lTWCZ zHG&AJ>ClfXChk$L{ZXzTvk9x%KCh!DKQB?20+}x;5a~3N+cTR?7IjSm1x?^N!$7YE zS%5j-6$R0@Q{D@$7D!g`=9 zE_Z;V!t%3tlYev(YU22fv&p7{670ud)mg%EU@HYL5uF95WCsya2!twWJ!$niDS1LF z79^%EAekg-S6BO=LYwO{QdqiMPJ)Wm4y`%CN&I0a)~LBa{U9w4$JR~M z%t|(wf5n7(6_#9IA66f^tj}G6A95;aN-_6@QQx_P_sOF3j27}oNeAE(xvf(#uv-33 z1-n=S<4Vp!9LH_RUi07z%WujyRg>c#7X5@v%S41utG|q^bxtE2l8BB%LFOxvNdNm-M;{Pfrh{$$XK2m}kMzz<|h-W7op(_fQ<}_&l zW7&z$2Rt3wRB?N9);W1G2o17}!C_3zI&Rx0?hyv|a1{rt1*1uXU(A4b7}7DDAQcUqha;oq z`Nc^R61jxiSP)f9f4&sa-G*3j*sm5xEXzTK)e#Xe96h*4%{v~>0(Ss_jvi=5)&N{l z3qgu+ik@!(;E_M!d8XQpX7tQqoHw#KQT2E%rlC71(SVowqSl>moTzS`R%L2;i}G+s z!N{Hvi1ctmWI<15;Soh}h<0_7o*6$rmhegaIp;-ToVY)k;5ci!rdO!$jt#jid=ono52qrjd0t&%=qqy)I{VodE=Y(C&=7+Mk?C${72LSIjj(WCM-sEYVCM%6FD`t6? 
z*@2i4kR+u(hnIkfJLM6a@+6Z8R9iT9ERgG^z}SLi$0c#vmASSeYYf^%)CscZdM07< zkHg}hCcvK%<3i_>Yc8eKwj^H%Jub)PR~EdNW#G^bW{+gnzPYB)9ma9rG!@*5L$1)e0b^S)t3{nBfS-in^;rd}WYr1gr* z``*0@fMd4N6_@eGZ2EfdBBT}5b?3jw0FonPr~mNRRy+W z-mn!5L~%oEH)u(l&e-8|b)Jo>Vc>j!lTUU0 z#&zO^zb2VM1REzdCDzy%$hPQBfgVZaNCoFSZ4D6TSR&leH?S0*%S znn&_AFn19HmQ6btJQ7&q7GWIj#ywVEGOWQnGWIqKDu-NK+@DuFgj+oq6uS_mGs?&t#4CCng4hrHFiM-8&HJmM+7&uH^W*Bb^&G zH+2()6k_V1LMKcqbx5=c_-Zn^$Po5RV~71ZEIJ7+<&-C|jg&lL10Kv}qmtt>iaDKZ z`C-W_4e?Da@up!eaG_mzSxUVHReRd|+IW;_c|3!$OZ}{w{1m*|avcW4-2~xu*e>b& z6nFvD9uf^N^HLLZQ(r#+Iqb&y@@@D%N$FP=bV4x}Rwa|Kj1EQq8Q4||Dk7Z<)HU#7^`3Ob(?AtQcSssI?ZSlNWxy0kJo%z0R$t?epjbU)l z9omLyXd{9{%^Uji_gQEq*ll3EEcK4H0qy2@RbsIcroe&?F9u7#vP+Zeb zg+ysia01T_YXG+ZM#*QFuJJI*%pBVMLX)W(M;0?YU@z)S`#fd~yU9$vAjBalvlWz; zm|GlT-xMtJ+FtB6#beq}>of0dALe})5|?N8M@HU3a<)r9Zv%N`z-Gk${X6^bESoZ! zClRCnqNAewF8VqqZ;?pgZ|3s*DA19$IP-`TKB`C7yqHku4 z>Gp_ z=BHtaX~G;QOOO95jbA@lwU`+%l)9obdwhNMgz9HYrPRy9r>8?}FeVS)w<9{$2^McV zK-R&Jn(e76ERE#8o3YdOKfZ)E9OW5{f^3*RO#A(+%6fb03S9ERryoX>t;`@=#LOv+ zp}Q;_YZ=D#HyP;#{R=bs!D`K*89xY@D&rveTGAYdQ8Ml!1FFlVoRjQs;I*1)N zP&5>uD${vTY8tUcpReR6p__9dHWfCy%6BPfVLQn2Sw^QOyy4gN<%Y~*#h9v_Xtz#} z*Rm10W?)X&`uU5?nAy|G64q8N{s8_ zuZi1o5ANLq`qi#I`1SMYHJQ#Og1Joh>|k-SqpOhL$9~$6E zc(HxRpK{cDl}<*oJ82z#X1U>&)@JeMbLxx9L$3_E!E9>|X6FU#EMe2CEDllRojGG8 zt4$G8(_6~(CaiqN)NKBpN4a*I^sLI>W)O{w^*gCE$-Sm3dn->6E0BO-nrnlXn&D{IC618{%7;Ra1 z5J$I8-!rDm_>Z>sbIS2wj=B)0SyrUOjF8f6a zCKUHGWJ!>~BNQ#1pB?w4BZ z2lmaRH&(xujNbVbAphiM%&&Ll8Mh>Bs(;Qn|LYijZO$z0Z~@B!v(}QR6vIL&FJpM* zp z$E~-w47(@X1EOGUj&ZV3!N|80E`PnA+=JIt@-m=3A(c;%tHi7fU-WHjgmx4dI&@50_*Xt-~_ti?59?{!I86hvn|%2iRY2(u`WlQFRqm>nPK z`@OScoo1bT&T9x6f(|J5hYkBtI8?7VOIxP zR6J0!wI?=sGhy1}=Ny&trCdP~_++w#i`O@wcLHHjX`a0eUO$_+TQWd1-mfmQ4Q~CX zfC>i*UJ3I!YC#$A)+tz;X44jDRTw>P3XCzq_m>>PMs;d@%NY&JPUnp~63i-Cu1p}Q zQfyw2V8`v9r1Jn>zMvDUitk>b#Kk-$5i7?UplHQ{+5^XG79u%x$Qt?ekZ_da8;g=JUebR!|T^e_6#jXAjREQ`bo^!IXItz3tToL@J|9Hq=a zdWwng7>IEzTWCt4*6%|dm>W|q_g5w?PI_fAOHq!ENZoekXjjB99X2?>dh1xs{7bNZ 
z+zhQ}Jnz#9uS9cK>Il#EmG`BiT+Oe}*Ro`{^DJ8Xr1gC)Z|vw*J&V2bwuq@MfA3R~ z@Un)2i>zkFgLh{H3Hac%1N-?Oo08S(AwO?_+jAP~$;wu1+!LLSq$w)0wcgQX&vt3ymP!Q+E))x-Ph$x6 z=yYc;eT>$Ms8ZJ$eC6SFuM`_jg!$8X+AA=chq^BPIr*Do#WIDF^gd~`7$`SKCvmsz z+C`Mi29@0N;Xg9})FlkQfq5AfRacpZ z?(H?sWvSL4kNL`GI~h7_e}L(~7tIDnYp?Uzsa&}^o?Su9gEcuD9Z!mSDF=`)l{9`2 ziG1Z~OHFE)NGk2k{?j$V&`y2BxzyplasJNfmrV+GiT~+1LPtiVMq$Hk5~6ETB$kp@ zYW~>&-mgFI`L2kHrj+fN*GYzyi1qa2;tE{0o##(jTxA7Z2G+oLCsa;EN6a(b0O_LkJYa_koEFKL9zSy<}^NCq_< zKRi1hlGfo))74Ub!C8;aZ)pY68f^UBa^ywxnoNs7dRyqZ+s&eevt;ZrRzbagKK<*{ z>2t?#o-@>LJBCJ%d*3gz;t1nv3~bfxs0g!Exp~9N4Yyu$dq_hC^XG-5lBgXb&+4A@ zw39Pyl1K^h?L<-(Y;#F(uG zN^6=;92_?s;&Lu1kQm~bvl;KQnWV~1n8MnLfY@SZlHG^gf)`U51obcF9&k;yNKP59 zIV~(*sSZnDWOEK&&G*ZqMyk4ocurB?aw1M{mUGB*pPol?F+W~ ziw)g@o2}cI2 zE8hf%82ZPK&j$-fi58u-Ow(gbEb~8py(jh zU+^p!RuKr%Eej63_V!-6+I``$a6YQZg^0PS&y!P~HdkdVt@9*PSJfwEv$9;svl|PQ z0_)}SYuCOkRc;i(@bXwjhHQs53fGz->ke$4DI^a77rD-jrdnWK4L@z%dt~6Usys@{ z%w6?Js4&0`)*{)ZLfy2Ig+|$+W*9^J5;vJPH=pJ8$31Beo0um`-0Veir^uYokb;4< zDRT$Br&;;4A357XAjYPa**4UsH| zzl(W&iIsujv^~LLkA8+?0v14Sq%p7@=q{v{vkUcFvTEwv=u{(P7XC(!Bk$*mCbQMo znsGXcMuHey-Fv)ojyaWMigXLOsQV#CQ$6WM~{P2>(&Ce|>m z*4HH)k!;eUo3Bk}Pb~d5G|1zSDcx`5z2-GxaK$={*6*(n%dY7eVngqA1=Te4qsy?h zl^sytIKtbr5vJzd$1`bX-$IRYoS#JeuFdRAQIGlfr}cwl{)S9zPGn;B3T_$Q>Tds* z&o$=iU*xoQ;$uo@Z3cyp(P7#6E3CrZkrhGZl1wi`rphKVe7eIdlu(Pbp|!Yg#Y?T} zBW6~Wj{TGw<)s%;5_hSf_70X4nR!@1{3!v=4>09Bee&@B>`1g-$dFH#b#7vk4`W!{F@wN|^@(yW&}s-Dc=B}(rRlQKyctfFac2KHWfJkpI; zFDc$6fWmFS8PBFs!?7p5pNoC13FQgUm995Ua=&7cvNbU!GTyVf>#)a$Wf#GKg)#ll zn5W84#@wO}MLIG?&Rv8pa`~(GPLhJh4lU-K64H@;{s;(GzKl)m3&6)`n_(ZKk>iVo zsKEi9(fT?5@1bsA)D_>YqIV>b6BilL8u{fiA8fUHbr#uOF{c@2vTniDB~ z{M!{{Z1zr;@w7}qx_;?~A`7ykIp{Y%eq(3!Vz|8@FJ}0AgZIy5-4Ij4!H(%`hKGDS z^7I3$z;OGHqiv3n&pCQY;z_$r`c#eC-0ncQkjzP<+;z6cfISd#Y8mM+zTET?`utgb zc%j_6US!&=>|THI&UYX|lMD$%HwfwVioaMhvD^i5s1I)H$+BFz$RBB@aJk{RFh4Nb z2CV)+ShSj1ZnPSP4Swy1Y_CI+xMrTTpS#VWmfB>t5`}Fc5Kr+B&R4BA#86&yvUSf7 
zOO!6t?xf4b^c>oTqS^l$>bOtcCJc@E4v!F`i8$asJb2nbZ>7~nJKN!GjkqedYlA@N9l60i9oRX5=k+S&_9VSxQT#3Q$NcHxS%EYL0 zTKnoWEPo!M`Ltd04Pq_kJM3sg{n>?Q7+@Sdg0GSoMwKporbOzzJ7QSD&`m~O`V)!S zXxI63bZMepcmL?}yCb^e_Dh)rE{hJXV36Upj*DY-L)qA0+4w<+(SR;xPBj4X8?Ry(KL6jkqD$O{bEaIg zyqwgp+}1t4n}aIo@)6ew%PYtunQcNO0I43**R}>>Uqd|Y$XYN}Pp02mgZf^qzrC1L zy^Vi+X}$1L`|YiC%ggwdHi?8D)I*Qzp(pkH_f7mMtn?Rar+}xb7w39|>h%FIIri4| z*CYOSe`k>Tl~-_=lfaS(s*>lbVKizI$G5^b)WUhx!v3g6{CN=pQIm)zpmOz4CjX)A z|3h)=NxI#>#nX>s)4Rz-5Hz?Q9rqs!(SQ3f7#08W_O+KN>`N3>?`HAKyG(khSb`v+ zCz+*3;}c2WgP}ja{Q#nl`}5xerAQpp30^!J&(ukW=)?(h(hcJ1sxZ_(7`U~1f@^ew zt9s(q=!e%k6a71r9;zq%M<-wH1h1x0MeA$e^wo0*QeUa7lho55Ley7I(mq6|emzOw zS5JGTp8h#H?Q3-Ehm+LT(Hf%|&_RHUng%5z=LXdvgVfIRn`}yFX9N$Whi)4;qkQiC ztzgNY`6W|gD{IY-Yyok%khyD5xjVXwpMkiEAhs5KQ5lGh2r79-e)6=d__IdIhDQ9^ zX^}p@^eMiq{gk^?S9=gZN8du5=!ZPKRW$()GLKUx^;ehOsvPMLy!$)Q^(=6PP;*-o zZ4g&I1FkZL1YCCBmYAIdUxOa*Ys5F9^_)?qh!-Wa1M&*(R+F0 z?u*I`cI~T|sah{0{|F?#60jf$W>Na#n>b~Aefls>^dLqhp!;d2CK?(`%8XIrJtup| zkXg0KsWAf&Vw4RAXbS*%0Wj>}GwgakbfahFLG1ADp5fG$o&aTvl zkjaMw8iU$XJa^Qw5Y>hAiGkSZcd?V+0~%O}`f*R%3@%(#V_y0zr!AN4+Vg_fzuu&3 zVS9kRH)4xlr`2(moxYGX8K-y z=-d0Cv-eqNf1_{j(Om%qL4X%5ctOsz*RP2@8!Y=hcqNu(tY4@0pl(1<+TiuE>*XW0 z%g0yae#<>L`ifERBn-uApFVzlT6S4F=`!2`7;n&4pVXcl_@}ngqY?{Ijg3=Tjh%-4 zote;{rn!W{4J3YF!NhIB2z35oL{iwgAB^-g)5|cGL&0yMGuzuZQ$RP7O(Vz0^pUtt zjlIS^Kk!&ql5}u0RcxwIC48%=u)XG)`vp@GYR1aZ?Do>9$F-7biu-BEG;u6qSdzQp zdZUZwqrB_yYMt6czf&~xzrEsYtujr0&ffI?`CDvjo|v@k$LHbuZ3!wI`yNhhi#6%& zGEE(?TVPjS=}K9n^zWWP*V}0hH!k5(M{C}N5u4K`(vibGr;0vf=?Zp<7RxhbI?w(d zC;u(F74p74WMVkv!_1GBmv;pe4R(h;0!3WN;|iVcv0tY=pGTBxb9;TAsT>Nb{;TQc zH23OS=GM5u>0zT&;nmlr_s)LrS!P|^r}WUWAw9sI*0L*IVRSSuqukf2h2$}b2xSu@ z0ntOv129O{62sa|zZP?WJN>H}H1=i(m~`oF2AOTT#V-S50QJ+#_%wquwQz^bHAulv1CRp5GKhuXe!yyc~DJ!q>1?#iQ= z`hi+is#=JUg_k;PzPSj!Jyd)rZ&jcujgv>+R$c(HL6)1PmQ6)-kO*OF-7ktO&vShA z=f^8YEUub+w=T&RJglcvC4z@^yh*Zn!wR=dtgnGHlwhiw)Ly=m8}#5K-vF0DhVh#B z|6crkmIw^`zB?FTzTy^s%X0D=AuzgV=V@UqeEg~VUUgsu*HMUqapQErz*+-JOuB*Z zG27Cr_+2*^w5mj@FLxd!GSy_ALO|-Z) 
zyl}YQ;E4xOJJwQjTzw0_{`vHC-PwP9vZfozd8ZF|3BIbdIhKT;9AfN3obJhr_~r1_ z#RsrMwYY=yc;P|Uj)P$YwWl!8DlQ3tzf|X%&Pr725Hwo(#R8utM*eK0?RAXra)q^u z=M9X>M(MJ;pGfnE0y&vWB>8y3V60Q&_--SM{2P@o^pMDT?#2UxB|h0ZAkf?u_Ne+r z?znow^3v^<+I(khy?h`(Aus%=%Dvc9|CW~{80y1JV)>*1{m+Nf7F%lH+8KoY zP_VF{d2;u@MT-29cP=&+nX*+Ypjv{pKX+B|cTB?LBOTwucZlQm&)%2S)w4=Fi;8Om z-l3vR`h|i2Xd^JY6;qFhq1hx>MZ+HPysR}uASzlej}KC7wUO0&KZc<8NW*G5rdb56 zM55a9mKvGMib-W!6l?;}$7~Cs`T``h$X8SCm2=%hsu`>UQ=GaLs}i$+q*g20?l?Zw zRJNuJy1;$`f-bi+52{KRwAN$e#_s9X&&Bvy^IYuWhS5%m3J z^3~pU=WQ_4Rx!hAj4)5(w7ptuYoJB=d!8r5x%ag4>a6}@AFi#eLhWPnlvfWa^<3!^zeyeGd5z8LJaDm9RX`mRdVj)Ci6$Z= z#M4|i%5pZo^Gj3Grhdu2U*E2QJ~Xp_pM9thA+G&->{0Zm_*VTATKQWFSEOp!r2I{6 zAJy8*HwOPU`6Ci=xo*DYLFi8@n<}Sqv3esVmp~u?6}s`1t33guU`uz!4ZX`X`T09( zcY1r@l*RB|%00`!QyfTtLvo<_zwZlARelYKP;hqdj<=YKeL_Wiy+3Z*{)fol1?au9 z<2aXo*4Ix4*_pqESJtbsGc{b9fCKSpJONIpax&rFhy_UG6-? zq|{YjiPXn~Ei!Gb$~X0g_3uunv3`=P*wgyp1l14EeovTVryuHU>NdD+4C^6O3tZl$Tz}EVwPsd) z;0(DVcuj*h$R;z!;FJ@+8LTW^K1-KWpUBL7hal&3a`Dxc2zX@j!Aph`lQ2}p6VUvi zSCg-@Q9hVFU(X-3`D#H`Hb^jzZ>8Hq zkbg*gY(*O$khH$@2WPi+Gj5-0(B?+O0bPLhyJd~2=E%F=&QkZUJT!ApO?<}roj+dq zd^UaydB+X()3A7CF7T!)puMDb{r$~&)WQcncpK`Wk7oQ6Mw$dsy`a2jC0|4Z#Vs-j zsjjPDV#cNwNy!lg*E-GyG3C70I^ubPP2|)D@7C=IblyCkYKMkSm2`L~?>6dqImqJs z?U<`lDn*m`-DfyM!@sc)ppE?FXWIlH8$X>6xNEH?9Qc8DC}csKl6cQMk>S61cAkP= zHuaAKl7-ilXicTQQ%5ai*#D#7b}$>6N&Xz!`2O1s_wtBoTt@WZh+`wAf26T^X4UDT zsl{V;3_r{CqQ?2HogP2#Uq#ee5Bd~{NUPL-P=U!rc-h^Lb|MdN6a%R~p=>26$hC8! 
zl*tdtksPNY*B<}5%I?%799vqR>U^G!EM##Zn2BOt_I|N@s(GmIiu&H}aDV>p@Ynl~ zU(T`Dw%xF^ZMuyMR)pj4zEQUdPv8B^a-nAXZ5@}LYi@JGc<)n4mst6v7bGypNG<=H z4g0LE9xx3St~;}|?;M|A-}@X8IMY22`Mx(Jdqqbg={11XAy6bkCR3bXd+WR;9t*hy z$L$U6AIIxKH)_7{zzq`U*Y#{e|HH|{^qc+zP-07g92uVCvc}1w%@Jnwr}sm*BGx5D zzFr#zmsvd=uFXtsS!ZY=NRs*&iL0KSOlY4;Y>D286W!i{UWV+}c~Mie*ti#4lDF!v zcZ&h{`iyUI;tCLkXbwjFzLMSSpLss>K}`4qZyeLGF(|c04Ymfl4FON>0fIwM`^9T= z#USh8;B_zg2;lQEdZrUHTTwZ)ho0#(yL$fpkW+@Fy?->mJBzEPa_ zF|uUM3=X}w<#=Jfy;Mv0KNVNAc7;-3@Nd?92uaUhCgz@rqGtc{9I?pb@Vv@Jj)$s7KMqnRL;Qt=uPnv?h>m-D@}oO+Lyb{hm5+GOIXj*0|p{+wn(JNmS>t$@ACbZd~-9LKW|!ceT{1T%HQ$L4E7^$ z1jHQGKEl2$cWC{0tHt*>RLIll2mA|@nEpQ)=x+T5r#Ak>;|dYURd#{=KJW-qzdb{j)aiC)*9jo)j~;HK;{cNW7trSjMG_ zNS!@z@S*o*5?(^T_HqTSOZQMy0K$bWSnEJtC=_22sQ_ib)uNlKO(3ih;9ukAVmJ{UwihF?f%q5aZMhm{S1tN5n@u~i}ne9cFe zBZ`bRL~}%ds3Sn9x5$?R`_YARmu=9-yYrHx@;e1pvSlI+<)I8O`gz{~@X0w|@!ai_ zAI6jYX+#DK5i{A^^a^N>{!A5v44fLFh}gOz_}_jffLF?W>R9*H3TlH0_q!r>sPsDc zt~v{gBxrv+tt&RpnG(6Hq8QFH^ANEVELXn<-7=O@_O-KDaFx8Fu3J}k9MaJJ3p_H= zs3}Kyp-iZ;(;VA%YCIC<$MTkpSq!zmaSG7Or^PhmC79AI@6EXoBl>@jTQU5`s>Q=* zlCN+$a1gE}^L~4Rn&3Ez;>i?T^b<-ZA{MvX7b7N3J8;C+xtr8*yCwpB31vFyC0(zV z`aT?8TVbEKH0^ea-Xm*3@ali7jxuqUol^*E!2r7;LDpiex*e`K78*{a}8S)voRH?o&~3QLX!|&^c8z@qIU{@4+3)M-irL zPUY_JD%VjA6l&K1)Jh76sjObkA@`5GH`8QY@M9F9#@k8jiM$0(r1@#2TSb+ zO7{uLkASgNn^d>NeHUxrw+&|aJd~eLLfJKB1hLDY>?}&@*;~FV6+c9@8>{S!T%BT6 z+`}qD6;#;97#TekbsklPB0I&}s)H+4zGLs<{fx_zv6+6x?{44A^sDjOV3uLF)!b>Y zz~L3EHQdVOxku%S%zC*c)VTK9{$ADG3|>8#k#SjrMkTMQ%Z1*hD%f6xDgyJHWI?D9 z-vLF8rCxNmf#3PF0#bW1`tIqSen`07^DzICK7 zS15=pNH^;8W9u8b9Jd5ZLm88Dw#9NP>|P3`bdVT2^`e?#@B_u0u%MffjeJ^p@>j(9 z2jpnB!O?lQqr<;uG}p_!NJoD++1ndYht0qD$x~>fQE?DfZWm{!O+c*50u3C7cN!)F z{FJzDM9QVRKc(CFvQ~^o;JavgTyfNAG(d61(X3n zKI<86kk1Xe$F4#9oPnK19PhZK0cv{@jwoC)UJ#zmB+=#OBfM$S@ZpZmZ-no(*)x!pLlTWHzDJrxn*(oQh0bxwE(uSX{X4 zEZ0~I-hMoOtZq`Rx&3X*ZFOpwPNtr0ru(x(KU|K)P4x+#*ZS7%-8Vm!t6 zH#p2?twLssbc@5E{45DAXOGZ@ieWE2ZdA4M+_XpzFAP)y05PJdapkn$cV9?ipaGs= z+vaQ@GD!r5$h8wBH*s)nEQlHhioq}@#!D?-n6*gom{kz$Li<9bIpBgTV?pe=t_EP3 
zIubp6An)1s?NcLE>>~CS3uPIQ0}gd;UxD4QK2?I$QdlrOvA0?f_mrWc!{>G%7R-i%oOfUB zN$F6>Nk9i=n7%5s|RBRQ9jH2-1V38`mUO0Rt@b&=q>3T;jTk zzKxH%8$Z;Dfl!kmE;ta3NL%IQNm;_KpQqaWoh#1PnmEVi1a+AU4!0&wYf1J_6!f1rKLhXi{~GfxrQ% z-s)#1ye)B}_8SI}>RA&gWr3BmT_6Z_aBDOGSPze2!Dn-wvizM^)xzNyVXr^;LJ#jk z`&@dhP&A7?({@^UkvOOkcK7N1-epb4G`quQeRn>~UIpCbPHe~u-}Hp`voEi}(ACM$ zZ*`Bgs?=u;;MoKM+>)rj$$<3BDQ;JChJFLJcqt-(@ zBIdsZ?fo$@2WIBPvo|A|e0t^3oC^CLY`kstF0kq;CBy{Wge{+LJe?7_Y-+0N`~D$I z9lb`C;iE+achKK2d%{q|Gt6^jzK~{a=5YQZvg^(9C?&zKK`k64K zChyJ~?_4M!EHvcGmNvk0KNxJ7Oe+a{YHbas@*I|g`pD9swl<6;+E^ffi+iEj==U|h z8fpYW>I|O}vAQd2!At!K-=0hxgOOCSNdx(oFW@&edg@F_dL_?ydhYyd_TXDd15S?b z{A1MVIr+IcQG7X5r}ymF-iKEYm@fC7pB_)_^E{AyPJt^2B&!1uc}f=vMF1&Sv`;9! zTy3M1g{0sx^j)ZSb6HUo5klWJs|W@QZ&%R8y;_NwkR`3;n9GWs=~dAGY;2|GR;(Z= z(m?EW4Y?ho!Zwh>#PT67$AA_b($U_N3aP+oibE>9%=H7780L*M$KhzByx>7ree`_n zsV^f3FXFMXaIL5%x@{0_&WN|l^te713#>MS5x7OQ6l<%w5>@k!O`!F4%9;%iM%Tougq{NF*3OvUjC zj70p2F03hG#f-(0*?uifA_J~ug&QEqmP=^W3r5z`?v_q_@~-wLk7hY}$&!-f zLG1K?Aq_0Ch&6Qp>vD>jpdwcNq*tu6=Wq0nm{FYaJy%Q;3DNgAC( z>5PGS^65R;=|gei+^FIllYd39Y!gWE_YKB9X}p*f@NRb)u@kn7xuqq3DXNHOog zJvQ0U5u1%9_pEy;r&&<3;aUdKTOQ5WG=E0cLKYb6VK$jQkmZ5`Q-J~1nx<35W-$gQ z4e)Sar!=O2rn8eM!G3)p;~Vhv6hnpjwY(%m$3kK9ABSFp6!JTXsU-2&h#=)p*_clx+oelsXBJ+kR zEw@x&mYYzAJ#x)o#3s-v#4TU#JWfc<3>#c=(BT=B}Li(<|Yelhc#M@BuL(`r3KisiYyA&9VPG>ko8er zq0WS)Wiq=Rqb@8cJp<~m0hCBAr36QWp`UGU!K_x{kx%Me9qWu3t$}{l^J*meVz024 zmv@)1Mwt~=;QL9@aiH^y(s_l2iw!9cm#mUZb=@fljKzoh2=lz-H0GK3sSv(E1m?<} z)vbm>i_zX`Uq+2aeOdrF_7R#-Vd+ziaeP^))ZD<|%_;1D`d34H^I=u{H}#U*TDm48)) z+g&i+o|wbF6h;Mx0W_v<{ooWIDknuE&D8^vr5eKBul}l$YYZ73Z9Ju6xq)^Hd0cd6 zKzOzSD9h>X=F z2QgIyIGJ%3BIRyo%fDn-dbWnRkJxqIUax`fy_7*slFRC|uXUDufeK%|Gt5l#J36k* zUHf8QK5oE(EB-!v&og&!>Y`$Ua5@_Zxe_Q<-8=(Qom_0OUk$MIC*k8pfC0 zkTB-W7&5vRVhwOD_@%9PCbId5fsl(z4-qrI?Jf2zAlVUGc$A#>Ci(D zo#N09L#If0g9wT^Fm!i=v>>Q780bg|NP~oeQUVeR0*Z+9e|f*059i(wXWetxK6~x6 zpWmaY0zDG*xkbHi`~L32y3~^xOtwAJ5|GzuNW#jQ(ZFd|b;=b5?J74{YB%-&P1eE%tXb-d}nj z^Q-f}WpofV)dv%2_UB87NSam<*B0+mkD{5xm>AEkR?Kf~u=){OR4ihmyZ1$KnUKEE 
zbAjsw;KxKq#=5#dHa0|vbtI9caT)}Y<27 ze8EnfT!PgW9V%M;#ir5)!mKN86f}=uQ!F- zFN_)sK$MmFVHJbUTT%0Fb1`j?d0y7-P>ry5ll3j*r_)HcV{x5&)@^Bx&%7817h})2 ztWHCo5?dNSL6mMG-v2_aAF4;@9N#GeCe>Ga;k3N;%W=*AOqw23i@wBSDgXfiW0PxNqYM{jMW`?dQV-paebz zYZ3rj6?ii^9fXIf|AgV!VE)1B<{Hq_0i)6@Bg+oh`%Zx?NFkwLWey-=1*B{b1K7*m zy2StvV-Q4Uvvvl1So5d=z=CwiDzq4Cwj!`14q4ESN!Jji2f`m>%@Xio{|$x$0U!z{ zc-;{Y3ZNfj0Lj_FtLOmNju2dC!puBGZZ9~G8GRdr_|yp|9(z?g070?zJ+3#ZaXEF) z6cL0RFo6DAaJ-ofJOP)J-)g}-WPVlaqe#RuThUWXIkO1YS@FsO1*ef8HCUXAz#li1 z1G|I$fGVdO8mA!507aXuV@bjzNrIPVU;!!^#ReNFyr;t+8^Ip4OFc_3U4Wrsv`G2l zOnuLaX3z`(&j!HJSIS<1)Bq5Q1Ah;e?ym-Y1*Gn*Y0J*teci^@nv`1ct>ht_{bM-5 zluwR>KbnLE_HNrt5hy=AlEXUdI}Pw+;wVX2YUNh@g+j`KM7a+NWt|C>OB8&?dC+G} zhO1ows;WSnX^8(?09SCi&d;0H(=abQG?b)hRRv(Di?MDu3I|t~%{e{K%dFcp-f_>8 zz*!<$C1$a5k86dtd8x^BYqvl>f-#_2@Hc_i10h$93)OUZG=LsmM917fK?y`c*68R% z=$M(2NB|I=>Pp1rJ|LKJnyGOKF5)*#?F1Y}JdP2W9Ug;brt?K6V3}0u z=o{!jH(c_iX2MKjnJfgEx#%dLk*uugBm8;1@2P|M@zeo7z5Hi*Jc|n48H7Hd@7#N9 zb;N-BFpW%jgiQ^>#&xqrb{D3^vJn;% z3>FK11PliHb|z1|aH!!p#e#hw_xOfu>GIUdlp{*h#u_t_vOPuY2+}}b(Y%6BB*=T(#oIu1c#=vP>#H;JEn8c0Gtd%mLOg}o>@6gy-25PVcPyh$?rKg_5 zVjXR)1=d(@o*b zk7M>*eJR8@|y#Q^%W+60(=q3MY&XyOX#0zsIj@wE3h+9fq~S2!+&)E!8i+ZRv;Y? 
zz(4}$<18{5fIt$!NCz&4c&m@FG)9=X#IbmM?YPUMY)em#LBIa5tKcpW>=WV{%6i8i z)@6yOEy0PQdUeB@dK#Lf-&piR{)ofdYGf;-^(MB*3 zfv5SFoxFqP+Q4#bWYa7VQf{hps^Ln)^OKtw@4f=ke3VMfd;jkK!Mo4ErrPCu7zVVk zM!=AQe3b*GQyyi15pRdj$g{ut9UmX_fp|Y8lI4WV(~CNkUiH|@Ygz12K2;l4OW>4UPKCh3pKH!r^Mpl_lONBVQ>8iH&F^E{7q1)niINz zOn4xjea5aPaqe8^Dhm0k${34*G1Hfs3&)Z|>4Cz}Tw|v$>CfHiTXXwmU671z=JkO7 zskq5**BSwrnrb>G!7+1Vb&tt@98dl2P_sKe(&HgK^;Zm#eMr4!vbbO;YnYVeXNE5S zh0YKd##5?Go3qGuEGZ|)+WWaArREShRP*93`x_t)K~8FuZukv7%>kOGD5^2!BfD8V zwZJOplbIrkjFR$XyU4o62M2RE?x#K`C$qlt8R7aD59LFd&v_=X%;p@S;ISm z<7rXmeR-R*PB!z4e&6KQhXOF44^CkJHY^amU`NMS2!1l7JeGDsBZ;N1B0BeoQ6GI} zJvLap$BUV%%3>*6{04tU4Uc`PNvQxpLe8!yx4XvT+OCC|HRh5EbF*99F`rt(Hdb7x z4q`F>3=_He0y(t;$GBf$@fi&`e-3;KWNsrQibq*0UfG7ovsy-K2n@1HtFoBit(Q&r zRC{4{3K|rjN5XF2!C88zB&{U4%!HrKK*NemAU?(QcI(r_A98xQ-MG>}6^(>wr|e4+ zn&;%)qB1}9UZkdCjU=-6{pf z@ncJ$1-`mpU7I;Bw}YN~<|jzfO^W5mEYj?6LWoZNy|zW0*BSxCtYZ}`ZckSxDzM4A zHu5dkKx=@UB)g=Scf;nTZ1R2Q0(6(Ndz*`VAC7QDe&)D)+51g=m&on6UonD_rat=j zxLdJ|sU~_J=AWAXB|wJ!ZFuN0M#G`UPw9O)Y*b6Yix5=tS}80KW=De2yk7fYw`LvM zJnsjU1W2N2l>dX9vSfx3L13GwDxqA9-Q^)2G2=`70?Mt<90ell(GwEiH!`TzWGWA^ zmCNV(n_DMlyioJsMhq$`TMDV7j}9%KOfV^3#Q&yZWau>U4}JQL_l|#L-K|1F(v$Ch zl00Xe>))Y!f5B3PsK^k#X=9&Qr!JT#DWmP%#`|koz-N6=KQtst^FS_ z9u5UQD~vOf+kMDhV7eK0@N^?Cvv1;=^k&Pf$n~C&Q__edMsbBm;`Ick&w;fn3BLt_ zjCguWuIvuFbEMIb=ZD}rCy9E~eXG_gE7?~G){i3|00)mjn*O=_Go&9#H}47RBVEge z(q~#}Nkz#Hy8g{OpQ%49{ga9zQ_JM_5>j^7uFpNbBeR{|Tef`Mll51Juhg)X_vP6< z-y8H_u1iuBWzmZKg6Gdb#+Mx>UnHkSSF9h%OP{?-6N-`G~#JFBrZ4UTnC7G7mixC$JJaKkEONpV9oZ;xwDo!X)sZHIiF_+ z-Equm0*1CqcCY*O^Xjgw?H#oJpr@Yit)JMoNzcqTSu_}zNZhyo@EGyfv`D9GjA;l0 z1OVWzSej2t{!8uk9vn2E6nX;Z62U?Wj@|y=1BV5E6Oy<5L#IaXtJthiU3ox>d#)@U zyN%=sm^L^r|MVCykqg>hSw8Wat`_(~j5ckt1u};I8M|%BDsk&w#Get9u;UJF&um-DN9R$cQiie=ZU)9(f&*WJjY z<*B0InXtbvQLUDfrOeGSVwZoczCV-=2u; z4&(LC!cn!gqGJ^~8>_C%@{jb>{{b9pQT>_Drey}N*ACDrz1YppWJ=xT$)ub0u1z>- z-9@uU`H$49;ds9$db`F7y!Lg!D?m{m+MgwWanN{NYWp~63RmbnEi7D>icA%F*b<%4 z+}x?b*ev(bUZz>r_bna)ZPyP5qbNE{JzN^ 
zw4Vn{Ut-O)%bFHkj@N}gQlmBnxVr{xv=uZ{a2;Mn?@%Q%9|3s zg5gbO`!%4)A63&Nm>c*wkzYKEJrz@Iz^mU-tvrc(RGZ5=CQv9}*f1fxz9M2$y52d) ztn`*a6hQe3;f3u1+%&}dFSSwPuY;Mz#BmQyM9chUzvYPsdjfE6h8+uc{hpFv{<_#7 zF{sA8AHiKfn$|nAubeEI7Og z)_h>h)HaX`e;vZZ#xpi^XO)Y=$u%`E9tyL;oBzQSng#k8NZpZ6#rS6BHZ*RJrVlgQ zcvd1sS~gDAqUEdGm$#eocwFop=g~HY|I_D;Q)-llj6Q0V*LO`%{9#V@SpvMP zX@$u{1o5$y?rV!zAPgkQ*>a?$EX0zTlXAf`j%m<2IGjh})~-m9H;(Msna!*?Ah2(j zK)KNVQATmMq(*=tfpoyLK94HBhUaxXQ5F#X~6^ zavLAdnnIQHa!qRMj@|stOkS82p9fmdes*>&jv*~8Bjv7mlhfFe-gV#l83Qr1S<&7* z%%`jA(OAA)*L*xd&v?5w%6p33VQHb{94)7sc1|WG|)g8fc{*6axRs<_Q~zf zrQ+*mh&d>{-9o+}$O>W*@d3s2^U;2Ib3T-X)uVjp0tzOXHdxk+did0FN3tV#8w%>` zghOY$-`anWWtBVs&>H%uxtw6eqnQ?FbYXQ;3*Z!QeFXgIyXl5KL?s3MVWVM7{ z-j^86R#RJn(hx3TWta9h4;mC-3P?*uJT6;JlWQ!FTjDY6)&Zw_>!!{R722l?AUP7lqo@YJ_@dQ~)e*Ny9Vl^757sZhM?stnV>sEPQP7x_?>u&v?z3 z)fHP7F#D}{_8gKwb5s6=JiGsI-!SD+)?G$gH0{0mW(#%TOIwacjq^;G zYT%3Up|1!RB}-o!QPrIp8jeOfu$%qaxYnge40h2-dS7>Co27xEtf1t=zU#T81JR|a~JnV;X5&H|;cD}SF8M0K2+p;UDrn4h@8 zK*tR##(QI#_U#Y1JFW0|U}`3qlSnTzJ8cXW za!WramQ$>Bc9|$UhP;>e{t;CMHyHW8ZXL-G>DXRhh%EaoyWBvy*!(tU$1_obIe%_T zNR5_Cx4DM_RBFdO?z`{&I<}ZxNpbUhfHfu?iUV4en*Y)EylBhlEA!f?4tZy=7upus z|5&sjNT*9){250^>)&_kxO|cAMy0BgxJ(=fB#8amVf22Yj48S>Y@bZH)1g8Cn%8xM zQ8#?Di(Pd1t*wK&DZSqANB#%HeRZ8o+`O8QkrSJ*yhU=P?6?g z(zqr|Q_0T4^fR;Sg*=13gwsW)fdA8@Y_ZYSrsl zY6-oCO|k{R>!SvJkI%8EX4g&UIv-sz1ak9`Yhvw3C7N+^wqa7J4oViHl2(e`Kc4*1 zZ1H;|>Ne3s`RvLywTwViJI^?}frI&)`)i&SYJ3y5vp0>i6wj_Ntw?h_(`@^WH~3)g z^|y-`tmozaqX?Ic$6*}L&Nf9R+YpE5Z>|8E&lm*9%n)-vno$U$_S^w7U(Vl??7+B+ zKqM+c|IrX1DGjBnv03xN7v8%!z>G`rb;cr^NvX9BshTY@8mT4ItuiGoGHhM5CnTeM zy?M%qjA7TwuP{wv*?qdReVOHywx1Z!Q@YF?OWC*NY>o5_F6(M)31+_aR-;WDx%xYN znKvWry?wGoRYdkKXb#L>+#R(ywLWuMonIFI}aj z3YE%uL~6+%{JCD+2vAS$99d}2V;qNtv|iJ2kt0W`qtcBGFGjktO=}MNn^K(M+9~ZEee>Vuo|gQ9-EzrjBK=ZGbL?pH4K%|`WGjS9kw^Gd z7CeP%BsMoWR!|Co5ar8|LhOy^D$V8+`WbS2QF%To89?Np=#*k~Y_etE4dk>z`N-&t z5#^kuAVrE0)M#I8qiwBJjWE@JZ2H}i&*wBs{TnGc=8b3$@lS+bAd6vLW{F?WGaCS! z8C8Lg02=!o=Gl0C7k}gJTr*L_>NhU5&ao3G&ckjinQXZR!3bSBm1l>%CckP$d|wU! 
z!%NQa@X8Zo5j>EvmL9?WH;czN!~%*~M_D6};i+gV@)@6<_e?`57D(Tp;LusQw5{B8B!_o_qE1A)48=JiAL9DmE@RCVqfC( ztv4HUTBLF{#4|)n5!dro5EQj-MzsGd@wbJIs{!mJYKt={{ml%EibTi8gbi<{r*IPM!fMNy`9I<7N+0s9}!Orwqa^B!7OY6Y7QFpm&&V)jD=vYn7 zg410f*Y5CNRE>v$$ygMr-J)ZqrM&$U=VvCa&!@ER3>7@3u%og>-{e``Ihf}8n}~AW zd(;pOQubl_;{6gE6~pGc+Cl`jN1{m68|e6WGh1#%C$SOoeXGJCa)~@Nm#nY1q(-p) z91GVtBBmVLM*bx7naQ3H=H>zBeb2M3;d*D$h&*UX(QUgtd@ezjt!61Aztu1QRV1Jqq5lM&0Xl2{ zRuh0U&H!%b3p5Z>>5ME~Lj@z5vKg6D&%y36e?^qUZ$9Dm-q%(JO*jMvk?n7q|=7B(VTMwHU58Gc`SNvM9#;=}@U%i51 zUL+5v?_ZsqV7F*IUAVXHB`Dk^D54ZS-Ap_^0=GlFJ-suxJ)K~o*`86Y0Ob_GNtaWg z4m6PL8BF69NCORG*$L7~FvWTWPR0XzDFkD@BAwD}(g4OJ@ICUCPa@HDgs3|0b)DoD z^V;kB`OZC-_^U~c)*WLy@3@7XnD0B}=wA?X@7P<4iSAgK8xH1#g*hq0-1HKiNV^%^ z-fnQ8%wN!4GBo$|ZpOl{o3l@*^|y?|Z<*WBEB_F7QmjQQM2`@!cl1pUzE{cO`!F$H zHxg2#w)fC!PZ!`@tFu>Qy;mdXYY5+aoEdMZm;h7F+tRPJ@lN=DyjSlHeG2pIkl5eS zPi-T2)+sW!ZdZ1aeOt+UHs^bc_$$#m%XF-danrS@*kz_IF?N%eIC& zpC`Q0@wWngd$j;{Dn4+JWX_oMb3bSLs+YKOp5W=67;vQ-L{oTT17h(T4%} z!vIvk2SJKZy2N>wfW_D3!^NkE3k!$yF^2)uiJ;t47DI}V?9#W26tylCVYY#O!QVGM zuSkx8CINws^??!2a=rM#7OteQb68L-0iZ~Yz$R$f#%ndkYvJRyaPeBULBBnJoW%tF ziTLp+=EtwXpv#4z%kMwPom<6jhN=ITz+vitYs9Z?c;9LFWWf4xp*ujYC%^>|xE2_21$)%b3SUSJpRYXC z`w^gL6JWp^5TM8&hD{2?pIJVjY!NRF!k;}fyWL`Z`>{5EswDvtMh`0H2q51*c2UN#Wz;&=cB;EsH7FvSPRts`2= zJ7G8Oge&}x`3x~50Xd7p(!N8T){e)zk8?9mSarXZE*vXn{VtX8LA^V6%f8cReVi*F z&f62pAGAjn?eP`7=38I*^I;%FDC_*8XK17KdHw4>y=h;0n=m=aFl$s;wq#V7{GU2P z!Ygin<)SdB>0_m=sKR$A1D{V+O%qjqgq8m|Df*pI@jFW8KPYEbloR~W&wS8b^tZ$I?^_gwzgh+y2i3*}on6)MgMxe?MfmAL{I6a9jS2c)8E-&$6hwPx zWN|!7@DA-xFhx)F^|d3y=cBN9!D0VFt|Hv9&T-L~UFIF{sGx6UIzH+>$7!!aP>X(r zJG++8$v%WTQBews12Kv55BCJJBI}3GG0)D|gvc8=V$ymd#ebhm{s_%~K+bs(lmE!i zwlz$dHdOW6!K_UI=Z%Aft55qTrtr%j^`fw-MRMV@-yA{VPPQ=fFL$5(2xv&UFiE;t zd>X#|`hiK$MdQPZzYp&c=o-X1<->N6rGGsqKRyqK&AAMCPM|fzSlTK&tm)U45?I7d zj1*=Zd&2~UxH)$KfcF3fBV+q~Z}XZGIX{!o_+7!N%IkM7sF0@(yjuCYkk&^*!gCEK z4>dAGZi{?)Z2io2;_+?K#iveh;;8T4{$hSlY)bwp-b=jF*#FDxRet#QM_o~C{e2w^ z;!djMyDBNU$7zet++IbpZGLSok{lY%R{DP1|7hJQoAN|s`+c$0@G$o8g@=FW=DSy! 
zJcbSDN0OY4V(O{KiTlcD zdvkXBcRcz&i0-ZGWBk&4J3Ve~2q&qn^e(OqkK7g5*FImoIBU-P)BnY(Z)3Xpo)lvh zwjbM(ArEEHTa*7pg+j~pXXmXEsTnZNvA{saYV0Q?y>%j^nj&7G#%`Wob17aXR~=Eo zNR!4NF+Z&#^n4eui?Xw1&{?RPn^9ybhY}!-{Jf0Dspql?yyl-l_q-IUuUp2+MjI4l z^WS3Hg|-f%%k@RK;@BFB-AwB1Ej*#i08Hdohz(EVw}d3_`!manWs%JNi#H!aVB=TB zQ`j}hEP=u$p9Jn-J1*IMMMlvZ$GqNk@xq0+ZaV@nDy(98jiY}ds)!?`>)WqS!F`7d zBf*1EFHZh?iN$i!senf}MVA7K_QN~(*tx{k#k;xPlbVFSI1fKvxKUg zYiUyv_x+!8M0C=4HaLTbR_k@{jooZfEP|>T~nRy6OCV zYKzxlwi}pHVmV7T(`EXHa))3?!x@cJFdBJzmKsVPcvgj`t^B-cAWqFaj+5Lz!k6dR zK5AYaWexG`)_#Nz{4nIneS1~gi#z%-q2pFmNP7xK{F0&5XV|~tb8Yf~+O>C8tIHZ%SzQUbs zuM%Y)6t6mrnF(tkZmso0d)s+)T7m6;t?PjpjcTnfyLkSzrU)WX}ud(Gr+#i~h$f8kX} za@LLah{|c>?s@;>#=VlQ0?h4I5(IEvkIhO_{X5l7nj!}tuT>pdIlE1M+V19ZJVO$@ zHfZiRSEmgmz=E4|tnX4KT{F;6!x~V2g#XDC8#abXpSn4Aqvn-pGQKMdcldFS` zpN4Mx;=kgAvV+GZ`1}6qrR4V3W@EMZs7GydFrm43|9t0%{ec}>4r9yEO@b~=V|kq2 zs#`o722+}2?^<_;75b$zL&K4FofH7MGY^T)+PulVdEJ+Q2Ix;E`wyJ;Y#$tvY7@%i zUQ`_q8|)IX4=%K$l{-pi&w1t2d?thoH@lz`6jm;N&R|Bp$7IBqj#a;Y<|h+?0;Ps7 zv9*ro$%~Uz6o!~krcPUTca;67>a40!ntK;WNc;l+J-%;*#ouhXu9Bxn7a8?>i|Ji? z3a#Lf-J2f)?~-vFHEHhdy8nsHkeGNG;=YMG|K7N%?&^i*{^KnmZnt#3Kuch=J|~J< zrP5upyRMRR$PKY7=B2W4*SORMd^laJ`QK8zQrlM(ngTX%fF?MTS5PL|{nlo#RA~*{ zV_%5^ek-1DZ)S1gc8MGO;2|E88Py)kzJ3RVihb!_!SsV%zfNUFsVLRnWaRstc9c${##9H*`Dd7m`ZV1A+s)l2k(YD& zB6nU4q&f=D%I1GFbps@EKNY0x6TbAVD(XW06AMeu6}5Ts_}i_qgvn42Uz-O#O$(j! zn&>q>UHNThn&eJ6?{!|34%#()#Tddvqcf@%xu`-E-_p8)B(SCwB^$`nx<3E)DCPaR zX^g#ohZMJbTN<^X_(bop|8q9qFSCL_jAhB5+n-9NJOSx%QG(r%T-*)C6HvwcQw0~ndc%`7BV}!q1-kBa3+yuje-?OTtF<$-vOGn7xX-Z6?VSg$>1Ro>Z1`{-ldN;-^$yJgWl?)6el1Qr#pV;%1g=j&>Yx*JW4e}Kek0bch%~OmO z?@axEnc#*)mact?TPzU&(O`I%{wbI|+iomODTygoi({(R-} zKtOD=I=wX3?oEMpM)O%BIOOPX%O~SyjJqy+hL`bn{$P$}2>*3_@bUhO=>qdBAVMR0{4zqrGLXIqmg!p$w<88^&s~Z6iSs2YZ8h zkM%QV0q33mric5lmhwdc!DaZ*iH}}C?+HAYT>XN@n;@7=x@Jq8md&1DsaE2EI!Teo zW1^Ota%&oQcQfuoOpMNqqr5-WuTdZhEZui!UCR@rlh<@V;J2J7Y0Vhv1JY{6?T1`O z<6~R`kf=GgWU3ic|?zaEXdT4Y7L_`#vAoH z$@)f9`U`{n2y;xrNr($8O2p(rHUMUx{_vHR6WHp*u$%*Niy! 
zv?m5f|6stMWh=c$-g!~w`kuOY59(gQ>D{!C&-f|^pn&5Y$-5TXHcH*IU(MbGe zf{rhiCL2nvdj@jDRMw)>qIRb)&$4&}%z30?*w%NtPi}cRzpry)?7@O%2Mc6UJ`=r@ zpSPycKlXPrW~!XkZ&dGyMr~0At}9XOckXnp|&cyTFQ#WiEVsuv093Qc}yWQ zU<{QJ{f29AzAb>&js@ch`cw%*kBr& z957LfMuuFCmT@<%<=U2)7p$}5+`%Nr_^{J+YdvZsYT0V4JWf^%EoQ3rq;b_e#A>Pb z)iM=m>A=;qf@hL|IDi<{u~yyFrX`~#d#3O9(`WI)mGq}#54Cx{LQS$tWVPE@wS{_z z1h>?)-mVJ`rZ`1|I5&VoYj1_zy&}3tuU+TRUl@O%u0f4U_1bk`*h;$Jmp^?zQ8ld# zUuN+F5OdqEU8uY2&QLtk6EOW}X=Y#Z@f61ISQ9kQ)QGncUSItrwJlIQ`KJJ`Em>9 z$tK;pmwL0Cig`Oma4E}V8CkeuA~(G{QQp(g#T*{n!fgL_(a{b}=NSLS9g_4!?`?Ln z9vrBGiC6iarWHM)aNs1bs+GB0%j7~-u*ETgaDfsUS}ar3)D;6p8UlD0{94nAqUk%M zx=@gBmC4lXA@ug}dPJa5R#$aI~dCauHJ-oTc8b$QdRLuwzuweNSrcr1tksq?lr`TUuFd+cjeJQsaGClm| z_>*X#Msny^M6yTu^2QTAie!nkCt8h(I!PcMUD;$CIY63QRiZ$}p6lhPpcQ#y9k8q` zHN0-A<8?WTZBKgOo|~=CEJ;U{!LJZ-M0xk^+Mz1qkRPB(sUak|k zum(QngDTLW*QrZBY{^su@uX;J9i(gdy{=V{A7EJ8E{mZbrh62mHd^r+@|5l22>YX| zcK`ROL5GY1d62g#SXJ9Yu6=r|g4wGPQm0Jav~wCAupPJX9R31DIrl zCeDyY=Y|D7polZd#24l|R$TTokcowOY76#y{&4D#mNm~l?=7=aJbmHt<(~-jgE^SH zS=7Hs^o6VV>;6>(qOb&7L=bJQit{xWO>h#H!wY!Y|Y)m_uIvWySYSpEzez_v^x4^87wg$seU64OkXpQ zs1F$e@f7=_lsK^ zJDh&-v!&(z-5C7zDp*;*ziBZZfIk`(s)HU zvuJqsSFuXhM9(DK?iz%<$Ldc5(LnF;5m)#%G`t5CiAR(FquF$8S*>=a_VR`q&c2hn zq}W`0=hv1G)MfbZ@uyG6Aeo9PZ1?~zqG?D8E)(+p<;p%boVahj66d~67n?TBoGhQL z$s`3)(V{O(9r(@_znA?@0j{o+yZysA{whoyZEWs;&bc1&pVHtfrJISm$>rH62kmXr(98seyX% z1~-ceR$XaiKrvZUzmZR6a84n4Krx-DdJZs4BYN8pD4Y|_(gqH8lS!AGEPpk z6b^C#^pA&wX#D|B3IJMss4zJQ0Hz`VusFECv_APvwCLy5rH3n;7duUW<4F)p5^|bk zZ$2y0NPt_CAb0}&6+m}+v8xXNLgRerC<2spYH^<%6kCOEf&`-p5DbZ;B~Ag%{AnH- zWJsdG$0;nvf-pUylmyT`5I=fzvGa+)>^r37nosrA!)R(af)3h0#NIU@KNT7xb%Ll@xmPk;300_`aO*Md}8<4@{Kvm~S%>XIK)`mN3x%9pYH1yv%{naW;fJB~%%&T|mzJve>!jAn*@?U{%WNNF04a61! 
z(F;Q6lYsf8t4#)`1A5;F*wnN}6yYLFuY{J35#=~2oB*uCB|qJ;QfsE_0ZIHRnN7Zc zIFjK05x$xdvNUd23xX#TfOwolJWQ4sj4f9I@kdI2Tx`WY(ReSgdzX?(MO1JEfRYKw z{7cGku&k`6j5nWD)hp!4Wy;HN1txIwCjxaNf%+AIE+1dVnC`0Nn=){vO&xc@ z0mpG%K`1&~q9Xh`vK9BCgM@lSoM#14ECDF^|0(uvj&iX)s?fC3#bHjR1n?TI#H-(1 zg{x61UyG_t;T{(O*D)}7g1Z`(at6QU91WUWI%<(4s2ooItiKd%AC<#IY6t>w@Tzt} zEP(swmz#hKkawI={7+CL0+R;X%mcxxaGE}kI*h%}Dza#M>ZDP<@K-EUtuLZic08HR z{l0>zS_ptf!o72i*lPgT{EM#6FL$+5FR094KB4AohtvPlPcKxE$l2 zx`G51J48++9#O3|()o#=1%OIPa3cV$P6in)pP2X+w2eccNbssKq!A85*hQqP9Mu9u zlNVR}gC#&(&DI~n1vZD8i=N%;bK*ovWx%MB%mgkAc*|lNEB(D0TNfM@n=njxQj7+v zr#rsM=Gpq%Y$F-p7xTveX3@~*IZm$1*t4j+r{{NmlewZJ zk2gn)6drKjzk9a7+@lZ_6n+V>l8;5Ljm|wXQl9Eh z)E1*Dj$R-hc)3~2s47dotjkk5?*(TW%QE4a*qCa*2|D9zfW_`H^y>sx0&u&?Ej_kL zFd-a=i+4;{f}wmxdYu#U17)|Xu04<%VRcIXHdI9n#t$**p;I95E!FSX~&JsSD<<^>EwzPPT_|9d_SSXgzDC zYD0q?gS~V~q86QS37c@QGX&z^;=_{=Q=)HIVtb)2R9WnjSnld9=2;w&sNMu~ttwe~ zRGEC-&Cv!{ybphQQeiDoW9gS#aQ)xo5 zv~&veS_39pKI$b2F?M>qNY7Wt2?%R^gT7mgC5DaFZLN(rSF4ziICl#*%F17K2puI) z-Zx`OviS8XbjnaI8^EX_WoJC8xAF=SDJf}Z5o;0uZgxUkYI$x#x9*+!#kU{ngQgbH z+h606Vz<8*Fq&hXA-cCh1lXD6wb!GWklNMroCb}g`#Q$dOw;%?(=K+8Y#(!;*8mt) zY)8qhcMTxXm?I^~n&+lU$EHpn+ma0g)8k2e!wiUaegzepjYd}Sq;jxHSV)Y@gBMsUvc+ zPN&JW^RT{1)`SPP4J%sWuRM`Y&4bi%@K1(W=LM~Wt`4@>-wG`1rb4|4FxH( z0BA$$iunh4O)e6oeeWQ4)j^C+$<(D|gQbQmqYO=olfvd9NDDYRs?X4(G;>ox_n^rn zB6YBU|KlSLMc;cXd6qPg0wO(U6+|dGvC2LclpSixE<{GXh3Kpy8Kc7?XMDg0eEp4o zUujwX)6_G(D@8(Xf%vG@NpRnO65E6c&s;AqT?<6T-YR>0FIp(yd-esp@%P*RslX(4 z6rZ``NuZ%4eFa-ZCRaN8TjSm^C>?I)mt3Z_eIOu?)A$>t^KHul}!XGh7 zEKiQ9uU&bKgZJN4!SF@7E`l6lRem@7Fpz-a5G)z_j!O~^$@w59Q`E#ac7%DjWz^3^ zhne6H)Jy`=wKH^910pdL+e_ z_GN790B5umg?uZ|JH;42k7MDC06ZmdYrKJ>1P_464V3Fwxf=3RX>y_I0Km0$`TX-k z3jDgXk#dH-ac$=HN{7I(ha9WlGQUSOP!d|B;($xcW?y2!secmhfT`)UoT^A6YX&R1t!Ch!F z)aaJt~jFqmD)6DX1q7~loBP*PP}_c@ltu1URUhNY)+TW1_ z;~0lvoKUjOIv}y}N z_(2hhsDvp9fE;fd7u-5KZYa~;e)qp0{#|Q#1n{!~2-y2{m%N?vStQejVUZWaFNdemeUR-rhE&cwDY=<)>V z?xcDo>Pxcc=^|wUILf6y>Ql<@1WZ7ELW)urrBS>m1yFzlFu);5pcPU@?Cz%;S|O)u 
z;w5gx7aR-*Frp+7K!Iwas%Ro8FbEi)Bo`d)1zlQt8sv69O_sGNAxKKokTa4@BVu3P2Pbq!0!nM9^jcLcrcWAp!>n z4*(&WMr6NOC7Wg>R%m3w;;=~)3?W8`0^-o+l3^DpKmZKj0NUy$DPSR-#7U@X2BD-S zmSsvT0ivV_Oca1-;6wp>aOu9}pVZ_y+5}DjfDU{Rplpny>ZDFA!B0TtQ3QfesEm6s z!2SRl#RT%FdnSeJBn1U>-~bFj0Mb$b*3wc;Knl&!rt&8S3c{UUVk9Rgs`L(=lqKW{ zC?XbUfDFI@5P&e(asn3OM_dAc>h2|6Lh+`BTA+a#L?8jWr5m0>1q`6lh@l&@r5m^* z(}qEu5TF>;LKt>}8NA^I3P1(mh4jK=Ug%|BmS`101~3lB2iV~p*x`(1@h|>@FVev+ zoWNYfK^!nf1`1{zWZ;S(3yz5M92BNx49fzRk2I3Q4q#>gZYwoHKxTx)4mPP0D4`Ni z%Q!5FHpcHbP$7~uDI1p~+`erc;gQ_z5kK=&KbLFW*iB{J4HQ1%3K~#stSbWc{w)*C z;12$80Th4~0#XzffB{HhZzQBZH$ZJfBt#qpL^LGfaLEE&Krv?m7&7NqoN5DkVHaLt zAv%Hr65s`QdBa0_qVHL}>uPFyaMbps9i)C2C-UV8R7ffJay0 z0?4OF7ig<0EJv~lCR&9To?$2Q1sWnzD7v9227E}C@_p#cbhK0pBk1ONg;AOx!I zZam>{K!9U0bOS&^5HwOl@j$*dazh?R8D3DqoazFo;(`!h1#V)g{EGq%00VAR1Xkb$ zGQt^xWJdl@Mow--WukLnC!6TWcJQPT0%~_Y;2$I)0G?8JxJ3SV{()-i0LAzTOU&d0 zu#`+9N(r}=>6~ut;zv^MbOI)T1T<;_qRhu80Hg}Vez@mSxaWPOP*6zaeo6&V%J3Cn zK^G>+x8CRk*4R^R|)fJGWeR|1F)Wr8MRf*71Z zHK|1yY5*lfKm}%21~~7o;%Zn_;B^7)1-hXdY=8n>R|VMWhq7h#4vkmOVp=%O(`c_U zgkT(ImILgn0Bk@Ad|+mM;2g#wjBNJ;yog!TAvp`+0$4xpxeiwoR_1C$b0ffty8*<=1d zK@adC69kR`-m4-TU;qL#ym+8uEBJ2sh6gr)0SG`sLm)pe5)Yt>Wk*E6TEHeMj2D8T zrz~g!0L%qiBpD<{BxoQe5g;ZH-~cYb7apjmdP)T*AO@Di1r0(55zm`o?r9^TN~|QH z!sIyUAQd1XYrP~ARv|WYqYh31DuZ$rax4{%Sy5mMSS~;CB-Mz%D=rgn<}_!3Jjb z0BYAN#)7Xs zyod!7;}$ie*Q8ThKZ6-ZBLD!YlYHv~AV2{kfRKa(Lz}G*z!8jtqa7&$ZWJH@0>I~{ z?>P)uXUr7$OI~H~T1q43=v=cUg6E*=9K%o;jVWRcJLYqlMI6*%|;iBsc z4+P;3IDv8IYXVDpL>xy3Dq^cV2U!G&MPj1Dj;g0-WJV&QrVnCrj3HQPq$OJ7Ms$+G zT*XCrVZ+3D=CEW3Urb5?ib)|#N%QIcc_1oFE`f{Hn5(N&da$~Uv2uH)!35IhOgW0j zE+rbGEC;Ue1ll?W_!LoGffWFytpito@R}7;1yB{@!k$DSScC?EO7J+&M!+c|b~>?7 zBqlCmMiN4k`%5v=%td74(7fU-J~J4Kff|6Q8<+wr9E}{BA+(DDu2S4jZPz%1Gft*` z0U!cG!DWQQkhBdIZmV2VquGQ5y9-GX?BHo`%M?(-HMCo22wFMplcC$&y%824|C9c3 zi4!XNnFOJ}DKOy0-P!=Av?z;E<^e>5G`bBP*RdU&Lz60@ z5}2boCgD8Qv0T;9y8dH}xR!>KXu}Ua38CLx)@Pj_7uF&}LqYR zCUW`EoS`h3A}KJNgk;+*^vZ}NvnkH%tVSp)h^P})mw1QS(1xM5jubr| 
zfZNcGQ984s{*C~quy~C#;>a?DZ?vMjG`ua;p(FgFLzCpwxB_-Q$sRfgTHM&1*4y6g z+izhTWN__y$|i?cNL#K+q$G-|SjBN{qFnrpM?Wf0U+I!CqSp9MoY2MvK2Yj9 zrONKcMP++}+)+$G8pO`*LWSSS4$A~$7G{TZU4+{IC&C-3!XrE*JjbRV$eVtI!XMN4 zTZMF~{p1ucU#6uenqssqk-*iGiDDUuwgrA&D=R4)`Vb7XD(f~bLPy2BY5tcL2TnJ0N4m1f~iD{5=|00 zX_BHuC`*n+$uecjmWWU)in;P7%a|!A(E)HzpU-_hg9;r=w5ZXeNRujE%CxD|r%&ivK*=jNkn55GP<`sdXw^9udASFgdo41c!V%NMZ3$Sl@hV!qh-SYy4+ z=gWSLAyeReh#5v%FT-rZ%!L<{!;CN-Hp2`#9y+5Th8H&D;f32kW8sFvkjTt7EyDQX zio#fEp)lHLbIn8N^aznP*7(?Ckl`3KWRDRUdCovRJ_KYz7bQ8+L+KDX&I23;;87(r ztyGgsHigMjNiUH}CQ69lgeFNl^>o#naKRIf<1Ow zf}lReOERz~mdt9fS%S%JxJA;)ZLmeM8zjBqMjUL9Sn>$5$1RJTZpxhy9eCkM+lnhD zh?iY^&|$Z&d)HoDo^EnjayIB>qp~^J7@1@`OagfhI~fVUkp!lgG>IjNC~~HlCyC^=Of8Ls2qTH$ zG>MvGCPE1v0px?{*I_WUUjWRV{H!JRU2OJ?PV)&D=`%QnKAc` zme^wmf7T1)4$4STA`z)b0u^YalqD$~>42cihV+dp4T)z9!A3UHk&Y#0WKSPa z+DJkoCamRTX)i&M^%Eg>4#f z9M}Bnb*FP(ZgLZx+L*32rYqJZVs`^+0qF-?jBaS790FNVLB8QLm6;4mPx=u8tr3oJ zWYb5}^ch8%M35k1!wDMUNCZ$}v=J(zX=?h?n8N;~LX<4Rglh`ZNwyXMd))A!01c=> z2WpfYb_5iOQcGO!Kt#R0B`lmEB3|-8d5$~}xU^%)80g}Ql?iEoDl;FyWQ7o_mxU$!~FMs#VoifZfB2s#Zf8oHPw# zv>U4kWpaF!fgY&xGp|!p?ya<%U}+(n1hlP{(a;YEO_|@6!(Icgo?$t!{tL1CDo`kkHu2Jknm_E z464N_hG3*}kV*NhpvMHLF|H~MX+U+1GCJl|!C1{6mjww)2sJjU!*s8FgWfk9wGqI9 zZ*87M-`SZ9Jl|p@xoW3s_e3X>;L0x}tK*#fzJdh0&{u!*WuX2XY*q*&&4Vh};jwZU zVV*hZg%g}AWQ-55vE0Urr2Re{>6OGEa#1rDh9P5G=$O*}5H3;lOeWb3XF5hwkXGgp za9YEHLjEylgOm*+>}-PB;OsX(xDhR}^CVtst!e=-a4@M>CNBhl*T&57hCBS9293ZD z@j}G8822uKP>b8dg52T$5{kGEVHdD54aQ;ZLKFS6H9nT||?Hfx&D;mJ(Y4DVwmW5&} z_>^0+HJ&{D^{|h9owm{y0DR>wd{K*U)AB?@CCV&t{=HeAxQXRn$~1Og&>63HIm#%y zK$nBerzS*Q=YRf}R;eMMNn<5m_4eHR%_6mMcq)EQqxY!M+Llyzw>##XU_IoCf2@~B z^{I!41j-l{sS+fg&LIZ6b)_$ua#9M?IR=XN18?XSisF5g?$FISEo4pt2fy#2ZQnRNhB^ zfVevCb^ba?)ko5@E%)^ttk52d_>){9M|IBu3(jVDBeiSP!(ovVQr3eS z1F~yjcT%Adc4O2kzu-#n0%FdPG5^zT(FShOCW{b5i#8H%)7DptM@wUE$ZQs@E3oQXgeYq9@t?ntbl(D z{vsIpf+;ZhQDW3C_98~86ge%~lK5dO3sObAkUr#)BL7n%i5E;Tc9btRBm0w-MOh;i z!Xh+gVUg9)0F%qpMPhZk{vZW79NREIRm|wPpK9LVjh)}>c7RXhFR=AFvz?gb7H=RId 
zZ`N^9ln2bk7&^ConbbJ~sC?5yIRhA)R)i_{F@TgaVZcxfEx>b_7HYymEW(jGt0R%M z317h>8-o~;5g9G-_nWk1JlSC_)&Ys{*9zJ4o5#6ownIDAqCBkje$B&Dgt1}%JxPk6 z^dHtaVG~w&Bjq0n=#n-$A83bH?ZZn?*-I~0Aw+3nFZN=>z@G3qSiBTW&Cm_)2_rc| zZ$L0)5Ya*Z>5K~zK?5o?41qEXai9$$md@BsLb5><5e}pEWJU9q@{}gpSSI)+aNP(d zDlsNr_LqV=q9htngy|Cz=Lu!uTfWDlPc&z6As2b^qGv&wej*nP1yO&IXVB#zqcUC7 zB^in{DyK4}qcWtKQ=~5EAn&3$w&J8is);pFQ=-#!^Tku=H-6~Vbm_&Vrh`-|V4TXC zoWN-U%7dKBNt}l`rr!Y_!|9wRV4S@}0w)jy(;ykjz*1qKVR2B1q)GCpz$4i;fR-@pzllMx265el$Qti>fHR5dJddfb?yVS<-6 z;Rjw;qR#rP|HOnj6aWxF3Z4K8pm43(x~<%*t=@XA-ioc-+O6VRuH)(n*GdYaKm#=J zAHdMA)Zj1j>L0+63-Kzi_=>Ohim&v#ulbs<_UbRxumh@)3I=#-dBu_bG=BmR2=ADgl%Te2$Ku_61i zIWV)hpbYK$uetEA^{TV}lCwPfvpRdT?E0?7P_#ySv`Cw@N?WwVu(VEF48x$bP3r^W zAP!YK4p^JDSi1z`V6|O)wN^V0<&d@I;I&|Twq`pHLO``&i?u$0PSpqiL}N5twvAb1 ztXWf+EKv!~3ax(ow_@{`_)vS)+5m=oxQLs$iW>ond$h#LWn3jqm00hpV) zm|Fpw+qs_GxfAfYqC2{ps{j&kx~QAFs*AcGu)3<-x~?0$5&*la3%d${0TnO-xmy7W zkh{98yA{B@ySux;3%tL(yS@9n2@t#!0K5v|yu@3){+#Q%p-Z}$TfNqsz1nL57C-^q z>%HCEz2N)3-V45%OTOQmzUEuL=o`N0%f1P~z4B`T4nV*4d%xUUzx2Dm`g_0nyT1Tz zzX0q2_}jl1K)?YIx7Es8P;-|uffFXAdJc?wEyT9~U=Jb+xEj2{bTYWL_rV|>!smF7 zA>0)tT*6rb00I!VUBSW}5w|X^!YC}maoejO%)%!O0180D3ZTP1?5iRC!$17NJv_uf z+`>9sdkXNw)wse!^TaJ2!wLY!Q4GUW?8GYU#814%C=A0dTzf1W#z;KFU%U}E48~nd z#$~L=A$-Oo3;-t_$8fw6)tJL`e8+Yy!+1>o!sjRe0g%T5AbNUfm#O7i+lV#B+6R2e z!HnFWpe zFcNm4G;#?OF#*k0lO|Hr2gHgd*laZzS~WTm05P=7=6uct1;U@a#=^Y9>a54_Y{^-S z#%bKdE{w%Uyw1oB&ywuU{2b4fY{l{n00vNvXUxw2T*jY##rIsoy{gYM4AEmu&lK&& z^DNH^O~?M6(R$3$TinDajL$@D3Y7lvT9i-;C2`V~Pzi)E36wAiij2~PaMCG_(v|Si zFzwQlaMLP5(>#6Cln~S@Z3;w)&PILIK5-8p3;|4y0TAHS5OBLpJ=IPP)l%KmOnuc) zoz+mi)luEm7;w8wtN;lx0SR!{WKGry@Vsb!)@$9?YpvFBz1C*^)^m;4a&6aZ-P96* z0evk2Pwm%W4cL86)r9TTh3(f~{nV#h)q$PZ5RljsFxCg40F{jZQ;Yxxpa7c9*_*w@ zo4wfrfB=8;-1oe6cIaAbzm^6kj=u+RN`!}(0f5xw8Z9MAf_$?m++zI@Q^9LZGd z-ysaq5^diMKGD7$$0Pj5cs$1v4##h7;S}C)7|zEUzTq2D5Az-3j?54Ba1Zuy;wXON z_>kf(-r_F);xL}#`C#JrFyrOPUJ;SPQCNGCpIIilf4(0X04}K}) 
zw9dg9JVUoWW+z%UxnAqOUKG8)54|oYB3fp{{_Dsdd&i#aX0z$^h96uMt}53pY%$<^i1FMPXF{!AN5i{^;BQ=R)6(apY>Y5^<3ZeUjOxA zANFED_GDl7W`Fi*pZ03M_H5twZvXahANO)U_jF(Pc7OMHpZ9vd_k7>?e*gD?ANYbl z_=I2hhJW~opZJQu_>AB9j{o?OANi6$`IKMzmVfz}pZS`<`JCVRu@?{kA^8LVmH+?% zEC2ui03ZWc0*3(s0sspE00IjJ3J3rc000{R0u%}W0}}uP903a#1r!z*6bS++0|_<+ z8#e&}DH{Pa78oc9B{l#!HUKCm3AwDl9D`8z)ISEDuQzE=&ar zPct%YG9_RvO;G_*Y!6s!BNa1ADKJtuI&UExM@2GyML2jwH*H5bO%hB`ElgJyYfv$B zRy0OtD_3beS9>x{a4BeOCwXc)XL~JgbQ@u3NlQ;eY)V5%dO=ltL`!R0ZF6atEHsQ* zE{bwGxN9b@eJZYgDX4lYv3)1Bdo{OqIkt8+xph3dbUM0uHn(~-sdp)yYfq9>N|bIz zzHUgnX+penNVs=bwR%vjcW=6JVyRR+!Eso~UQ^3gchXH|&0A{BTWZZ#bk0?F&01^G zR&&u)c+pXI(OPuPN<_hIRmNpm#$;K@WK_gySjT8l$8>4RV|K=8cgA>X%5_noH>$8R zkenj9-W-f{yX*B@#PuoPl`KBg*#J)HdTu`RE#uSi#b}0H(P`_ zSCTMUi7sr7EN6)`VvR9sj5lY6D|3w|evT=6jVN}8Hg<+BZILcyk}+(NHDi%1a*-x} zkt%$VCv=lEc9R}_i$Ow#LPm)}Q;0%shdw=p9h`({Vj%brcUw(Z-v zbL-yCySMM(z=I1PPQ1ABZAN{06?lgY%qYWoC>f&uNnxz!jci3NGb&jxIk=~w(ip@1X*Zpn(qh)YECa_4HG(mHYVfDY-iMsqc>WE-SCI1K|T} zuK{0)>%)A7+2KI;0I&d_0MKh5KZ9;cBZ8|66mUEP9SY*4NM?-goC9&FLCE^BsbrS} z1#HiXFzc|uqxHNw>DIYtlWI z)iR%X%2C$qj*%v^K;R)NB|qv9Qp!s((P;hko&;`^0p)4-H1k;P#V=Hh>-<8 z2}Tl;pir2YK#fe!f#AyG1A8R9gE%pRMZ!s%5CWkGb&W+oR2mV<7NZ{uByHwX(BU3r zy17XXfM??n`6!qr4e>(&8sMVHP}m>+5h+iJyOG%JNW>~-D@Nu?q)Vt6ku8BSCzq_^ zTw=J9DM@i860^}h^3f)Y>0mt^lU(pXRx=dpV{IpkZ7yZgqaLa1p#XygY>4s<;Ch@#IGqjDV)|lYI9-&b2}Y2CWF+U3GI^wJrc|6f zaiRse_{H`;aiubCA0;_*(w9P!XkyaYu&A~qL468=NAuPI{-mW8ZUB;`s$=A=nIU6s zpipK?NIi7GCIx6f9$AZ!Qa50N+6ho+xztq4TmY)2kxCz?T0kyU1;nu#EqGjI=2b(Q zC_Z8-N?meT2BoMYz_O@}E@kPAuqendrj(OfgsDi%NF{G-^P5m~=|gsU(@xs-vlI!_ z4KJctL0++oq8+L1WV)rxK5$4*+WzKXL5o?PlD2}h^(k(jR9L^-mbSf>>`yp55|b9U zwx;DKX!jvnE|v7NBh6$^6T4D=q{){}y=-Y^#97r=gt*Er%9SYl+{l*Kyd)Lwadk_R z(z5rbyM?c29lH<=zI47>q%7tB!zYFf3Yt;$qBj|9S@QZewTL<| zAlanZ#&*21?Q4TGtl`937^kl7YXkH9+6F@xv)~;_b9GXwilSM; z{Huv&>)Z}!_d^tp(s)Ijs2tb0$U|TG+0(M?QER>{8D< z*}C4fuaRx*WhdL!!Hzbrb4_e+cYE00CbqP1{p(&U+uEIm4 zVBcEQ(jN7sCw*;8o0{Lg_P3{7ZESq+yVa>ScBM(JZ-aMw;KtrJstNw-ikn)~3jegP 
zlkM+ZL;KpU&WE}FL2he=GaK8qZnv|^eGjvux6uwRoB$3GfSTJp006+b&2x_Pob#OL zIR4}j)uk@=sWaW`RR8+a!#;Mc z4p5Wvi5et@>CUF`>0yW8d7_PWnK?sZ=~+ue?Ly6b)KeTVzo;|_Pc2VU-i7kuCI zes{XhT>wsJdH~W+d8SL<@@Y2!u5ucfD0+sQHANx7w9{Eg1yTvRIdANJN z`lNq5_C5apbip&7@Y zfg9L?9*BV$_<X!g9|7F zG5`bp2LoDggqm=KO1Okf*o02_gisiTQQ&@cP=!`_g;jMK2#A14 zi2jEth>A#vfmjHo5ClRX4&tB6`G4&YFU-4Kf1zzv)j zisE32tEh>sXo{@JiRC~J;NT6spbOIw3o}3qwlE8|Pzy6q3$tJgzgUdE2#ms5jI>Y- zw{VQX=!>_ojJ2?g%NUHe@QbuC1Hp)mzc>xkkPF3F3%QUBw{VT(c#gF|jOcg^;~0+M z2#(hXj5?ZIlQS8TD!B)zfC)m037JronScqBK$Jx(lt*clOSzOv>6Dpp2~g>j zm~aV0`IJmql&i137XhInyWdQrHPuZ*_y7Y4doyMzc>rKU<cre z*q_?~pa80${W+lhDWC;Ppr?qR`AH7tAOvz?255kiW>5xbu%T$+1zy0R7iys#Y6f0F zq95v^Ac~?RDxw=oq9)p*C5obZU|Munfr{480%>UHS~dunfl_49BnxTw11G zdJI{*rEA&?$8ZeEkPK{Ure5l%!jPxWK&NFY3^<^deCd}UaXbD2pb3@Wn1^YYiIAv_ z+Nh37n3UiLfba>BD4LxKo1poc;()21IGWsW4WEjsp!u1iNvfopspY^6zsZ}s>6^^? zs?DjK#aWKV*^K5njJ7(Qx6qu;S*y&6oz=;X?dhJ`*`4TFj>4*)@Tr~d$)3~stkjtc zy4jt^T8_5h3(YVLMA+$)Dv4uBaHU1i7F2*$qK32WQ}+WniHq zilO#;ulSm;E~>96S)%z`241iSm0$^t&2#@dxL0Ygq`UngQr3_oJ5X-O$OR)~y zuuIwq7pt)sdkK#qrK#YRTI!{6ItEs5@n&3bVG{CE@ikrdNs%V>>$l126x}3ObtGSAtv#Oirn4996o4cx=dAqmP z%AM|Mt>n0?*qNQ=IG=~BorOD}=Q)qoi3`8*4Bi^9{mHHidbtfsxs%(jlUuI(DFpE9 zuNe9X8H%Bzo1v$Bx~6-t^SZD2+PWA@2AwdYk-)JE`v@MJun3#5yxY6J%ex4>ySJ;m zyc-F;tFazC36U_RQfj3yJF{PEvpFlXEPD*V{t&%4d%a^?vM{^7z>u<98nZ3Sy}}R- zdT9?qn`K#&4~1F?l4`U_JGGC>w1)}5g9!+X$*86f1V-Sspt-5A8LFV_wUY>&T05Er zOb)!ztGhXzzuBw6*_*7&wzqn#%vqeyd7Q>coX9$l9=x5<`mFN_p3|wU#QGa zo`D;mcN?wOxvRXuklHW|{>i!D>aE?tt~*?yJ*>G7x(${~pyav@`1!7~3zPb)y7?-) zOx&;cs=BIs2D6(A4y(JuOT1TnyuE9?zk9q{{0PR&#UI-U$=e945Ck`%vMdV>EBmEy z45v05vu`}dJ1Y#|`@Pjm$8Ze3(_6CszyP%AD-!q6zKHM#fN-=;`?T~czmCetgb4^z zdzgnX3Q!;fubG;odBFbL$=3kD{>u#u{J;~uwy+wV#7VcjxvQ(pwh~OMu56ymshiE3 zp5i%<^H{>>DXqM`p6ocS?zzk7smp~6t>dVk#Ja2YiJJsC#QW;a{2I>x8nBhH#bLa==nTACtgukp#W~u$!7IE4d&ZXVv6tYy)tjeq zO0s>*$0=K;CA+h5th0dZrE~no`pl+nEXadwMOorJn&7^N49So@zm7bZicGbIc?hNe z1p_>)LJ*py8Oo&^zzA%rnY{kBsanChinhAywy}!A!nv!*oUo5;~T)Vn!#vdCAUHO&E 
z@V(QE(9ck0hEtJrW%y)`S)a=OofECA@+&>8U`>>JS-UD1Y_w2bW8^PACy2?!h= z1lF*%rdrZnE5M#Cs-MXX&PtuRoVWZaxy-<>L)@)cJ9Q1T3nctiWcQ ztE;NhuBx^)jjXKf>o*O-tlZMWeZtCG!reKY!Hk`^Jk08uw}V@bw``q+d%`h1)Z{3i zynvBH48%B0=21=6Rc+=uOvL#Kx?}L6Zf&n_9?oVR#qh16UZ4k-K(JVxu*M6qWc zEzi5Fv5vqB^31!6e#ZDbzW;o^kZrvNoxKrny$p`B3hwY{s--I{mU>B4pH30~@!^%= zv?YH2>X5p$CT`J=dbLns>oG2y{(Gt^ZPJ~r>&)4z(OJQ=3gj&P(mc<>bDPRVZmY7a ztkEjCx$NA*%*)*wxJiGk*M7pnimY1BxVZ_{-s;^9O3jvQ&FPNrKn$QooS*Niq1-G6 z?A@X4P48#TuVqlh4jT!^Ti^c;@Lvqi{(Z&gjPT1#rHYWoYuvIsJK@!9vv};_f=}6o z-^PoN_#`{{Us~B3-j^SLU*_RA5iOXBjN&Jr@{OG8i>$O3&C!?a$)W1nDV@ob$hBX~ z$+A5Iyb#>a`J8I&^D@2Dz)keWsoSkg-0mr!z)!-)s<+Y#!cL9U*zWAnI;~C}!^r;c z!PknB_$i=ckIgsi<P)=J>j=zy&t7S#nZC1&@7Oil3jmRU^kZrjLhgB@G;Hg2$M&z5~Vw(f1(Xv_9(8@9G?-Lx^&CS+b2FJ{J=Y5vI! z^y$^DU&o$(dUNgA(RkTYDN>_Hm5^F3o%0vSVH8mC6{w1^2b^+`$!0HUG zEywV@#WY-gA%+~@a6|OB+j!&6xY}A1ZW~Aw^(}}wsOv6MO)=dRyE4i^V~4qF~h}$i;)((wSQruWZlx5#0h2m1~IJNF(>&m!J4DFi!(>dtu%J^iDLoOk2)%*IR zPu8aWa}_^S*^Bj&^U`~-5&m$!guojP1a??tA?yVh3lY39?Xu6daG4AVT2{fbKg70f zyYI$3Z)vp^v8J131h=Rfc{KN2!gT}}$S0$!m&kaTblj>Wg)Fk18F;}{U$(Fuc;B@Q zt_xwq@)B5d&+q#DEztrMEws$+e7$wkLfee?G}BbP(r{$AVU5q!XaZ^1X_B7U<4&NK?;84MVeU3 zw+gpG4xY-UGn6mgPqAT zmca#RD7Gg$weB;N0hyafgC{^eMop8c4DGHoG1jeSILlLB%e1IP=*0tjCN;T9m}pBQ7mU%k2uGAr5~YJYq7ffH5d?N=gB$T; z;p4OsOL(d98ZJ}|o^B|*!IaK(mg%Kj7XDU?G8t@4G(-$%G~=eu0A@2lvbMl=TLT%oC$;n#UWIDeq)LkOOs|lQT2$EN9*GLOyZE zy_!27?9?K!?apA-`v2yjsX5H#|MPkg!)L=-hA zLGfu$Zu+gD{sDwgxP~?Cm6aoXB^zSdN;bHnRjpV>t5m&8M_R)XUL~Lz3?e9d~ZAX zlnxs6d4}!q>^sf*j&^LzJ?Mlg6>@Y>(i|}f`NYS$O>^#jAi*9vQq2*Pa46HF2A^Bm zPgghipD=u}z-1+)krRqgM38ZejE*-VZYvukk0B7Zozy~&bV&a?#HBA2OiOXwD+R*7Y< zF}bLPE&}_ey$qdYK$Be?pM6iy?Vla0b4g6ZNDXR#v4t7yvDX8Mmj#A$8j~k z%GQ+{SrcC;m0Eww+>6kJmL-z=v{GBHQpfg=obB16AJrEf5 z-%{RXFFBxJy81B*NMD5-6QnSAgOW}^3?=IQ_iX*D1w8SGzCg}@C)=W0Gnz}!aj3cf zMW|2k!tasuVG8+uk+y-ogs&2jpnF$rig}H4l5E!7CjHBcB{6-Pi+K?Q9QZY zIKn*!N45&P?Lz3Ap(gveF?H6&*nOZ*9YOdwd1jcx>GE5O<@rsA1+IO`3ibS3`==AQ 
zPm!Fg%<}ftO79~^pl@+WxRi;^^^GLf?LpvLyGnyQdgPm}>~rz8=CW2+ez{S=rBAN4{z!=vRO@k{MX4sJ7>9djJQqzwF1M=nNolDTDKN#bCr9|ZG#tA1hcqCwLa2c*i)iF8%wM$7i9I zAskft-pfBpQmnsTPF%G~N1$BPM=?l8zn)svyI-nDwYMp}`gWD$VDxIBC=>*fmD{Va z;Rb@Z;>qyDqnHO3Kxhb_{0r(HS&u`$Dv(yL{)crttP(K=gb?FE4A0W5NP zO#TQK*CD#%#4Z?R@kpg-Tt|sR1~XuYf{3qSE#_!Sq;Cc=)&LcT^6Zb*5wIc3!HyxW zW-&yk=pB;Go;kC=U=kw{0GY(I4FU^7Yi~^wD6er5L_8&t&@w|1wZcmp0ifmi^SPPp zaw6;53Thryj|eTM=y;SX0O|`+z(B4Greq2kQ~(dc0YD+6P`n7rc2tKApj#7vcPUE( z6K?{e{?}V@aokU1i-!~g6y>CE#&_!;E1k09At>NtS*s38nWh3@!3NaFm&)%^|08zW zpOkub)_V>TXhSq8vwns??NdQ7^gmw_^Q59- zK7GkwxF^DzUj&o!)*X|n$U&%W@iZ794W6d9DnV61l`I>8K@;E@07ee-gp`Z_Z)F0K zf;St4!z62~zrdfkRtC6=2m1n{5jte`1Zr+_K43B%4iB6JU}5BUQhOm%0Jdl*gk!r+ zoVf65d)4zqj%(tM0FW+v3QfGBR0*L2Xdu+^HF-@{O^FOzcsjX^s+l2|FXJhU)C(z(7$!?0_!TGJn}V?T8qk>y4N%Z$hF1-CY>7Bn<$W0-A{z!*Y+j1qE(H1aif{p=tnq;U7GTMrGc3(V5t)El|e>_%}WxN%yfoE|H1zVvA zkPC8~=6EAlYJ-C=qYHA2zvQ%-G0U=qYvT>{=x_iW1^Leq_n&a$og0a;5P-SBO8|xtaRY*Z0TfecPQIqr zQLtge#RH0YL+XKKzJxeB5MmI}SW$Zm1%k5S8S4idX_He{01#AMvScOLD7{KE!(I*u z#sTdy5;U=uPpcBq#XzJSKFcB9ua}fKKCI8LOc-8}Ks%)Xi)#o4op=OF zW8QW6K^c$^g>T-PcB)UL)dR?mB{D}ZPr$gQH;Q>d9BaND?)3z)FM;?RD8@GXk$`kR zfO;H5vDY0&DoKtTsxwB=7z42RCgs_Q=)s>1L?8p}ImVQvM51S(y#cGoW=T$VeZnLP z6BTLE0Fme{iTdZ_n8X-Y0QE+K22bnJj)?hg{||vz3bp`RwL~L12t_g8>T9(nMtemQ zVvNS$M-3Pi<6&|G(}n|QgnFmu0=41C>Y%3{#}H;cJZ&$*fPLOoFUdzO$^AGnY);wD z)l^-&?I$JFt0IY&q&Cu~|J3WoizHVRFszXLaz0+$)@aW(=r`*lfdm>1fC2*6#9T|j zAR(1B=Gg$aZ6eqqF_QseNsI>`>qc~vfr#;t)A-vSjfn}Mwhf9JMpqDe0r_G%H%5HZl_E3bTJYsF5T}&aGgA+{4kS2@@LZjg&c#6R0-&;t z;3z=pcTkl1xg$07ncC2`QX?~DX*i`{a?ckCswd>ir|0G5%igjQJA-zQ3O8vk4waX@ zRcj%Q7*Q#AiAZ4}C>UPjEKVE)1Wd;D#Z#%Dg02@qEkdgQ4_MqcO8^E?aP4v)8;t=` z11d%J1`}w|cmug^Bb2s{*}%ml*oJ6qb)aMwz4m3`tz~_`*>da6#ZnTQ+AszDB_k#7 z7$V;S?0gQC9{_sWUGs{b{{DC~$SUPz*d+Ypxcjj{yIoT1h4eFcPJ;9f**GhZo5*Y#SQcV4!!w~k^gojrZf36${k5E#B*gVg3qlXi zL2NH;2GDJ^JGnM>*F)%ihZ0YiP-ld8k&Jud?7b3sk1Vf8PXNRLaI8294KuiARM9oR zei$Gur%WKro; z$OZsX;#X&>4&A7f)ZYH+1z-R0aI-`}AK1N{%sM;aatiEzMgFY-eCUMtVBa(@#+#P| 
z4A9ETVGMuZ*&)&>AGsti7?hHTzb$f8-5P~}H+T_I9;9MI_yEC^E-3?)M5_j1#%14g zmibH3K(&i9)YwV%_I+NG8UYqEQ`@x!Bq$0j+-pu41cJ?vuhY-**OGLDjnp6Mc;Lms zb;XKxaI09#sRdPH1S)U7HV=*lP#!1M3nbG85>Q`-N+9;ZYUb|;p+Q6-WD@w#62Rsv z?m*EVdxB+Wo#(1flzXwquBa~bdGBF9L`YD9rFT#Ip(V=#Q{pD=k~-bk-n5^9rO zP8t|6%=eh=AU8e3PXGv1)1<=@0*r>8b|4K}U&o@^#}^=)9FGFcJSoi-!*I}tG9|6m zA0V-sL9w0sCs2slH{t@+v}NRNwx{JIGLlhs%T^@7;&@v!+Kbk;>z=>VHE`TEk?QR; zCySJL(8cWTYYxfdZMP`y8Z5QG8N{o=+q~1+zrfg_9t=PctN{ZS*DBFtXBuDa>!$Fo z9AMQ8cHDY!f<)=VYFbkVoD`wgI`RvKB&Y_Saj-3pxS!lhpp0H}{;d2LJj{C04dKGL zlugN!dYkPFT8{U;uIW4+{mWU^s?A#Q~tPT_|vt zecT&gu8{CBj3VJVwCXOPVvZtk5J)eQO#dE=odiPafmpy7%IoUGNe-I?fC30PT7$LH zgABQ89ZO8liU|_6K&M_(A5NRVyim#d`c}L54ToZaOdUab%wX0oU1+hXmnfdlka3*~ z=_L09anR%duq>WscYyWz?Ibz^)R_V(om?#;fM;kqJne?OGayq4$Se)4a*2jo*f5Jp zC7GY^G799If4VNY(3cB$Loj7S|4*jdW#phB$5jq;zhOJ=WK9$~+Xd#XCCB zzWJiKl+?BDpCwnnAz-Eyvy_F~8GRo^eX|-s)2n2|x_jUElRk#px_<7yE9BGx50rzr zljFZa2kry#_Xokr2CZw)r9llQDHY#oVM!;TBo4Wop5;CWcoHKD5Tq*@e+)_21JJ>e z+y$WC?Wh>iQKDBv4GDT2yZ&F{aZSl2dFTzCT0Pj@%qC6e7&!)qih*g&77XeWEGqEy zj1bKYGYSR>28Yjg@?#l-vkHQfR#b1_4!wp`1uX~#*`7RPw1-)JpsqMU*Di+PL&lyo&s(MI9;a%yC~3z49_e9<)}-XT!Pn3xEblhCMo_MT$TsN1!cFZ|Tat{~PN= zAuKL-%6!0t%iG9LgjD~ZG7Y{SzJ=R@*xXgC@rlS+>q z_ZB`6xG)C^&?`I#B76z%7hJ)(5!MS&4Y4QdlU%cnkF1=2ULU{3FS5j=y)^z(y>ZVX zNv6bsFu?Tz5sMIj%^Lk$E5Au|0>B(>ZI$SCPx?q&TMw1(FqQC{P5b(^b6}Hr9}S>g zm@_hi+_ZwI#tEFcie55jJC$U!hWJGNlxB7v^fH3FdWuo&<0&VV&#C5JCjsDMf+ty0 zbWz!r4k|%liI+9H$L}1cY zvm6EC3^|^y03@S=Fv+wc{<-Cp4xLelrP8;?6DcIUw;Ln(22+`z`fl!#c+)>i(ZhUK zo@qC&B_dSZM{(FeaHfMvuwaW~k&yMXr-Mh01x?8i#Ipb4l2=t6{N>Y#)gNU|g{kdB zA<9|J*rFDPWl>mbB7$kzX*uX{x;y=>#j!V9`pvk}o2P->{cc3p47Q#?3Ca%#Pmxap z!iL=tQ?80KeL=GRESj%`pMHC{NSIjT(@H59cb+X*KW!i*vvBS&PX*y$Ju&fJD-sM( z%1?&at7OVbzBMcaaOmYoynjc!52WyY$5E%WG|6407dHi_ulO;^U9!=RXD^Z4gC*p- z#!=jeEbo{GBa|+v0slNDrnno;Fd)7(HZ(lpf!5L_?*qr^DPB^ZR>w4z+}U1+*dW-l zOniWcECWG4382gy=Lx684NRI5AVU|qMq&dnZX+>Sp6nsySit8Qaj&pGqr!ca#EiEVdU%E{Eu#b>EnNV_wqM?1%X0ApfZsqMxZL@$YWm_=xZ^>ejl)xEWi!M 
zq+#>%8XABk`Xw`L6&R->_}`X!8muTS*I%#zV3EoP%nn&-y07GUgA29!C7ZKO?Znu8 ztIJhQ{3CvOjwpjCvyl1NUxNoOQn}(m@*kuEyP&WN0CXGu^U&jteFN# z(w5B#?y>lDW2Dcw@aTEtXW9`U>nuKf4DN_C_T%`zbmH8z6=&$*d!dmLObo3ymQ?{R z&*9!uI%vvn1D<4td^mP22aSK5HI^0SSkE$BXY9*3S=UOgBKRx;qQ{$CNVv<&Gkqs$si62mZ;@UsUnC~X*|-- zAo$0$rnqSkS3TD*fJfjFTnIoE(C4n@j^6p4CuhN()&ve6DI7qDYmQ4Qk!6VUvW1Z0 zh5;qKIBBz~KZoNLf<`z|%#H@mx9&EHvdFHr&%4AIdg@)C>i<&t*Y=%%$``xo|Msgc z^B+?KN~NSebGGM(7}V3~VGMZhtei9mAj}s0d25At1_iZ%U=RR{2l3$9DX*eixb!hsZz#W-8a!r!jD=?qBw4Rb8@Vg)0LyU~KOne-R3Z?VL3eK@HMs3~ z{>pVSbFtu@-?ODsH#SlWD{}xryk9*pNS>Bleq>0pza7aYOiuRpKqtueG%xU50D9_o zW^hH`^cP`cD$k8q(R!5-PzGML##N1MkH{gCu}gQpISTj>^XOUNzA`230rT)$&mjzk zBnggiScGEO+)@}2>e_rJ>I8T_h_2c78car0Y2?U7*N7Sd43?xC1$5F@+$v|s?WQb` z`RYiAY4Hftq^=DCqI$n3meHDl{J|bjih2t%e;k<`c*>CCdIM$*@d(4YnTICTAh9&c zUmElc?e=Jk6u$5QKX}CQDGM4{HZB!(NSXc)vC?Ft6{4Yc1*>HPfJgik)F{Bc0!#{6 z50C((1dw0_0$S``W&oOOH88Z;1sgUt2>p^qix$^LB@ml$d2+MvxV|R8hTfB{0Q@}; zp>h>#nPCx{6u?P5N}~!H+T6{`i3U(jWAiIhJepz$cBCi88ERMJ983EKiwr`5;L!@9 z^jZowLp%sU=}N|j8*+UjUg~!s3Qatm8J1Ya^-a#Y*4$?HKBr{_dk%upYJ+f`2Y~|R z&DbqKfXZO-lof;EXL`n-LS}Ip`qyn*{s^nA)48%S%=C5;8PRG^8SFU-aAUu*m}2ha zLEpm1>pMhr)yN8@tY*$z;n96cU=0N5%8e|P3dQ574+obJH3tBo29b;$jHe>`js)L- z8|4DZt_t#o5rq)|&>>8Za)v&e>DV<@w?a?-6a5fK$2A$X>h_Qh4d9#hBd7WMo)ar! 
z9>>##hyRTSmvHkZQPEc~wFgBO`Z_&8UsgN2HiMXb36T7JN*7xU(C8Tf0#gGVdzSx+PQwGPysA-7A?+17 zPd0Vppdc2b0aMhgxbC9YeqIO#jou&ti6T4+fY7LJRT8MQMzH{_diKRC&)bVZGRo_a zQO9bwRfY*?^;}l6E?1wK*G(wrusnl#6pxfAqkMEbaon>`+?`ZO>)k+GV82la{iiSp zMB#8P9}K;-2_hrCaYbHY!eWw=JiaED2>QP>AkpF^kq{b&LgRQSOb!o-b0Q$}d71j; z)N^j&<78QSq*xVs{Kks|os~TL2TzdlVdO2wOz~`UgLqKdVx|?DBTyP!8WVb+%Qji( z`(2tPGPiU39mD~j_T>TIB^+=!k?F{xB6&WcZ3+Z4@?^gGjuc_4W@@crID z#hs5te z88wM@)ZiY3CFNT#2Ac4xO)9MFr7>(PkTzkkTvxb>(`re@>Ml%mb5K>Cx)2<%wODZm zo66ZS!F}7IGRPN?pjU%vZnvlE23qMkmEbb0&cPgXGL_f7%wPTamvW<6Tz1W%9hn_N zo6&03i2<)sfhOLKDe5iOb@dxVB4zhzhwu#ZGnS^!w>>M5^e4@l7d0#e9#au$hA~D= zQwYx78LAvczq>T2yt7UVW8>MJ{>rm6Y)s+IOWfW`efm{>hoHIQGH!{)F!8FHe5mN( zVM+Yb<(N*)5T{_vW5F!<$ojq2jH+1V4!>N*ebh3UNyWQpAaG3=k6Q+vyGz{Fka7ZL zNDU_%vB!;Wtf=d8$6}yMbfkm;*NoAP&lqm0#w>(0=)umlu`>TGf?L*K|4fZquQv58 z!$a?9*0#B6eno1jEBt{QHT`(C(kpJ|#uDWYJ0o)jbktkTfL$wN3e|HDRS?g<7npvq z@^FCVuo9~w>Zy^GPAQe>ly75+nLUDVjeK+CgmRtQc{1@n_SJVk6J+8kE%Pg{c^>J) z8^h^msxn`m~B$7U^=)boWsupK?FC{a)bG(d<&M#eH zmn-DPu~EwgBtvNd3oq zRfW2JX)_%#fkukQu&dIWB~jzv40D`j^X(ig3#YM=kspiEu#snH0i)B0#2@0bPNNyK z`B3ih48McKiBZqrhuR~;OkR^8f}T-go!tA?Ik_UW!#<3go_pSadWF~RT}t!E?y^R} z2mqT}ks$v68q38<tXvuE;zR}F{pa08)9A-nRFW#Ii!--ac6ZuH7t#RxI)hgEQH6SUyV57ksy5W_7-x+9GNE`s#0q&d!$W1TRtg zejB?G9~;3Z`GNWU;h)3UZ=R(V`c>HcvfG=LD&o5uzhmrU*19BQ&TmTc{O9#qV0c^z z%WG{?5p}dTTkv@nn3Z6s@mWhl{B7PVUhbq(^ut%5CDVA5JYA*gOn4mM3*iCZ)E>x2 z3=XhR~k-4u&*rNvMhZ=O8+7U(44+@N99Zjq^vR%mKu6OMK z*02~%wW;0L?A>Z#qx&TrBSfk9tUllSW&V{ zqkP&{e5v78%Z`+}?PHMe-C>g)N`IQV)NO5eO)fWq<@z4|M4sY{gERA4YzctpV9ciV z(|+^(a>9|{Ft#A_@blYMJYylJSEF$ zgU9tf)!?hME+^VYQ)esNcd110=?Lco1k?yC_Zs1G7T-SF5Fuob>Vg`ptw8%E1^X$A z8c&P5gWO7MCvVM;BJW2*K8?FgzgK6qy0R&sql7<#S#00cG`-e9 zGgHHC(tIS>S~A(VUrYn1X4{C+bHJUK3&Yx~bOp2)M`o zTFC#cz;_eR9|Hm-#oqpLG{<}`{8ZxE2DiFso`i@VItvT-Pw{XOl145xNRpKCg)uiS zS|6|5`$^pNkyRTRoYNF}og7btDG{xhz3cNIDU(UdvaE`VZTWEPi`aJQj#Q~n=G~ot z�(~+-6wg8-Bc)iGZ-L8ik)n2vLyWY$ z|CMjfaE8&1&oc`~vl-?=5=>&k`=rpeh!inN%@Z?{r{oMYk}cEXAJoEm>wds#eCA2i 
z5BJ&3ljzY~e=VFO3OiZeHQbK*NwI`ZY{}v}n0!2JLi@=}mGz*Smh{76#H&;|e)pMI zbqDwM)kj)JDVJZ7zkI(aQx3X|O1UM6Xxu;MxtXAYR@ZsXqG2MXH)*D=DAU^|YTi#g z;+l+zLG6;}ZNzlm7Su$$!QmC>9aF+28mCtXbrB2ZyrbQ z$=p^Op$LL$)a)=8c{jmD(6he16fz*{_o1>swrdFe?r7*>p7f*nmBeFTc~%4J;4luK zu&l?qPrJHkeT<)aVo!WcIYS@BxeNIT{^FC4wL4jdMy?CZMTfSB6@^M${tUZ+x}(J% zQ|P6ccXs&`NqA3`ggXNo8;q4X>L-0XC46hd7UP*|6>QirjiQd6)vrEU{pn<;M zoej#?bf-fO$N2(29h_QR{+|1g{i81L1tp4Akc*j?m-I<&`GbGg_^kt%5KHk!vCg&2 zq>Nvj7B0FLQ76y5V;>lHhpih+*$pf8HAL-y&Y3urdoLy(ChPF&BaMo%k|rGUE3}|3 zm5kgM{5{m!AV}X_asF4oauxMcrgI)LIaQrHp3`Zp-h!4-m)9MM8QN%K2-&x#prfCS z^RI(CH-+Y6dgr5_>a2Ossc~ZEGr}G+Vnf|eIQWfKNZ(IOpxISAVGYoq(2qgnzas9n z6}X@LY~>7cM~2rz4Iyz**Y*O$Ml*!aq>)Va_D8vHVC3D2twgw(h| zm}3=;ua+D>{(uG|KzE;w>ORs zIJb$xdklQDOgLuH@n_5Dg@Fz|!Mo!HibX+A&;L95wEDa#*yZJY;Qj!Unq{TpJ<^4G zb9B4ALE1kaDEJ`MT@{a!^^V>A;We8te&f;KeY1MZyVhEphb&wEA+KAzvyh7CUsm=4 zn)r%Jqk>l3W(wYWr8BnKJ&l?xE%@6j;xJKk<382Z?^B1RK&O{SY%!-t-9cWeCNsZV zoO(jAJq2Ht{y2OOFL}vuf4%i$_rKlJ@BJ@Gf4_bIcjx;6=^&oE$fb0%t}LhmtE(P3 z$*jW~1YPp3py2H{V?{jQJk~(=v>&U7VQLmMXc%~s*?bdS7rQuZEf1%BSe;&yBb>GagZjLWxh!v7i~tdF9{VmgiN0$g|eFTP#L&>PIQx9%zsN zo{N(dK%ONX!N`1b9T6`pb0hv6;C5pMrr=@eG% zrx4!^@v!m~?o= zw)C`?ifG=rO=&sKbcEk31T`Db~ON}LzA6_%H|Y9DBfyt#gHR3`go zx?xD}sMylvnM*_2(M|7$8&B=VNCh5LgS*pB=WkABT*OoY_R~!re-ow82O?Ue&sF|R zZ<`if+{1dSLr&+I#$IcVI$>h>=ajLD%&%Zo$>`UU2=O$2XPva-2ffjsyk^nJY-b@q zzH*yK4cBv0&%WtL{HLLa6xMHI~mT!Vtyi7joBh7>CM4lQQ$`Um4DI81 z3wzGIB(7f(Tx3LiCQ`h_JTZe1(Y%tmh)1q$AkpHD3-!J#lZ8_B6IpvHHA7n zNL0vT>l>f^HEVq`!l3NE64+1AgQb6)Po{0LjM9!<(`j|ty8UVapG=lPCVNMhtE1892+wGMC*^Ep_+TbyrJx`(fL4x%KKbX zQH5!RGdx1RA45Z*fB^@_a*u3I;*u08-36jIVC=7sr4!8d=q05`jD8Z+C1>5R{TE|8 za>b|vaccQ*#S>x06A$!H-rok7(5rBC1l5ZxuvSs za4{6_gz=gzezYncrY+bB6QUUMGPK>CEj$hrGRgTQaWih#oALA^A~Z$v&TR(FwJAG* zgz{CVW?6`Ts$PkF=&LOjvnYpG2OQHo=$n=<6%@{>upNNV-(1OY0N@i@zM6f5X(jQ>ee~u_Sm8q54hYfj)#% zbRm^q{!oAWu9m+D+u#WM5nfmE@{kIT=zmAXA3a24a!g?TyMt7{7$)Ng#e%U5I9MDi z00mATQG?+oqbmekPclXZ_gG}*r!-%jV0`=EGpqidxxoR(0o!JG5XY7h6XK^JnxOOXd*Z3y`T^ALv5B4dBm 
zLY^AWPaVI;|3&;Kt0DjYfw6GYP=NN;KaB=*9vl9SD-l zMvLM=Hw?*`h#>xKa4MDzc*wq1f!M$!U*HiPc!YB{!Z8Hd>xvA>M(kiHp8=?Pcc6C{ zpm&eSgR^1Ds4vbas*OQt-ZA-040KZsxiW~XUqHUDKsK5oTaPJPvJnj-h&QejJqr}u zYLtD~PWWTwmKw4Nh1hnbYQ-FKWghOdQXL*s{n9G^u?z66P|3#DkV2S1<|ue5s-O~e zwLtw3hhh?^VW(k+nxnAh)*V<@h3{v$_D34?r#Mm$-G42#Erg#7mFy`1xWOWQDjpUc z%&>}O&~K;LU1UfF{7Nxpd;^4Ml+kZ|2$)kttr)$$>_Ens}3^tD0 zT9I!rctp%rE2D)Ypu)<-!GJPX*jg~`RWN+=!Bruib^ZZc51y?Az&0huK9cjdH}q;L z6#f*zu^D=`5e!Sh!?%W@g}cm`q0E3K=29#M7{*+_i+MPNp^?BMJ22E8%v4KQnhp$u z1eRZdEAbi=h)2HNLAIJ9mv$(Wm$a689}@W}akAARvi(Dt`os5?Fj4AY zk>wvESF|E`39M>ubkg5(fs-49>f%UA8yRsM+cx$E0xSzJVc8Dz4rY2&3B#Sj6(sK+MZvga`uu@Z$;*j?huM zVTMkrR9fr=Zl3x*(jh6Yo+`Zz7k*TU4#0D~2EYQdkz20tU00+%iX13KZk|g~MSCMO z1l}G7|COiiK!l`cBkNtE>(>#{Arv#$v|2NYh6SGAos{dY6oI)Ezv(n<7qlel`CCw$ z`c??Z2>7<^4Q`~cUzg6yT&jIhDCyN!1c7OfRwNQDdWTnUJ51ys)%UpFF-)F1?_xiY%|!dWi@IOPtG(X9E#i3o;uDmS)Raz=p>z}rM%iCSLr1;t$wYX z!B(WsRl<#AJ7L0>45@8wvAdU!1~uium$teQ{SVS>>M!fZB21>4o4g`5{F!2R-(}}r zUA9>anajT;1{P#@^J{^3urT%YA9VQS0UOxq=0ztANWG>Pm zF9u~@oQ+T9O)T+dxHOsU0x)rhA|V9%0`R3U8=86y^)Z9?0Vqos;19Fm@n%rCmEFHQ zm`jMm`VPe?4iPSac(R~9LZloEaeRr=+$K^?Eoj-DaaT*|^yT^obU7cc-k4ev&ZNh! zOW@WmME`WV{AOSV$+-URb|sTh4FS?h&WjJN7%HBub38rcqLY?iHqw;zxRJ*s^3=#O z2c~w7o@X@GZW+VDHXbPl-}I$BNqsc)Bg0#W(WqB3l1 zNg4|E&BOa{igP5(_${~lj)cOEezKO}{odqUWixWjwXp|O03Yu9LBoV!t{XR(s zOsy$zpDLP$FuSfQNAhvs$)$)lLjt7K_RrN18FjwP1phqOwCqx4kWvRm@XJe4!2jc) z$yP=5X#F|WdO{3=^gvi;Lm@?4)ge?*mpEVt&eH#d^UvM-MmWNJQ=?^cn{RRXmmRTg z;ig}NGXGEH-vYBJ&6&)Jo9Y8`RT}p|0X7XArpK-x^DC0(Cmte&9xmq5nKz^L#m{k! 
zrsk&%Ibt&Ux`SucwdLQd!)(lM<;es`rP(Gsm`&H5izukCa;)aW9xht#ir@X3L*h6T z=V&Utn;iAmERr*ofSqu^HDc*=+yk7UQ;Gbf7^xGu8g*-+P#FQokpe%q9h4Bf=!3jCy(#v&f_%78Uv{)l}@Pb(d8o z%GjfgL6y;TU7evXI=!bjy06${KrX|)Q(9c|dgkym8mpRb&dj_TL4*uH63Ft&Qv+FN zeVvo_u~goW_&ZH))nLii9u64FQBaQbEdbno6PjK96mBF9r@RdyD*>#N9>_nJR9+1a zoL>u=Da=zY$(tz5iyVi30>=BH_%+XjzRIY7W(;XxQRjl=U1t6XYl`-euw;v%kGSNLzLMujB8_~ycj#086_gTeQ>f3AA&ks1V@xbI zXH_@O_@2^D{HALkVtLVPtX@bTD$ZaXe*S^}TutU8jQaeW-MQM@!(FLNr$Q!ZO|5;S zeMgTfefRHj8Z{I0KlIiWrpe~Z@p4`Oj#8W4uc3bICG2#4Ig9H#WPRBTn^r}PciTIC zWlJq@b}LT|ThGYkBZ^gIZwEZy%~Rb(A4$eNT!Mz0!CmAFnIoQ%{(JUh|6JAQ_LG8a z=tEi^c_~WGvpB^rijvC^NqX&gJI9we{?OxuTAcb5Go6QhO#_$yRkx`lS5g*pn;nW= z>8RbT|2z3Bq5p>aY8Xy@F^Y~ZZAw0Bky+}y)%A+OGVq@3>L#kE?!Q?i)h#^+ zHjL?5GfbbJXU{V=3Yg_Yr*$L_@(EvGrM7nt9qX9xXbTNvnlFdvNuR#|U3m~YKW|Bn zQp~eqFTilFiu-fg`5n@9Pl>_v@qbGP*}H(X`+0cFOQpp;{6k>C7^AXu-@T!=-t9a* zLkQvB8Zs!PVI4rlfv)F_2>LI#z=gT!s!%<6K^>A`=+H+h6AfI=5Y_&^khZKbUEGFW>l8WQ#t&ROWUt$EP3nLo%OIR@|r*W zUg1^qHQtgDdi*tK(SL@$RHApGI|R$hwb%wmjz8b8gqZzv*6#im>)!ljRf4(79|Qze zpr4-KN*rQ#U%j;$75G)QFS#r)@%gM^h)T(uz&W6CucUT&|ZmaEY z73@0%HNA19mQA#bF49~PFkNY&%hzftE7IQ}V1LDlk$PV8-d+0Vo*ZfAI`>-dO0dw-W~yp?`O64#&Q4mUWpR&RZX<4Ok>c$W#nN8p47y4le^@-FWu!u# z9?wW!xX-hvh*!3xIO>#5jMX}34Gfl#3}S@6pYu-YvO0zgvd;$hJhYLAANvKD`Omch z9y?Bt<^1j1z8n0edu9N>GXz}LYdNqcFYpP!aX9b7JZ-jnypoE+ie`G{pe+R*p$Pf+Vi<~Gue{#BoK+!ORggt&BptkjTgx@BjY z_OkmMDQVU4^auE8U@KeDV7jj)i?nV8%c^bvUB>g$ zDtpkCrGM=5NH6jblN>cYmvWnD&&Ixf-(tVyK~i2htUfOOtDoj{pjp!!tTwEj)zj8> zv&^q;>*=bpJR)T3;oz1j1KW-6ZlK0gF0Xze%yHNglgCF{*4t?wP(n-{O<+DLiZs2CX&w9)G~!uRBr^sTMl-OW6tsi;zT*c9CA z2}`Bgy0F*l2;@qQf1}M^59uaTS<_7`>o1X?OJy9JF0DpZzuG;~eJN_)sbc`RbNS`< zK)P((+kx0Wmsbcm;?7V4f>k9$u6=JT;gCJ05>fPFD)+uQiGlg%CzpQVufvYNCB5eQ zg>F4nidpw_elh7pMy~iYAY{|~%?st{QAeaV&%9hLds^9PxDM0MEtbIjOI^?4L? 
zH0AU79_jXm%H`JShHrkPcd2KNSDgS!^kL{4vsgx$H?x2Co<##8w_xpB+x|0&HLxG>q2#wBmK*%PwjSocM~FS$u}HuB=kV8C!Z>5S z{e9@$I786`=3)fCr^d&n8_?)8bElt24x0iohG7$oaX_~OT7zg*JQbJKQ9Lp(CFt4L zpp8*RDmV>Ih7v`mkq+D44<2GN=jf%E+$~Z|!>?d|BZ`3`yLq`bG}cyDEF;~^Oa@T| z`f-`S67eElMQQIc_t0i*yTF|YI5|JyWZm+HQo$w^ehy@L5ekqECGg)0)GHveq<`_Dl{H#(us$7%Lo>MP>7IU>iAy4Mrc zx|oM=f*q2Zw!)ST9z_j9Ze~&b7-DJ>A@A^UxxcFLmE+DlS9(Cq_7Q(z?Bw~AZ|@}r zM^Gs+W}dyipU)I`pV3s!RP=qn_h=!ZN;W6qIb6Eydlor&z+yUv`ET2>=v!Z!Y~kO& zG&w;bi{orcs|oaMdj7K?OsrW7@=cA!yCp0vLL+UM{6c|Dzt()B9KF@2Kl5{`>rR-XkMxJY9FU>Q-4thJ?g&^D8Z=bk=abUxy4cG$~2? z1Bz9SxGUJXn-s8lP*LA{LSmtGj}LG&WR-bADDS1=x7Jgepc$5A`{McpHk{)YmcY&a zb%z*46vfU*uKXTJHR5Pl(#oIte&87;V)8OV6>q+}oK<7NKo5At^9_uO{n_MH)S!u$*+%d#-WU z;w=A_YBsn8&keVY!?a&i6}>>J9S4fBatp>|!Bj-Zf`vn2ZeflzuOZJH+HxWjf3@$D zu@wvA&s$GknTTD%0xPl$N56v8#P5Q;>71l{@L5-i@Zy+FT@`S)l%`vne~y2lgcM!N zq8hHWblAvGCeoN>qzyHH!t5%)0A=0AqvG2Iu>or6#!&ZT78WH|xtZI@sI%;(5p&(f z4Ah7f@FZyLb6I18<;XJ{b%jlN4dlqOrcJ?n=&Jn#&x((a|3#$RAB0lJbP2Rvp^xmB zs@z3ei#dcl$U~tDX2_k&HW@0dtX9eORD0d31WI+SK&8b2mJb-odqL`mP;x4{AJ5f; zw75hB{V{@jx_R_uy#FlV`Yeq7N!DO;ndlJ~fpO(u;$FE#DGJ9du9^I?T%@Tz$IX0x zhF)JvA^+%2v)2}eO-8&$b~#PFsqQl+4ZPk@`==Z#c$i{Q;lu=yX16~HU7GS{Ou$*I zbu&#lU6P%SMNVR=XBeS$UJr#Y2eAf?0$CTP>tqIobmU(!zv`-;V|ktJ6O|jE5Oax2 z8i$)kb-W+>AooOZZYoRh%jW#<);a7A;f=yH4JFA@%KkEEEf!+@Q72!{GU+Y~4Gu3uk1pejjjlES)vp$}t)f)dO#Zdh~QrOR#Q8;p19y#wX4 z8zy=RYev(#p|O!YL|xGUKDs8f&E_#gG?akA+@ZT!u`(qs|53lfb7W=**KYD^HJ{Jn z7H{7YG)pkm*u-zdcFTvSI|f4K)oo$zsFYt)yNXm+Y<_T`@&jgntT-|5lP8y^w9X-E zuk>X~z*;Z!rjC<%Tuw*ny6tC-%498LPaqx6|OISqr zux$fw?nPBri!FoZ3BW4l1={A$<%oHp$dQUe`BwM2rg>^3R7EesJ+@a{#Id}K{W_KX z8`?9GRxkl$U^a}xtW@0L9ipi9xCS^sL`8~w!l(!-I#6cXR8$7H6bnw*Jy$WEzM7er z#)1%d*2WrG(=d#oWqT2l_&zdhSPR=^)!bFXZ$5<7<+{O8AtiC{_HC0V>8)AELTz(4 zWWsNb_h$LTfMn?B*B?65(nFiq9e!+>XteeWe7`cR3AB+IDjxFT%1ig5S|FJZUB_@o z5i;T>HzmN1JG#98Q*_o*Q9XSa-zGL#x?$;trCXO2kXVo|mu_5XRCMW-ZUHF)=`I0j z0R<6I5D{r<0TmH-e=qMp_ndp?k2~kwx!<`n^E{vD00$+P+Z1^r{Z$f9X&vnqS(v{< zTOv2bK<(~Px1Vp4}uBH=BSA!8Sb^Yy)#4kznRx72PFuk6T*%aHMBHQY 
z8v4;QIDI4CwuG`@6MjH2`&-mk9VW)iXM4^)qtupRp!lY!v@_qb%7VXzam}t&hGOSp zIlFRz-|P2(k^8=>yZp#wzK^*z`L=I^(c|Rgb+fL_BQA_g?%P2zm7XbMaOv3CxUsKw zMN#Jvp{gu0rmRm;=`2vgl7O8`j51I&t_n}QcflTL|mF6-r@N)E5O`gV02)7cCA^P@N zYzQHb7xB)kpbBC+t8&b%0y>rCD!-wCrwyw1N!<$N;?bMNN7G9aB17pY>3tPsd~x#p zr$jZr5vW(X^Bo-1b!l0vC?~uOlZ&aP-?my^B|VLdNG$#jn>JW z7)w^v@RN#W^m``ri1Fs(JWPG(erQ5`xHZdgo?+Ih>EANA>c9LAvD{FAY8l zso*$1ft-<`XD1Mxxt7ie!~~ywf`Og^Xu5>8&DN5~^iu9*Jp#WM-5L@-MvGfkN*qfg zJFlpxB)C}@yscScSNiJFU|B43%vp)r8gBOMvFxDEfi(Q{w)V&{{AO;XlmL$<0cuT1 zGh9Ov7J@IFp(lhi>$MPa0@R8GwZuY>OrdLWLsdXveyD^BhvLH6xg5K}3DH62{7!(ItZ+DHddod8RNk=vyJs!{+o3BUwEc4iaFQ`eX(V@&Z!ffpSFcy?0=NlOpDHU^|g}IZEPOI`ybUxiBuohydljB2UZ$ zGzifA9Y?VQsMIX*+ee@V0d0K(M3SB$PeA9pjwMw<_glY=h~sf&j|^(=|2ENEa(W zDHL4S!IbpN<2s)9hk5T&ihF$KDEvjU~L#eep^6t69C-lV3sRzGeBW|&8;b; zKp6`Frvok0sD-mV3a_@9GHj-dlt(h-fuV)f!vVpIo}dUXASDjGYbx=6DL2#` z#Egd@uiHNk4E~$JojI!NvOEcyt9qCwc^4%0z(7Y!y2V!JCG^t(3B2LOi5UUdryAyge^S=tF(vAD$FP3j6) zH5`m**2`i|rUR#3K4u~S$G_qVPalii1ZR{X`qn^H1duEd9Bd1`MuJZP*dh#!7~;VR zHtg~QfCM&a`~<*)OX3ScegtS<(Q|1|fM8-l^EwhZ3%W{m{ESbkC0iJGvMEXkL7*p~ zQDGCCwSX{UPAVF$RhN0MBMcYs)!5M+QDP$A6MXN)rJB#8m%+ zAV@h}?#e)wUfjEe@5O^C(O`Al z4qnA5n3JIRX7oS3wDXB(vq4?&_L9=*+o206G!Lz8CL-+5U|oY?@T z&9KQTs7*5!0$<7efnKFeMy5jA?yg#}^{7nvC|Uk4(``eSKw_sUY1D==7J#m@WAD(; zV7hZ^a436+@VqO${?%YR&7XAZtXQ~DI6$2UE<7EG@|35qM$D(TN~005);zMHh#(R0 z$cq$TC!G890TVN-B@($Y3xZ)F7|0Y1k68bDcXbv-Lj)HrBR5Wvn*;zW20A{NxD|T0 z%d~Froyl<0^A~!MLo}%T5cN`rW5fVphJhZVQ|{1eOlEDKw z#PnJ;XW_77IzR?qMnwdx&>cH~hbi_ER4HKvqIQ!O0-_oNFn$7>pHN%pK zeYTeCYrskan*|1_gJkn=!0v}OJ+xVr1t(RD=PO|A<15u;{ z^94k1b;@V2!)@HzS4lt`fC3AoH?5%ei?)Ii0icY5!tkK(`^e8%jGfs6mXrLRCmI!* zyFGO2pi{Dd0=dGU;Mc^$=q2b|&TgS6~%at-4aD0#f;vJzZ& zHuvm4Ci?*hUc=RsPC7OPg@%)|?Lo`7S8|G%MqQrC96`7eH2QQkl=*1^_6L4ptt_m%`Y3 z^Rf3Ob6|f!{zyEU=7GSmMMosmgmiuXB}f7dpus@K)mBV!S9xKOIVZ@!Y|f+Rm3n9V zgtRtjj%&rJJAqT;8h(;9hr0_KB!~ede+ufrO;Qnog6Kvn92l<08h6VK9ULs=cH^!G zbm2Y}B)tYZ0hwbVR9;ZCwI@<&h>6`dixV%6*(Wa~p_b`sl1t>^l4vP<=N3T~+PAoF 
zKD#U0H<)@BG(@Che@6#(NATE!ne=zr$YrdATG>B7J>TgVfYLvRQ2HIlyMS-nccGX5 zRaWmUr8XJW8~|p7@2}dfi13xV1`JBljJIg-y$*BItBT%T@soD6ez-oDPN&z||csTcy2g z7}V-4a{C0#QVP&4we58()U4zjTt}e+z^)6(G5O@h2&>kN6T}3aV%^*+b;aia(nzI$ zI3}Ua^8(-hee-Gze^mB)Ap*FYaI|Iz%AgGfI8THYkh+L7S1Et#OW@hG#{zELZ=n)w zP)T7KZ^T-&LHew=S}7H+>LnLI6_9>fdIj-lHbhTjn+r8aP~sNE4uCc!g2Q)LO* z6^iTc&J-h^Xk%Hy?!f3K=h2i&z1gV3(S(sCD)0wh4VcUDy>)ykE)Ky=O=k^&&9UlT z*_#;Y&-|y~%A2=hchlXF97?tnJQYvg><(r?%A~YbF~H2 z3y{zdp*U3L$GbqzTGzKEgt*&NOt5_t z5n{0m>DkV;&u&|b@*BWDz!xW6`X^$F6;h%TKsRE64(u0q9mcDtYtdTgK`OmOo~gSc`MbEzpC( z>GW`shj`Jd{3kIV^?98OgL7HfSJz)7m8!e}Af?qPt)you`%%gL3%gO&uk$bU>#H|- zyiu)}bUqHH`)+Vy$?mX&$I7jqq?Y<+A_K(to8QQDv2qL`MtnS$F9z63tbmUt^lnHf zcHX=BrHu@;b3whvM*F#D-cBR{DBl!dF7zUnQLG!}^Du9g@ZH%dxU;#<>^hr_ed04dvX=Xb*5BlGgA-{ZKFmqUl^#_bT+&5;xVm zGMM@68+#&GR|G=fokeCBYdJ+S#)6?4KrU0b#ciHOZXUonvFh%R2a%l z=-dqg%724w-jt(+Nd|=IRemm|5U3$h1xc42+aG!7{~ZhK+l2ccHn5Yq@gzkIe<~9T zfMO$oCT78%1h`tafB~e4)>F*C$~hQv!%k`>Qi)CHn2H-iVpu6l73#wPG{d6y`MXej z9Fba*GB}DYS{2?wJ_v;1o-VkU%D13Llrt+Z(ug_skGQ7Y?F2Bx(Ph*%6EVzVuZ zE}ImKf&k!0Bbsp0?Nq!?B(V1sp1t~^#uy}a=X-S4Ho^S-_n*WK8%n=Ix0-jk`OG){sJBbAxq#2#Zt zLcJ-I@RTw>p$)z$mOi<0%bR zc+cBq6qO|zh6iDvJvUYtW+D3@{><&oZD7%&i~Sc(*D zYN&9KTmg_YRJAco5@T?E@T4&N7t6-G{o*7Hq|A$c;}xP9fM#Kpi<9=p;+!sqYx$l> zLO+CnUDgI!bp0^cBwz}=IRK1CtXsrpSTvq(ak|_^c#Tv=GV*|ht$A2*D^3-cKG)4uytql zv!+X1FuTsUt*emT$Jg#5WRGLhC&w}IX{=Z13qwL=?aUhxV*)GA2p@&KT2OyL(ZK5e z&3}pY=voy?>wOswzn#sOy_PQ-qgz0t=>~vdO9F~A1b}01bk4s$O2vUl14KJ+ou=6& z`o!lpRm5gc6nuqVrkO<34gjqfVj~AP&H8Al9E=PDF_UBP}3t(yp|(u`_9EmPP7IX1fx* z!fA7Eh=}U4gcU{Pi2<03@UghW(*XD)8%R&eo~2hItqr9<^@ zKjpv!0U9k=n%)$IIYvOKnbhsD7RA(!gAG~}uA)*-T&8inZ=5YCn)Xu~2l$%q4<$9` zAfjLl80sW_^*+i9AF$kx_^m_`W%mUD9fP438lCE!nX@v}8kf2*7ePsw1%M6*kZ~zJL!pf_Oqob z6na;I+r-Gzlc-Eebd6enMam~aV+foE6r&ih_G}kKaaS1D5l%v2+5Lp$DexqJrSzXl z=({W=rt~!86>5~Abo)8*R$)3tiTJ4+9Ei5rh%`Pg)BlPz*bf(5!-KVq^fN zXQYs)@=zHAPpLAp2YZJuGgt|P3XEs3iB&4dlq{$`=qAzENH|<^Q7KF8(qB?50f5b^ z)Yf!`%0ORwVfN%KLBrI~&$G}K#cafc*x9H2o}iS=EBYJ&c2Odl527TH&hJL)mNBrz 
z01Mba!WSfgxgj(sd&Vo$fViE7;L6js=Shgwy zydJ}B3Z`xkqvoF~i!Liu@deoTfwr^|uUc8Iu+{yzx&=5h$A&mR`Cdv3x}k!S_3Y-2 zVEY>(3y8(Jc#q^r&?sG@DQYFDK(GMlHo>~R3k=T`x~7uKr8j1rS)e-f*#F6^6D1nR zXxwk%_<)l*|NJM-`ZSg@=)Kw}0lP5AD_bB@D)CTnKtzKwvC&s4elDYE;(2rKO{|CW z5SKWJ?Exq}3>4-m$j4I51fla4&AclbMJ!Ze^U4USWm}Fc@v3QH&5yGMG*bu>vZEpVP}fpto<%`f5X0|A{_PMn;enZ`2Ycv`gRK7L{-&jAMO%%_)j! zw5=Yg@dU*@SLV z!Pj)Q%8w03y^-uEv1g-fN+PHo+qWybs7h14m9^Ng7flekbfPs4*eUIf@X*g1J&GZ{ zd-VDVn@@bOpkQLvgmN*m4vR3IMBS|#jT8*V$FpgG7S<=ZM22}PGq|V(k*1sVRACqT`!~$bd_S6&(IV{X1EEdCV%p|f~i z|JHPYpLYRsBvbleK1#EkUh&jfMNH)Is>Ij$(kpb&tYtt0Eoyp}^Y<(d6&>>RENTQ; zHu8>Jt8ndIrqXkIrJlzE1)NICg`3hPA#Cpyy@UjNpd!i!A|GSbcy1cnFc|iz{yL?P zT)P=X$%sA3lF>3NTm~gsjNTGqC`@{4`2Z$4Rx4o?=Vp`Zl|d;P%qZ!QTde|f`Z$N` zrJC{8DZ=nPO)$so_v<0chbEXb8q5^|#;plSMvu#E4wGMj ze|8IAonLQ|lT^QLRCBZt4lH<$wP-i8>#%s7H}rTnpYCZ7TX&8fNE;TX{_;!rr*Cq4 zH#TB-(KMiTw(reveIBR(>=q#Z)*IYCGr2eN_7XDNKO_J`#{XN+;KOoKPEQ7H&k{Rk z^VEcL3^N9Q)0U26-{=gpX;fFdTdX~J&+bt3RRbk(6&Y2^xlYR^Vw&ZkM=xj3#Wqc6 zw+7uT;4)QCxa@f$h@DH6m(dI2PWdVPhl*QB50rK$EJV=ma{;BbTAMm)|Chv+xf|=Q z)Sdvi)d(ufz{E)!-2yD772nAy+q5!K!=1A{k8Qs z39r=6^qxn)-H9?O$RJysd9FWC?7a8^b)6`rqQz72OnOJGG&ouQIdk&Wne~fU-!15Th@=+{ku4I=tKZ|TvgC@(en4og+3?DCjOU0V|Q z&HA|q7^V~@qE0EGLm87Wa*fukao#bp__f;cYiZS1zBcY8&aD^8tAh9hat0GBiYc9v zrzBuYHoATZj3fy>?5@m1)8aek!9?QMRyPN%cA_oO0Lwhyu2 zy4hTbFkt^P9&#?4yYYiJqkdxxjKKDpc=_RVF^8D02babyYgu=QKygUt2GRR#m}4>f ztaM|0)~Dc2ab*LRpF-<s^7zd#NF!#_Jc`oc(GtvQr{G@%OJ{(|-ziXynx-~n?IUMZ& z6}~-77SQ!zs|SY6Sw&eKKAF#K(l(9JSz0tLi85Z|hI`ijF^ECBsC&gf*?+AxYMqd~ z^seM}!h7q&LXm`ro@NXaD0Tg~qbNa@$3i9olFe=M3*Xh1Q&5AG6Vm!LvU7y$7 zx)1DVS30H`Fxo-M7tc%wd`xN$+jfxTvp&8(k$+r;s@wap-8s1C{+DTg)cZ{Gj@xUB zr+hdYi37HC3y+c;jh0=Y2a1k0-@Ydtbk5xA5*%`*`^v+US^D>z5bTiehh@+-c8--! 
ztSTdX=ivVu7}H{LSWAS15XWQR$~7y#pw+NJ3HyF6o_e96LsHKR4od%Esp?n7pP0H!RiH$@Y!0*i+0xzq#A z{$K6~>~M9IIKssgdBt8%Z#lwi+zGdLrk{N%>!c%C4ICdbtxYBw*K*zY?jNX_RuP(V z!!g3$E9=fhNSAwKVB@s|2jMi1khqC$fo?ASgn9RWlKQcN=fnb-t!VY~eR9im zm}$jUuC4fgb%ek*D|bR0$!)Kb@GKTFNF7?K!1wOQ{48DPo?L_C)6jcXtc-#_JDE<; z6Tda(epB!Iez9t?JL^Q|3i?YnxwEtGy{%cp#MZe^c^p!e;g@t8Wt6Q$+SGjH0Ap6_~W_cORo8w!M*X{LnyA9 z?`m&XaBWwA@UOHC4FP`Ykoq&xyjDTrdr%YF+jM^c_kJ4ZZkW^%k+0IcLyOT`8 zN*cjPiq_c7-xBY>l2FnU#~y+$u?j0HzeFh8R>IBpb|YDGW+fB&i=V%!vrc4cQwibz zcY6A(_%^@EnZVmOJlET6Ub*fwzexzwdB?}RI`7uS##`pNE&g^|g1fx5HiicsV!c-eDwDQ@<;9~wfH;kk`G9OKjF+nkznZAh7KMh8Qb^p zpIO39E3E6xROIV?d(dy6<(BF}K zXx-uSC8R&+eVu(P;mG@QM;%Np)F}6_!!SP;y}+X!G?StAB;Bw+n=GrE5|Li;Cfd$_ z*!uhS%c4Ahf9qJWD$iIh75$F5e9J=JE#|-vw$_IWoyCmwy5fp{OPvvK1>-(-c`qyM zw%Ba$OtVS)t|;B+bGUc5=d)mbJB#B*u)<`U&(oRDe}YcD7wkQvKTn6Be|>uY_9Lzr zmzU=suit1Ie|+!gXAj}mYOudHElK?nlV2JImsu)}rqN_6i=j8OE+fY>JLi|hu?H-b z#WU{&;t=pX-glY^9#^ymO|cTKHdNwAx}?NhH(vYY8g51(bDEHOV=0A!UthCuf%c}9 zba0j-P9!$Qz1k>>o$F+b^~}j+fLFZRm#;%XUEPvILE!f&CSBn%6e>@ z$?k5$dqEqw@Mk&p_lM@S>GfH1^XUxDbF~T4)ZVNvxwO=_ZjaQ@7YWg#maK$0svT`N zgF%-O-`>yT%fyD$56j+7|8hm|ROe2y`PB@Etc3ax@^C)dwPAJ)*)siA%x0g^HnQ7P9^VPXK?b@Fh zrSiu@DE=|LMJUb(`B#H;c>g#v5&cS{#BW^ zpS;@=)jR##l-mrYHTy>50{sz`oK0U$;~gJOdijXAeEyl|*fMmS_q6@u*(-0pa;TmUcdBLsZ zRJiQQ{cjZa3$WAp+wzf>QIQ*mpn7E#&LnlCABwuN1tY z(>5MY#1C@vY@F67EiS@ZeZNl!obt+fCW$zR5DNbN_H>SC)Rl<}x2;6v6aabCeIoB_ zdYR3qbPYB5l^HPlTfS%C$ljOliL~(9a*qyOYe4$Vrm|mT6`yW-TZ=$w!p!x=2OK<9 z1dEuq>N~{Qj^ks18zx(qZQNu}5B0cDCSMIbM)`Q96AiTUdRay#X^Rtd=bqA zb0(6Uob;qZzP?BYcjX_h2K`89`~GkR{R5E;5avD zUTKyZGYfqvllA)7R;#QW?UwN-`^~pZJuRD+YA$SH#vlqUwh32%1m(as!XJu zjiK)sBEJrvRH zVL2!8GL(6m7`ji5*{o#ewyva9cu&Ho(PUP={_byLo2pxO{;=HsbX~LPjW_?X2*_(G zPn%rWWtX$LwQDj?NdpnnZfj0#3w;fWBflTZ=3a_a_4+-pOWgKL0xI^rJF@c62Xh?n z6x;q5wH8s^Y2I})9HJ77OG@6^&SjK%EiHNXdxpK;dvGb;Jt5^lVP2I#`-?9@x%T_! 
z4(h+5QV%{%B>nJBPBvt4WTY9QB{#5ptWX4rYadN-ZXp?MKx1&2(n@N97MAPT%~4R0uc3%19@E+yXx8 z%{N6gdqsMsxOeZMOeDb1nd_fHEL_MI_`LwX+S=`iJITdt7FH8 zbF8BomHn?B1iWqMX@_Kg`MHu8u*jPp3G!J3^C=Bcd6!?q-&nf0>aX*-VKK?JdWk7^ zo2QS``nrH>=7er;>l(&7-O^c2Ok}cLQ5j@XlWki(Iw~X?;xgLKAkE^!U|{4FuS&$X z^>G9k`K;IcaV*TuyLNWpgOxjrVzTWd5-R?{E2=8B3Npf?D_baH>sqzLQmoS8NtvAj z)Sw?M|1DzXXa}Q+4OG~efaNs@Skw(#I^w5ySa%{`Nj(J{_D%hY-S+yn=BQWVTGFBV z{q#x42E7r7_sq5V*7eFR@Efwu7Yphu)mydd(VoDycMfvPyo?#r>$j}(F-|?4GL)9O zaj{Dy1NVQXwXV~tooRE{83|hyZX7uNvK5v3E#Gom=8)-@o6x>>eEytFn_g?I$NhV* z4z9X_xu5qcKgQA9EitS(P)?i8Z?_g)pDehcv6J=W$D1n3iph~R^WNNm8)8=9aU0!& zU);X?Tigy5wcCJM7)7vqH`{i-=to;exvzg=L5?t;C8sLWJBIYMaXb* zC$md#E*ZSFkL<7lC48S3y{-KEwObklt?(f6kohXSalbj8?JM<1=b7*s*(b?v$x|!| z$1+imt?A`b7L_hp3myg$$#VZvZsuLi6_+6SOgY(&7xffo$__#>>*Kq)8>$ni5Z)N| zBTq$5!(n4cFox&YFZBLOIH*5(SLN__{C&gvxwgd_@$`9JZU@&YW}z2uOv+Ej-mtt4 z8=;|N9w5cj{cU|2AUkE2l9ZC)h4+7RN5gEa|L@lxwVEk7kj?1v6LF^z_nw{|-=~Fb zA;I4fpX6NDp4-5r+kLL~PSUyj^`tmZNW&w;71e3}x1*oxPBuO@D2E?uIxYezMCF9? z=L+i1j=Z~kfd_Lh%}kiZfBn1p=;fxjXLQ@nJO{D7Xh`De^?wK7Fu$98nukbw$59#S zE2o-?TQkV3kb0mYfJ{A#ycqsZ2Lezjl|qjKc_C5@q^%sVBF+JJZCmh{zRnjTIiCG0PBKatO|=foLSac_V4-OKCAf=-pCU@gd|f2E_$rP}86t zEvBu+pvFI=)rRQT02*Owl5ZyZFr}}<-fAbrO4_e$r;nwOp#7t_m~=}o3}YlPTAEwK zDO&(Fj>y|`LuiiKpfYw(B@Irx9Ahu)yUIxw7gTpm8Dm7Ol!s&UWAnH^tF+2SnL zyTRV=Ocw9t74O^-ZFLqgbCyh$YJ>*kPaZx7mDHFlUC45SSF3P?mimjWbcQq{194u2z{}64F%+cR9=U>FaQ)UX?NvmY+EpA)bjaC2C7bC?lu9002Ijob#g z6Xw7K)Xr_DDko}p!oY}=prG@BmKwrc4N}E0%8j+%aTCn%UArAkMPlfSzC8@~jN9?Z z6!d(!;~7`wS-azv>+V(P?%lfMxq%bY0L#5`_uJ4F8;kR&(-Ql61yfwbz`kNoTGAIg zn9PxVRv}(cwZ%FKIRO+Y8WJ|F4zm5q`T!&{F~n;4HN>ns)c7l_o=|YAW>B0^cwu!| zfKW(Wb#P&1$c&cv)Yk`d9udo5BQ~ldS~*Qt*5GtQ@XP8)WDi3!V3!0F{=_3p65NeC zuST#%!kK`mIk5U0tynY9Sm#}}(Gd<$&p2mKwXoe-OHZZ*&$w}LVxe%nrEtOwSX~XM zq2NS^05V)`(klV!FZmvCc&6@qK6d1%rK?OG*iAkcP95-k?7eL*x|b>AmF2%3=>H53 zASw9?y4&vML@h_HfPs2IM(ZJVYoJ=C2BUc-qoGEDtp=Mlu&~kVN$vAO%gFqyy&~J8 z!t%W*yGI++8@V!WOx01M*wT9%(X4Mw-wYPgzfxEcDi)Ac}`My)N^^<~;^g;T@O8T_jhbk+T#>+TqN>OZ$ZUyy78MRh9*>@&N^euI5 
zg@U)gIJg{kR7pOR=J$I`_Qg(+0Bc$~&g8xK&PTNJ@6;hU2D}EF;Sf{h&{C$~$HIoC zLcir)iRIdcWfbt^Ks53Kgq@YZa)4#{wO03!R+B`(2!dq*(W_iqt4y_DY#}mh4PWRa zKf}P!?}4~>fm{0;T*P4vLX&GNlFMFlr%IH|SCZ>6a;w~X>s;fjrsQs_KNn&J#TQ>c?6nhbH=iK?5GE1aNGnf`P#2zzr*?Q+s@3%k0cm7UE z{mIn$`w+;Eh@@N5#=n6iKR*1A47$>po&0wx1$;XSxCwp3_Nk>qOBn~Q+838WNLDjR zLfO67jEPX8XsD2ul->y}`UC=(tX^&?n;HbJjYPQ}QMX~~c@qAvYb`ngmoi0)EQj)q zhe~R_P+v}#s=Nv=Px4n!Xwr_*Z$-M8?$UlaL1)9(?$yqmYpya$%!*3XaEVr!?N^8% zm2qjkJ=eTt1A%I_FbipUFX1?LYwONy8&^fy7rn0rCi8-K<3Mfga!cKg27BnR(1lA^ zrbq{!*jK1b8D#**3f94Oz%4t}jtap1*K%wr)}k3e-&cHoulORW={)3)V0@F{ZK;dS z10lORhBeXss7SU48e|rZ!_n5Wp(u@^#xqgA!%?<+F)?u#mip17^s3o8D3@Bm{!=wVkN`Np^Qh3lQ|YaAuF)Jo0IkNXaD@%`JT${sZ-7 z5B>5vZgDSfe@}G{PzIB%n3E_khrJ`$`>FXUo18Md7++_qtuXBM(A|GXIH zf$)*wK+zDbT4|-mx?%srInl#8@7e(WTHVIILPf5v)7?}_Aw$V~b-!3C4_fHHp@o|; zBV`ZCvKnlC>qlq)q(71m16PkWx=xdHjXlKZXRm4%ST*LS(5()u_yFr$KYPr&tscq8 zjnplwE2-juRFZ-=7AC<}UlqF%;Lx@?w{IX0=GPT0j3`EP^vEFdpR8L5L za!>3M7#Jq8G@ya1eqI#IV)&h3YHh2F3Gwb!ax+ZnoL=1;D= zeQ$TB{a9D`xbYsT?eYwEhxFoJB)Hr2uVnp~#-|7$MviV(B_&nj9Se#VLLT2Nm|qCr zmx}X|vThrRz78C!4fw-$cv;mdD&*UqCEt1b#jx%D&O53d_f>67>e)`#Y9i$@a;2d9le=9n)AQwsw(R7?JvuH#b=?K4+23#gZmAgryPM%wSxaRf2`cQ3r>9$g9@@?mZS@CAxt-~lb;LLr&WN4o-np=B zy+HkG4Lutu^br{mi(!sT@X-(*`Dy<|DJkoMIa^6m>Tr#3Aj#yu#K@3CHDQHSA-TVP zvR2htDbPqfaHsCi_J1A5=ho6JS~CNI(`mBv*FyIG9PaUn&MZIhw(oQoYnb&F~~n-}@mEAsLDq?lbQAn^C#f4&rO?>U8VDgK6c z*N!TkuQS<6pTAXm=l-Z11V*K3KI4w4ooi_{Q=e}OdT*uv{?$wNYS9n3fL&x6F|vsm z`TL{){#-8r#bkl9YTc0In1RX4tsfJ2&(gw3DR&DOKxag&^%13eN zS8moVQa68s-)&zWggry|ZIdsUEl-)SD&rw#@h| zLOK94N!xJH?j1bG=)1(L-xqLHSgE(v=>92kI;q3qMZj1RkluZ<5qmr2{yJmTqP3LE zgNV;Z_kT%Z2ZEvO=XTvzO|FxqpVWfjE3xU5Uw-F*S%W;^Rp{p_M6d;Wzpu0D3i-zt z;`_1fX6UM~6fRxFtENxiMPT~dnimYje65fA^vjDhF`5#!?z`KTwb-v@^=+gY$1tzh zwGYXEq^Gh3OcWk*S<9S$6>0LgbFKgPSr+3bmTmnsF17n7Ym?DJ*Z-aDR%Ve;P9QYK zboN&PM~pxC(?E>A={T@*PkI&z$#Ie**{S526s^sWK}AQ?T0Y89Equ3>!HB`Ml+i|8 zuPla(tvo5g5|miM6(N`SDlG~z`6}=phZ$#D0?Wo@so`!f5v9tgIpGpw5}lA(;8ei# zvd(t8YOxI7O+kfHoMv(NvdVCA_uz*2bnplJ?+KkdkUELRH*>X|yv>=n#Q0k?eOPYu 
zs>0s6Lb43|bop*c6}a(sz|AIkvkd1ZQ>D$-38S}CUKqXv>%xHKv-LL$%jOO_?=2l7 z7(ZCMF-lm>_`@;OqEQu5J^Ll>smg096{w!Qk~-(5^-Lo^?bR?NEme=fI>USy&4@d# zZW}jsS@imY-}bp|WXbja$OS6e5s2MVgUC-mX;#VTt*w{HqmHiC`#&dU>F+5XZXrY& z*z&cR9n=yC(Tuub39L6b@^#sG0)&u!Ga15@8FNnY&req_fhjZV`0&V^scyS>LF?oc)F9lN4358FcVfo2^Gb`?tQIt53f5 z`HV?WuT`-;pdO;){=2wFX~m)9OyiN;y@g=*KizF@i?k&q^<=kBkoRw{;H7>(2pa9) z4zWG1-Og=#d6M*og++IdUz36v(mX^yyO8?w`42r&>&v(8rk8Nl9xTnqpFfq!JeKvm zJc4tii_(}vy}Or8w3^mD8wQBek=-(GrP1idc+X_H!re9(z6iCmf81Ygv~KYq(X^5; z%Lw39Y?z>^;Wc>aOlLbo5$i0;Hp_d70mO4SO%s!FNF= zb|alaPJx1}*#f5a;WD|LOJ0S3U|Rah*Vke0p*ccMua zUC|q&j-j^o>!aF z-8(=2&Y~*ia}-d7V>Iz0XA|y^8GayW@l_ZwA#Dge+fSEW_9!BX+I(%5w}<&!%(F5l zY7OfjyfA5fO+(!=uPDaGyb8f=em=q934dkWuk#eNX6q$A{c}&7zMSM0X;VH21heE6 zygw9Hm!&ofp^C5NHxe@`UA9vSi>V2Q=Ciby!g>M>TgB%ke9Ld{-7C|Q&omB}Z|4bh zzZjNsoU$#dNO9`DReK*zTfArfRpnzrAs>o-fV8paXoBU9?qYq21J1dOaq>_{(;@0p z4$3W57yF3LYt~nKo?Xj)2p5)GRV9Q+M-XMLobH0;hw|L=_Gc2aKcOtGbuMvouB9Os z6S($U=`Z|4rIw*)n*ZLR2Wzk~ai$tn5T8oTdQ3Xgb~XllQ*VejmoHW>GBa7NHZ_}H zCcV4qp_;e8&WhsuRFWa*Prhe2R=QF%dk`ciqa=wD?QE=?S0pv!7V`uH4&n}MFr69( zCky7H8wK#;!#<^Pos4x2+f5T3uS@3W@*0qWv8iS*HEKU~{6gYwMIfI;UG+YC^`nCW zD(qC1KiRx}7-A99hFn8f;fMQyO1y{o4e~EoC7%9><0<8*GO>Q8G7HLUUie9QTe=D+(4AWs z-kd+~8?Uk=ZM0EK;k)veHyjjbr2n&=rL48y`9r?ln}?FOBKCMZ3szi(uK$+1vG9JQ zwWj+_#5n1%xTkMj&Gtv*79u%bHkJ2|&(=Wu*IUAB3!(haBzT#X%jQKF5wCtLe5>hE zJy+(6JJi9LWCv+Inu+E*fAbmm(YQ)Rohv#_5mWGx7kM+^^fBAcB!fdC&xY)TG2gg8 z6K(K4NV1;A<}12v#;|O4%>q%Wvw>{296n(?3Tv0nXF?8Pk<7VsGxZA}L^K2zd$#6` zh5_*uAD%bw;)dx`3UY&Mmb^mD++p$w8*Mdicx#O)RqFS4+I!ZqE_`X&4UVTg(W2O- zbq=KZv*T}&MV6a!XMKAP-$GWwR#|l(0kz=bNdJt0AN(uXU+}Zr=3j{x!WX{lPT(n& zpn4kJqD;}y^a)9BT8B)Y`N)yvp=~oC{9eZ8aM!vtlX~K9c+lcgp!RMSH(OdnV|rW| z#VW91NK2hF;V~?3VOIy`q-VquT-y~Br)P$Fn(H{S{9@JQVdcx~%S->%qV}y9-}0(? 
zze>kuPNn!(y6QU90iK*)FHLtaAjaC{5FMCo0Y-&V|Tu;Wt}I_8!Iu=rwj(c&9fH;u`?YdDFf*fJ&Xg+ zDz!ELqv$-Nn&{dtJgI~bAoNfT9h4G!$3W=4h$2!#6%dgQB5DX#YADh~484d*6Hx&} z2Px7-9u*8lq=|r_V9ERO{W~+iX00z+M(UwheQ4j%D|!T#-_(to-7!gN9e-|2|D zp?Bf6;khI}i+z%rR77j+rp~nCn@i0+tnYH2NKXy)C%OfAP-5ICh~WhTXoO6s=~Ve0 zmkCqG+k9uR_nk!VIEkHtDyg9c+3~c#kovwJDoZI8)rt3mnw>@0HEsmjK$D8*42%A< zDVQ{WZvFWQ%OUweR__FykfIDz)PH)I zYsXx;r?v|Ms)!m}vN=tFRxU!DGx$T&f! zOn!|G8Ss?4$1GNFbdX-RrTEW9BDX6gPqpv0iI{gP>Hgny^C+~i-_D(kL?2B*iIQpQ^-1>%%4f^e$FCkmE&VPpJp2IXRk_S}`>HPw!5Ln%cRk?kB2 zLUucJvc1o$V=ExN2OWy|ilpIg`(!`zO2^L7G#cF7%e4c8nc`750 z7+LfrO1I;$dcpm|RvIG=PYJbbB(kh_Gs$c7YQ}Yh)QEK%bzrZG-!a3w#!uj6t?0Pn zzq)UO9a6fxUY#GrQ(`O?W3Rz8&)H8BbSFcS&o;DlL4s5V%k>oxccxX&w=M0&1C$M| z&M8%%tfO|b%U0L0bs3sJb1ntlV5#_`@N-3|J9NKG9IglB@9@2_BA#XEpK(BTSn_vs zSFU!*{o8Iky)kIeb8S7SiQztw&E>i~4K?Ac9R4bCFZXPYw!crD$YxJqRqh=J6H1y9 zw&Jy_pLhe(d{VT*N9pmpquFV7sP(9smlr5y0OVmVOln6upupbp zsb#U#Sl5+lINu6b=THs5VET1Jq^$W&@fXpl^s%ah%iPSd*Y zQbn)*5PIv&AF(0ibRjZsZ&fVNtm!!~tA&T35^;ugGasNM>=ysP>iAH9Yu56Whs#%9^-m-&TX@v| z@?kRANqWgZ4R9S-zH$zm{)uIGR-X2s%mS=yesV_-u(NCX^iZmq!S|W#5+21y>0~2~ zI`G|z7a36<%p1$1G90q}=CNgyw>2ZhF3R6;oJ6=p4mgaOP4)^olg~<4NHT7`b5;^( zq)QQN?n;cG|EGeQB%*5i3aEYffcx>9XbZ5a;ggo)59KbaUf*dIi1~N~^u0~deQU(i zy+h?9=nwfc?74Nr);o<4FutX8q5LA>`K?Cy#0L4%V|;1lVm#jA=fAf0-9HYBXj^lN zyi|B&kKS6OoD8k4s1 z0fkZbc1g=1mS%jni%-IpHBWA`GJ?rGnq)4cXTDt!)@8I8&G&{@ z*DG*;QA84A>pkIvGI~b4e161R!|&}%<41B0;)b8Yxy~xtr<9MFz~R`)%a`&rv~Faj zvgG(i5lA2;6-35I6%%6d{BVI&dI%L_rA2MVv*;7^1?dpK?@y)-FBRpV%Q+w| z!!2mwvox>|7V~mF`?e@tnFcO<0SyK^vzGa*3X8aCU}YlwbxlS=dQz_FZ!|PUxA(Rw zwICjE&E!+9yUazRLR?;ercU0Hh|r~CkT!t$?W_HRJ0>WY9TRGIds4WORKQHl7cBC> z)nP;j!Lg?R1Ui)iewj?Y-$L_Lu=Y}@XFh%mv>}l$bPz)X04L&woL9sKenOBm;9_j` ze;{%H7@@>dYabxRVCn6#aZ)qf`GmcG?|Q0naegH|E!t1oxfnpuPJV^muhdMg zsWNwjG-Im?092a)xJYohXEp3U$Fk(0KTxdnUOSo;Vh3$Bp&Vesp9lF~JwCf4BKcn(J z#XQ*npcPnTsSsdD1rH4aCd4Yz zz{#(m7BoI`{FRX>x>>eLU#45n3(xsNK34$gQLD&`|4)fWwa^~NS2{eFLBIiMD=Dgl 
zBoecV)Fy$dup&(Wbtm9w)?ry0!X*gn`uUr3;;p#qk6_+c%E*2`AL6Y9QH(1cZ~>ACH4a&q#n7_4K8~B}Rql_j;WESJWkew&L$@&@{|LVP#q-G+=E*-F;W9dG=96$UUPMYnMCiQ>LB=S( z7sI)#;E(UTONYnOi+{!P|BS^|J^XNkc3SWYi+u{^#(%gEn*G(6QR907^JZf0dMjb^ z==@^jx5!7Q@Op=rQML4T2x%=E_ABz?{51_0@R2YTJFzOtV4*JyH(|@H^6n)Pv;}s9 zdiqPa04BIN#sUBZQ*+)WBp0Vsi65qE{k4}5SH7D4XeA^>-dI1vWXarZQhVCCR3`SZ`{yp^I=7HvED3y~NVAqcA4^+ls~S|Vri3+52Ic|UzE;6a zJ94KiP6j)WniDFIxZw0&L0BjH4C*gMIao+Xe7iL5mKR{c0x?{uConyeb*wd-)Vtk~ zHGk175IR#nS!C-z70zwU6CpMqF@zNk<4pCvsUShG#SPPntv(IMNfTg)C*;~PBoUkNa9D`o9v}Yr zdG#`@e$2{BhQ{vkD#cXcWKH}7fo5ZClQYDl=b=>14Z}Zvk>!l0-ar^gF!x)Yf#LzE z0m%-4T=I9X&QpI`_iFbdEg?bW6F*xGOas%PQQ|Fd>_H#X<;o~ifqGjppLU=bvc&y1 zCz9L*Vytd$4YHz8RUs8AA`e4WMx|LbX?44MdkPBEHq#{Qr0q+4 zLJ?Mc3o|N*H2@zV9(>&^TWS4g7i1tH_B8SeYq|u2jnnhV1O}Q6RipMPnE}V7DSa8! zaW8=%MI%=0N-46UeoNxOIg5Tl)!Ih>Z*_^~ulfXIA`E~X67teWALL@#X`Y@Gb%_Q> z!GQ!}J?3b~&YE-=)ugjs2|Ur2E&$#om2lyIf#ta45yo~|^Q9qz7z`b95MqjUmxhXVQ322~Eu1aDe5Etba|=asUUd`<_# z{qn9dK=rBSmzf<37^;lRs?wvu$2hWd5Zn2&N=Z!--G{E&pYYKUB{rvjn2~lIfWFYt zUiKdbVFu!-_{u2<`QU{_!dwjssxrm?Y2r=ePg!!=!&bMl^4C*3V-qvv&i_-gbgE zZ0jz-PXvr3_2LY{dB|`Xi<p>}EnO;-^H0E)} zTpKgz4Vq~#kSznnA}%x=IzL3R2e>VF-u}tKo2-&6qwARIHwaU z#bEEb)BjxkxX}IB3p$VmdE!7BzINB{6YuHtedR?v&-bLlT3iq!oGRWdNFz-qin87L zj={X7-ct4#g~F%$hRq0=oE)$t7A-D(&&_nEAqwF#>!MROr4oZVVD2Z!bEQQ=#m#E9 zOZ?X6%djv(sDX$MjUHa<9sE={Q~OQvP5R6aT^2j>O7xM&sv7pL7{sF1n>M8`K+)}` zMzG|fTsSrK=2;+O`mCH*EQRp`o$*2onv+oL=@l_2EGHZblTMM>E*FRXt%5A#Ko%IDm9RsS&1JK|$Xj%oc(Ze*E3g1{ zg^N@_K0dHwJnX3&*C{cM%;X8BgS#2wd0X6292%-a1%d2JbUmZXWeARF1)`He4Tk5u z1pC9E00tL07|9!_#D=XK%>WP@Ed~CA_0EVJZ+yz<2|0g@@{d2;`L;k2|vq+X0ovCSj^7^*I6_Au3;^pc_+chK^TuDR-RdY<2+b z#2IQwd9?Ej>|CkVW^jEM3q{e-ZJJ##;uf5a6y60`LkLoXL@EC;h~{1}n=AzKK8KeM z`P(Z=o+YicoKeP|;fLwRLn60DbD@Ro9DP=DT9v9Ul|vGh95WvF0P)BNwjWX^dJ07$ zg{p*>DS*ULL}donR8G-T0yPET&|qd4Py3lL2W4#sFuVGm9#^$)Y%WNO1PbnV!`OlD zf~8Epg5PsNlyPp|bTE${=v2ydSV+W3>VKY<9HZ2xt)L%z>6c=WjmqFtTiWTjSUH`U z6%Ul}F_)8)Iq+Z!G^m=4#_yTv08j}4insH*-{*LWI^o`Dc58-NG|MmNa-On0vA8M& 
z9Qmuc;f3b9GgA#l4IK7KSGD6|7|_i!XV?|u&BX&l3(vqUlEP=apmE|U;1gnkW%E>G z+oZF>DIk-?Qg&?m#tNH#j^JuDTaWXlq)5JXuu^!4X+zE_TU1%6xz3&fYc>HwDGcSL z(6T)j{(Kmi6%Ez_z^5A_jK2J^x-7cNr2f9?+PvT%CDmB93P%(qyg6M{O}Ke8+Szm< zpfdpE9s*>Drc6ZMAtgeM?bu4W}bKV~`r(jx5u>D@|^-o1N486{01_DQPTL{WLH1fl4S&iR?m2m3>|*Xad7c0d2D& zXFJ<{XlP}>r+gt?NyE+Ujc{)>g$FIA%Y=GOmA?8?`V{TUgsydQMoG!GF=4&$R!F9Z z$#4Z~^7}%n=(WgpChA^Y(J~-Yt-p;tPu*k2V?hSs7pW(*!HkG|AOMOyjPjOhhzU7Y zmU>$I17%6)XALoIKAT{g19u=jLj#c4xf~XxXsjvhQR+(%wMMD^5^K9grwCrdDX>)n zh@GX0-45{UWjnJ6z$sxjA{DDQxSLbEEFv0L0t4fejK88EJ~qIFPB}aLt`c75|El%$ z9HcqlNq~rkot;)?#a+hEmZ_?k%wFPuvJ7nwy>5Ypdgg=~P_lP%p(iptgx}hE1A?!V zaLxzdx(JN~ry>QtE925XFh&p#YF(X~f=h?FXb-5DjvTm?V6?i+FhcW4ebAtw0=Ns;@?-Hhs7Dti?-qmRX77!4xg>{OY6O6okIyp z>&FJ-F}8EYQg&K>QOfQ^SiR(#j$Yi*AA8+XE%xechX%TT&M8;#qKuz6)_a3F?ScOG zY~3TgETvWU?g(>CK zu4jTLooensve!*0zxdpKYsg$!82q+B_*aSH?qfCN@mE?n7V^Je?qchxc*q%|riQE( zfIDlYqdNtC(tidq05!4IsqoYZ8<;&zkarkRtkB_D)F~!RA3`O$ze)6P=_>_che)kL zpUjVz&0UoHzVGW@6`C2Ov&3_?yQf;f4)h#`?Zdy;qI-)<_oA|1puhH`GYuWtT4s+4 z!{x|1R6dipIx?^sid&OkUKtT=cj|w@@oHscb%o1u>Q!e02hW`RFq!+}L+;jlICbvy zR(_)hC&C8Mv=eg4CyA}vneVEV} zn$lcKkZOYR2@6vYUBH-nJLN(agJIU&$RoUszmCzFoZWvt8$U6}VTL{DHi(`5oq<*R zHG|c&7u9pWI-BTb@%E~fy>F!mpxh~>>w$5eq6vR8yT(2>P=H(SPOV*gZKud)?FrT@+7^UamNoB2!#}Xp|d}w@Tu8 z%HK$wkY`f3==1P>^*ndD)1^}bh~pYh*mQSOT!4^`ajGxh?Reh`BggskEoKkKa&H#8d3diYnwO-3`^kpz?e6tuK{_QVRWYb*FOM9~X2)}3$8A1cB- zeFROM>rRkA@buBKElC)3Kc2pJFheKA*Um}etaAUv$+)y`EGK+qabB`Et@Tp~?B?45a6SHf8zgAllZEA&%P(9P&fhV8=<71F zzWV25nM^>cURUFUwUYcbi(TqIzhK36b8zupkn&RJ1r7hdGakX3+?}Xf zZ-lu|xBheAhn?}8#e%-AbDrNvntnQsVP_x`(a%3k{7W#l`YP0spj(%q5SAZi z(;+f6?c=Di-2XxTYzQsSGGf+TJPNhR(cd{&kkq&F*(q^xe<>|&?y@=>rF1UqD3g0LGOhW^PcikrL90Eh+r?um z3j+9lm%XLk8HT+Op=^y?=)+77be>nZ5$0xwb%QCpduaX+Z?o+9W`1RKXBCfIVVC((PM}Gzftj3K#*;BP%{_s3sr^9}o+v>khDxsg=>uq=m{+${4D_B#F zxxbl|@Y@=r+5Q?8P&^pkZ|+;U;bIsMn;CNL*+|v#JAeK2h4;*UQFDZEks|j$;~z50 z+>^5c;4aPEy}#cnCm!`9*#Z0N(SmrVq6@|X$hTc0iT;4G3E@0?5}CQA+3a!F z*YVW7DN1URZzo)&o(qEIF;PDmR_B!0blLj|#gv(gEL=1_h%Z 
z=5-k#^vIK;<}bVkW0|*oQKyP&cbYsrs`HW}F)dH{O59FS+h~^fFAKk*){Xu?lB8vn z=CdsyS|a}`Ext_u!?Ww3haO(}6};4a`+sl|*W-|t!OSzJWv(ZCw?I`^<|@8v_cvxf zn+>ydzlq!o=(;=Xo`k%=OX#|!bw0_WIZ)4?b#ZT^STa9G1F~qaA*2jp~Gn|dWV!PygM8|F{*r7|;+RNb;g~EL=>XYmc z-#^my4i0Qj$Y>qi@jk)~9)E0X>(82g`g-hu%Z}zF0F_dYDavxY)n;vrh+c!^g^a<( zP!p922Nkrk2VX=8KSPX#w+QaY0%vm2wGSQO)QSr=;?((z8^joJ?-TSaR5=FtH4^vx zZR;?7T{6YZV&$o6s)$jkoN<2(kw-zT!^we7hGKkg#WSudn-7dyN3GRE zSIi%ZE|I(&>M{%iWh%j}{k9cF0u9&jN8HGI9QfG}yT&PAB%8^%z^e~m0+xNuopXU> z@@;F%@18u@?G{ut|I@(j1hLF)p==avswP=h=5B-;;mj zZz|m#F>7n87y0RD`I8qiLl1)&7b+bKUj_>|*j!ndo-J7XSwUcrI+|(jW56eI;y?GD z`J$(uxxjYDX@KIZ9Thi&Jh}4&8S%Y_wi5arL)n%+3j?}_>oKcY{qI8t(3V5}>4Ilo z)|Hp3&1^6V`N0mO@r`>NhEn9$_n;IKBs7Ji=TK6k#YfV|@Pjs`rl!^jCw-kWPy5u- zcaGJQ?Q&7tg)Qznf>Y9U50-qYgFg>vEBbnDtA+g>FF}*?#oiQmzkZ%vXqmaC7+~J# zK|UtDY7`Ig3uRKOGA;JiO%1Od6?yDJC?3P0l-bEkq!qPdrYAM$dtTbM_*7~u7ffed zH4^9l1_EShS$Z?0^Dh@>b5_24F%ah>;6OljU(@NR3;jE6E{y<;v3J44>FiH9}9qi!Kw{aACATt)Puv@~UL9uLQ)S zZ?Be@bzXOB?y4btkUdaFcF21=IWi#8-$+t>T2=p0{U?^T-9ZXoG{B;=WP1W>n9p(XPy(XIko_4@JlEDBo1@JMAwJu3<(KVnAV8NL#mZe!#yeS z%gnCXmsOSJCyKOBMzm6K&z^963AzFaBEmkt@8z9uj(3|wFRy34nvj>zJIJs7a3U+z z(H64zri!oEdG@xb)x{!%3{vBJ>TQ!DRS?Yjh$v1=i zyfrMt+mmA-WNhP`<6z6YXbR-WXqGNKd+YM$$D}H$ zDMGOc>GAYN;HAnzCu#PG=90-}PAz+RIfYLj7Th~M@izz02{f%IpZ}Y$g|le=7P8}$ zZ3i7(H*P)KlTL)IWb}VC#n?a>dXTZBlT>vzr17ZS*;Yr#P>(oA@f3jzn zT}7L2KQyeA_}LQR*~qSR)Iah&v-PI^SKY;Bw(+$2bmoS(qtaW8f^SdC7+t8y$amBo z!|#`zb%y!9@_))vUoNz&VI98sQQ`oRa$%gr2?Dygb8N9wXm81@iZr9DE zF$OaCnA#HKoC+P3BlFuOKVO+2zmHT(<>J=lEJ@rHx4j}@VUSbMSPkYCJ@&0*+Ppe@of=$Pz`g5Y5 zry`(K7F-SSqqezK)A*}roHB1T58ik1?9FnM_MzD5HOccgXQz3!Z{XQ)hW&YXGduss zqqp+r_FE?(`-YKweLVWFF;s7&{m~T5Jl~nQN5sobeE%G}Au*(t$#hcx#ieQv+dFxT>c5gngVcp^iBFez@g??R zP+5_)DFHd;{SCGpPTBDPdVeh#>|r!Frh2C=6K=A|o>18b3{aoigydFQ{_B^X27|651k-WG{DeO?L(Ii{(dBzx3;q{rMlg>iJrkE9Fdn zl%OxhpDJyinuLF>-^fYvXF9YzwQ_P*U%@FGFxOW;E<$sOH@~dx%zF)%3q;ab`*EHS z+P|^$Pc`!elw01A@e*_v=EMiDD77y_T3(5Q`L7xCUrBe2KTTg7z9w`saIBu2>RzJq zDc-tgAoAzDbjO={k{P+Z-bO<{0k2-#&=#icdO)@YO?idCEx1d3 
zVcKLfM0UyxY#eB1=#G&kpT;?Dx-X5c>HX? zp*=1142RMPX$@l|({6z4Q7uCJF=$I}21_xke^ubLF0(|LW{=;J z4Xd2uxY2x7awLWMewB8<>S!^^Jg?OJ*jqs1CRytktc-od@>nf|Q<~R;`^jdS$2pzm z7dX#X&73OIoqOpolq%X6SYCuO)yJhvQ)=r6-bBS%yfSBzrddeS1-md}H))`Lgp5@# zU*g!Hdr%o;DvT#AJacX@yT9LXXj^*d%9G>@1K-23*KtpJ=2F%X;4ON=a)pV(fjE7- zAQ`O?tXepoZnAh2p_aho5tA|9T!t_o#^u(hSn6`E$tB)u+GTFfzE_l`br!>~^=lN9 zJBmpVl4LVzx3O!`m1+3iJAvLLOVV4 z|FI|$RYaS{v^AX*?4V+uN);CpY2tL2P71BtE*;cdlng_a#otddUp^7epZO#_8ywaH`elzWINjm`7x1Ier>Xw4j~o#hbb`KXu}<8ilo|L^tK-v`{gVFV zsMNf@2hCk!+6MnP@1y7T&RvKMVcru5SD4QcgfG;3&vljH%vfJv^-8GheBFOm&H^fD z2y(JzZEV{VP{*}68{wO(xO@GbI{cZWN6K4VX%{1%9AD%=bEd)OxS#9r2+AZA8JZdJ zfbMxT{Pt$YALk-xe*3Tdb)**xWy&(7efFa#D7r;7 z@DcG0B!ozQS7-TEj8(Qvq${FnA-yRiFJt?S!WnM^u6OhO#-EhE?A6m!&UKQ9y2)}h z@Nnd(XzHi2XRdE1$YWnVjlOf8ge}JGeH!kLCnAyssmX#ei?eErGj49PdW-KO-4=Y^ z{;N)&zwfr-v^f7{ac&XhAd_U@j9z9 z^ElJ-`HyhQpD&)G$tfriFGvB?nMscyNlfBHU=Z($)R4zb=fxpfMSQDe#;ww=P;`&? z#X*%`xi@`NTeYM3=UmpG)T6www$#M|!2eUGu_O=)K*Rug!6eft1gAHXppg8sQ>=00@y}%(kZI?gPiK z`T4E;C9nJcTqOjrGuioFoAuYOsQjiOHJJMZcKjt3d_p zL6d%lY-_=O02m9v%m5au&J2AOy#^ystK-v7}>AF&3iz*dZ#!Gs@OR5PDe3%tjUls_|1wQTx ztSbwAvb*`jWb1*{7L|9qg?Ib0)Hb6fE3k5M^QrrG6MU;GdHbn;;6t`+ogIOI|F!a^ z&6n_N52b*vj_p#mZ4i2=M?I)sYU|X@+Tb2k&9+rK8AvU-Mw`FZuYTZ~RG)cmq;^+Lc9)j|8*GAWgLl6s?`{b?mP7>};3qo*GKuJu7?bpD{$a zzs>RepbWLYYc*{(3{rCa9QFOzdZwD#_Rjq7Nz?Z~Q9|0~%Aw4w*kcDe+@9aLc*`o(AeWzV5q$D#HAe5^l5STlXGWs0ZeiCj*lP;E?U`|kcGzYP{b{3NkaApH@VzdJ^v!oE zeK;oLdCdCaamLr<^zSkDd&hR@69;sXgA7d8&dr;6QlJ43j5#StxmlQUQWAZ$IQyiu z_N3_fNx>yIp;RUZ^lwe{?{baU@4_DLsGa(_yEEHUG%y4t^7lfOqyW5@3Pt*ZT9 z#pGFg_4oJtD}R!_W;Jfh1bfUyf1bPg?_IQqoSBO35E!u$z(W6}?vcUr=m+S(A6kV! 
z1_i(4v@@B1?Wup>16^KS%sO7bWN*j>Fe8%i*hB+t;L$A0(w+XQSBw7l?>qeO@Lew;HTZ2R$Bqew z*y0Ic=M*vta%LDNunY43kTS|DQuZ35fWl3zhO&L|x%?_MsA3hLHyN9nCTsln1>ZIc zwN3k(_GR<_*WVZjRj1s?V~U5mo7#k8x}W~v>Z za0Y&epp+SuoAl!R8l9EByAvmPq)9w-KtpcCKJS3`TpH!TLW8M@;`UP)>Mr;;|7(Du~QH4!NlTe@;@Z4xLSJFJ<+~$)3 z^IQoFGCR}mgX2BAdFde=TinTy#TUirdJnK9I}mU1@zxg>Ota7DUdWFmR8jQd+HB#a z&ySz}`p#HdetqLLBQBQz)ET(_{eS0A^9h>Rc$h#8mdt#%6Pv)QeT+@yu+S7u_=%Ft zhfoiDP0x@+7MN1ckUN)CB@0GG)5K%_#nR)T2P-lL5&bBY2?~KNvwy0SHCrQBB_nG* zNM)STPw$+_BIK&9sh?+ztl}mR^{aU%;1`+b@O*o?fM3wPp#s-nSrd#`GQX677wM+d z18mD^ZizqX1*gDO$_fw#KFlX)sAzcB-zA8d zkb#@j49`%Bx*1rgG8Xfp9#`=}a!a`+OFOXcF}b9^wk}X$Kv(}i3logce`zxoFA5m< z9yj$C@S8OCg0JnE48s2m2ECAvPi&hsyk*oryZ`XfKkNYtjAARDK$X#3C!w zImKivxS0%7vFYHm^#cM|##^vtDCdE+0h-{gBFpt6OldJFZwjZtWsb@}_iEf*>El#f z^`!B^!o(ZG8(CLYRq$)y7IS|+?~t{ZsS zn}_=s%3UJwn_?575zNvFP7=@tqfuU%%`JX`1% zd;7z&d*-hd&DES)r^4EF<^)Yt7$x2WLkwxZyy26!%+aF!EW!j+`)G-yxU++I$g-1j zTYLM-rFrcZA6m{3=dR{5^jX`tn$nOr#u~0+eMjoL2j;phaW`JV_?|=(0-_U?{&n0E ziTS?rv8?O+&vy?`j*gt3#;x9d?-qOV?K$q>(Z{>z87JGe#D2oQ`N#keqm*O4CnmIO{_Yvp+uJ^0i^^$76oE=o7ChxbHIz-GB zJu5g8FXcgkU<}_2%(HT$Y6**h2>xW1qVRNDCl;(fS# zXerYhDT!L<3hpe~%r+rORZ;D_xQme@Skg|AG-hYIuxl!*%%}9b-eIe4_oS)*j~6Mr zpAFtC#qBEsP^&Y47S5M;=kWT_Kf5g=%H6zl|Cs~tQ^M7j< zeEZ9#^oVI~KFL%JM}XOY*?;*VM&f=Od5``aN@xB)5huahsf<6|b?3bcs#Rh}Es4s^ z)~d=GuBe#^uX>*Mh zP8bsi!4}Zsd~ldW5gK)~aG80jD3cpmV9xC7*gCS5>6ugU=UsD=vET+rt;>|4NC?0p zFO4m~pCGy8nS*z>5%bh~1IY?*HMQK$kk&ntwN<2j)xUNBt!uyBy6W^}YO+~$I>kA0;31^Fl!8~$A2M1$Ynw%ROxzlYTu6`S7(d?UdjW>Au zwsq;KrhnlKVf#w^iIQ}7oX_BT4f8FxoxFSEmw}+Cm`$y>F^Tp1$Fv(BgAv(r3o80C z^_Imo*X2uQ8`ssuo@Vp+c3u%7KC3IO!w2sDnFi(oFAcCX|K6>}l zuxv`d-&mz?pp+l_TPWteQ?@Jxs&*pY-ehnyxFFQKFGp;jyXZidaW>|H`STXgk z>sklv&051-{DY@$VPn{lEU3d`&u>G^5GU{GXo=_w$8&BvoZ693hnl(*`i1=Re55I@ z%nyRT1SjV`ST4%doG}o29yR-C6;zPV_LL2J041@6n7S{w@cm8HQgq*9xpU-DI5PS^ zlqfOl`ai>yv3^Cy{0H1#p0>3i9x8&hwQR?iK@2 zctKI(pbz*w%!qM}C8XwNa-%p?U!8oI%TlhFX{nLrFMGLJpBr*x)HMz<+AWnqTk0wC z-DHW|)?1a|#2W*Xju0^~ZM%E;6`1iR%&ta_V8>}J)>>`VQjo17HsU{<-p{v=KYd}| 
z@4@|~^Z~g>yGEjW%VV!V;z0yNurpQ0asrMTdVXzP;6&^M>3v#(j#B43f!PNbRXgBC zuA_B_4O8eUiuB0BH4R4SgNyGF7 zv*+_>n!}Dh^V_bCg0N<0Hyu2N%=FY|fccYvR*+fEI>_rT$D4+}f2iDP{tPeZTBE<1~u_ z?~6|>GO>>{69WXl@2*E?3yL^Z|5fL;;J+8cS!eOvVz$=BvdDR|c}pvaH}=*RzS}v* z%;9cham;_!OiV^tGSNi)juS)D!Mw_Z0V@HV@H)v$N&DV$FdP?IA>%DMEOLeLy`!ev z1<9$9m0NdQ#OEwLC}x#L{Isf?f?KQF^Jy*HouVrjhHcq)s`yzw5HSmUl_0PAz_x(A z=Gid~Azy;cJLfyoKCvq>adRJ!6|E}do_3MlET3e;2lVy}bE;j=L-U~C7x-x3frq7D zX6qsjsb4rm;xQBce7oUdMvwIQFUoO6%C^KIb|Y!&Mj%>*+r^*HESv_=X3GOEQ>z?D*WL62EJ)7pb#9#AevjXPF z1?Ee2HCu1SPu)LO6G(G&tJ-6oQ}XFhBuL8NH8+g|_wJ#eaNoCKcK$r?ngyTlAm_+> zjw)HA?1y`a%@~5O$SK)dYEiP(H~TT9a;3s&o6~wU^#PS^K`RZdFwd{Clg3ulyvcMx)#7%R z5ka7q2duTWWoYSB$HzEhlu@hN$xT&59o1xxUy!Sftkwmo7F};SUt2Z~man9Wu0%^w z*2Hhrnz>Y;LD~!NFNS?|WU5=c^XB_or_PMhq-hP~clSOoY)QT*+rHa+Hqr9AP>sOm z;K%9gD$!ChtMH$-&-Q73s4GEMANLIQtIONvU}~4JK4xV^k|3h`I>q>hm>NT(R~S450lt1bjP0i zj_sXae8f2e+D&a%?5x&YpGwVqMoJ52L1D~J;SQ9JQx7(0KTd_H0{O~#2>+LBXgG7% zmQ#-Gd&+>WMF=#LxLd<}uy6FeZ5=hT&JuC@nwhrPbOq49o}2lc5ne6^R<)_VLeD%m z4NyCNAk=ZJ{;&m-;S7rWl+YBQwYv-WR(Y~<;au)3UDsLk=}_$BgEO)m`+r+`2$KeG zx(UpZfm4!K@3nbRGM}&R*&yzz6~KBg4^mFDtg8NBc|rM(t4sOOy`5AaJlVAJ^SpRq zIFH=9vvH6Ofe`{j>8fDs>{zWaDt%*ie zd4>}E)p`%(nj!Up!;m6E1AYn>d(eR7p7zf@j{LmD*J+B@o3_*9wchLXKz6>i`peBH zn;N_~&g(Gq$aJ@8SOMw#@<@G|OC~?o#v(?4sN_QEZcrJr&0ZBXvR^cz@;6O|PD)C& zFnKhwn~eQUnphv@`aWhc+Kt6$o(_Ao*RAbRj#5vc^O=COY)Bej&}s!ogC)Pzq_84ZSo@Ymei$>KuNnKxG}Z|;OH+4Z}d#>C24 z->OJ7x-)-?^A>CC;UzHy844J-1$e(tVL<1#`*{gzElz z0slLGLYU!^-QC#}d-gr9PjQHs#rv7+1z!4x^J1&Vxa02uZqH6117Fe@`PL?l3i3*G((g2M2A_)`D zTENz6j3w&=-uQIub)@*hfqEmi>5L{ATOd>Su-%bOO1*lc-_~!rH!tvLv^8T?a5VMJ z)Ef?;)>|=_&4cAtlNt(gl2>hqoR$z-X6Uup*#? z-IL2@h9Aqjtw-KkJeya(bj1Qwx%yXM>+X-o=}7TW09=L)s!bn`JYHs4rh#h9(_{e! 
zlg(ywrpa<-knEYOOn!snQb4b?0lm%Dg1zuUjusqA>L9^C)@by#;#=eb6a927O)JUx z%n~D27%;^#K(4At6&MBb6b>U(jLT6R^pj-%Q(d`$A(@|A9TJ5i1;dN2qe-DsuXBkkLXHFk`?O5cgk#zICo)&LqJ z<5zLFu`oN#=^wAr?3N$<%*Ba9e5GPQ*N5mATZ#1x2mKE`%tel#h`@i!=W`-W^?CU8 zIMTKhvj}C?Vlm*@-T?6Fay)y7d zcrvz(Nj4V%Mv%ZOM!yZLn1HFw2mnwU0B^W>0geL@n1Kiy3mlXQgXI8me>}4o<~KZB7W*0ws+Znr#pe|HqM0O&o0U*2KvgTEk%$nu6lW#IZycBV3RMdP zfmzf;keN6VhzW!N#6V|9Fc)7`z)XT>D^(ybX|>(|FHG8%sSll?{nAnPZ#69I0xXN9 z4m+QbXe1yRWmTg8tFw34{)$yDzs4R!X1VqV1%R{DhHnw3NWX?->d8zH{s0I&RPjrf z|DbmXzJO+YQ!wt8r#1;SMCTb_ly0(qRIc)5Qi!=2cxEKM_D3;z`B`OL@XNbKnfwM- z@u9EoIo@*{s7eTb)5!R8i%$WFKs$)BRI>1%sbZz>9qy(@-?Ca~q^^YbO&I0{G9AHTv{IW2>dj)g zg&74%a3Uf9G zcOh_@SUi{NfLkzq>|`t_1$xV}`asW7-+_v7IIGVBGMl1JOU?)&f(R4XR3#(dkcC^a zp!!kqlQ1KeUl9F}KELtGpqj$kH2x^e4wOk%HpuWt-)a*QJ*_Dm>7Ok~5xf-ZK!pHB zT?#~)fLztcSO&jwo?1L&6Q(vgUk8BWZ+eewk0Tu?rY&nzcs&J&=Yi1fN#mv2I1!WF zb7PZTu+b@P6MVU~V1Sjqx*TfP!T2T(=s@p1(F9u^Hw3*mx$ue?g`~RjI?O-oPSR?d zt)J7!{d!Shv+E5-3c_X46%Q3@bVVGunmIHq4J(Zl;0!?vKI6fxL&KV%%#ExT)_2}!y0+aJ>Hl1nx)FX%j&4gl|%G|Rh z)p1`o=EAq4x&P9Df)}&yLk#+dI8NgDpR5FCyTisgR^FsNCM)aJZnRq`)INKg%^pp4?VOCRmuu(k;!jh!U6CsW; z;~jkkD!g;Z5*+rnQV4C1hFppO3vHk?v8O~BF}@zDga>f$NuP_wfrXBfC~K;u^uGV@ zsVL)doWJ|1&ov|rZ?Lbu+o9Hr9cLlXvN(Q*uR0W-l7+bE+_XE6qu64$&4RK(z`9OW zrvN|RAcA)uy!?sk>12E$U_@F7@Kl~#93dgg`xPb#uG-@q3%LG`!Z^oCFaJP^E%GQX z6`O)>(dFB`F-#4mf+4Cn(DdP16n#htYyW!ldjt)25C6 zn)j^Qe;4@k1+e@tc9^@y6`PD13Z$ZxlXQ$&&&lHSH_%LqXBFkfer-H!U690n zS2kb4VcXmyvP9w->ro;pVw$m$^AzIWhz?W)1!1~=T$sgbHAnnNThAfL62Mw#clquK z`@|BH;H3s+0E?y>a0RTE68|OKPuOeqa3Q}3-M}nFuryxcWYH)<`2!Xj@ z4#(U}ec|?BR!sTRwf#{ZQaMK*mtPSHY-N;X2m`SI5~FTIjqI;-y7La1DbMfSBwoD^ zLM-Xh7y z3R*($OvwI!rwB^r4vr4=$m-?sW&tax=iK)QVS8M0k|wi4mbM6~^Xj6KC17LHzLseY zJKLsREQG045LED!D)u6q;>EClP2p6dbbY3JKQ=tT?|0Ga*t}WS*|5gPMxpZci?X+` z`@TQydT+4`py8=mB<5_WhAoo**syOd5{hd80?=cK*F5p#rfCjv;*(Us3mGj-4aWZh z9wFZnD5<&JhW_zjq4&*008iW_kcR(rU(aAjr;}SPXKgN#RaD2Jx~B$iTnZ+Z{CLgo^A6B^TaqyV7h8eA)E)@iG|_up zEw3;)fUx%^v1sTFXh1@)0hqt((U^?oa|?s^1S`6E|K&NtglEwU{8d@t4Vs-2oWyw{ 
zI;(9E&h4H=0(#1_IK=5!YAaqM28l()Sg$q+^p^GEP>2%~|Ju(c%>`e!9`qN*4 z!O=9NHxB0#YV?=#9FY0EV2W&$wT<@+!J}R9e!HmPAyaa~8N3c5UWG^Nh~uj9sd46` z#_$Q)gO+$a8st5G=6t!Lk3#6zj%xY=pOCMB7(he`8KMu+l?DLHGI?>eVA}l}5FzO| z-TD+TnH+pTok&*QKG0MEH?Pl7XPJu7&KUWIwU?zM$HU5IoTNS>Yw zEU@=JV`?o*{d$}614m@;q_gg206R$vVIZ*@fDf78wlgM;zomdzVx@t);2DI@&!DZk z-$5v#H&s{zdWJ}0;zV(aO7y7jXV2zCq2}1m|Gg1PSKeu692|=sW6$YrGEkN_(dKK* z_Y;2u!a=OmG)f}g2F^C5$uJ)3S5N)W1l5imFV<##>NbU|s(m_`BeK>6Mc{G~=EnI{ECmvBS>HpCh=3!A=F!! zeQ1LQ8@~zgHifz6Z7)q(crcFwsM?0LcY6!i-EDaDayZU~SvtT%$UA|ZWPm0xgmcQ8 ztOzGX*ivS#9*ov0Vx1tI$l3ZYp#R-SF*>3i{62Al&R$aSJ^J08+pxts6Sh}ZmO7^y zBN5w;9!7C?lTPtN&S~3d3ePZ&3hSJ9?xywQMlHzO>4dHXo5|K4YMb~M>9tz9cIr>e zmzZ{dLKU6Cnt}z;(X4BJUMx7a=0aax8X%3v_SWcTcG|B3fJ_m9wFtn@8NSS8Ej+?} zjRa&S--i_WDGT`%)o054yHGSye*3elh<~c6 zle`<|9|lDu*y#V7{SgN(i6j#^WZmk3@E)bA-7lesJblvHCJePO${G^J&_KCp!)zny z*`?qTDx9^8(~cFklLJp1PyOWFoMdN)G7tRBf&POIhy{>d;1WEP*@bWt{K2r#ifo?7 zri_^ceSqsYDQ27i;U_TD2-}xC!Nyz9sj+vW8eq*l0P~2kx1B(OIUF}MRKsrD&6vMz z?8j+sR1iV|>f?CTQ+e?KfTkbUT7cdW!B&z@d2{qYQ>q~ujl`=XYtU0G7SrD?j(+m^ zrPT_U@7<6zfkqLY@o?84K$OaW_4BiK7%P??ASXpi^}NgpIhL89WN~r0H5MYCw)C~E zl>`N)(&RWx?N!sev<|{U*5?fWyC*xKb>Pj?TRFLf)rH+Eajm?-B=B)Y9r@!z~6cmqXFRDNEHa`3_SomRgmN(n;EDZ z!I(KGNq1%my!~eJrsUdQ+zZbM_7ypHHX2ag0w$lysVX}>PB1Czw&2OMxC{|*bDq?DWa^=nSJnhKvKM)Om{*biNU`LGmhLt*|WsgzWOtDUCnIfzd)el6sQu{utyrzQ=M#$^+q8g5orH z$4s)j+JCmPzV(wa6&K7$$?hFl;(RN^*i3^#&;%z2h9R2#<-FptO!^s00*2l z7L^IV1#?xMRO_M3=8^6TScP{PHmNG7`|duv{^gDO&%%OkPC}~r6_!vgu|>^_I)qIv z+^ROj1P%WV2m$Q!Mk}vrhCWFN4@*--q!9$OS_CVALRmv#-n^L;lhSA^^f#Ef#;-IN z$jpNSA47>KbMP4kfFk*7^@?p^61yfP06)Kds~xbMorbJ&gu(f zwk(-*nd!(Z%Mn7B0<+pK6>u3>l+I)t6 z*xPYuZ{PjOBLD#)M#+++ue0RQnd&mK+Ucw=vONF#2=5H+DI`p|&GA9vPuYSbidM2>)Ia281vC%Yd! 
zx<14d-7ijp;Aw1(!W_6c4G5`%AW3Uw&@ja52h~tL*icw@E>ayifI^fa=-?x;GG$13 zz8?7m$o$*b0JD61h~1F9Ug8}~C#p*7%t!rf6NXKYseK7WM=_UaK*Milc8yhRvaGCt zDkqs?JPv)Clpsns3sIMMXU3Ds_|}uag3-?vqpxCJ;ycmxE+Pb1wFW>GlZUw!K%O@tH}>qO9Kus~Vz=7bm(A~$AVmi- zI3p5%kY2N1P6HUzne3!k=tivSki>l{z!~#Yj0B{pWu7^CFP||f{xB_Cd1rOVrXBk{ zh*5kt>lsXih>SgGw6Rwp{lc0~&*H4^76>HsOOK zQ5C5bjG%!7A@CUtMs@MJSVO5IZDqa*>T+{EDD-J8y}V{<^GHEjr&E~!C7Jv^-bRvD z5GBEx8`qK%>y{Vd1A!rJV#um&`lB{lAu+>*tyuCMb4ffoN|R`zH_QltRMdf^BHGN* zxd!z`!(ePD9yDCb>fE(0UGof8KQT_6Ufyf=DXw+Y0F1LQ@jJjf@q zJO!yzBeo7Z6{MY_t(^akb_w<*43g9)rYaHQY^`!VT6!?$$7m7pP(2iST8fQ4eM)L1 zyQqBAZ~V5*rjX5*#0~&eB>19e0H;XV29k`YD70Xel^XcikT`U=PPUhLe?`8tNyA&I z=?`*dMHQv|c_aVRupcKKAWsGH;O4iDq4?gnN;Ia~javtGy`3spI{xj}qrtiV*7j9y zFrQcvCzA!2{mGNygpvOWH^0`*WjAT?TA;4A}GMIf-pZ~=rVr+$RQ7IYVJVvih2^hjiR%P|Fa7S3=m2Ec^sF^jgRQ`Tmv)Iz8 z+L{x9IHFkvP9O|{K}#!F7DdL-h|Up)bDK+uWpa}45JJ~zuB{uE7K0~=Rn@<=reiFc(gTz+@qBh>+nD4qSjdN zj95v*(EXZpRc209^MX~}lIx%Ng2v0?b+UOhR`TmIe=n^zRL6={R=_*9J$csa8kC*v zENb0_qF35?P&zlSCD#k;uYS%M#8(eI6yDunyiDoc-ud8yt530OP! 
z!jJ#+Ss3_n?V{nA(knl?{5B23WRV|<5vco=w1utEp8&mv@#4C3p!=J8vfpv04u-^J zWA~KjswvS=q&MDCHdHzVJwl@+w1f1bnwVu1G!3opF2-kzgl6{MDX~mw;HSyovv322 z)jEX%(`GAF1MwOF5}cIokObnGR?C`4IVYW*vXW!Yv_WCp^(1W;`@8vYVO`j2aDe%( zWbGh`1{($IH$z@+csyH)A!sE?gLe*AZ2ZS^f!1YbsNLY1Tr^+V>~439ZvLuGReWow zuX@I6g+S)40LG7{$S4jvi&XVnVNe{|p}7?pzrpI-tM2p)wdnC#zIWdv-#!Xpo%T8AxL`5-p3Rtg71Mu1HQ5&EYj=Vip@%w`QrgQ=-zgZ+@03&{EOqHvPIe-?eUl zXQ6=}`c9_&(%m_8lF|p0f$W;z7rHmJ(wP2ec>lx((5|EZ4LnQi_%cufR^uEj?;xl` zR$gTq4*dRh@b2jiuJeO`f1U2Xd!B#(Ipe>-fBrnb#1#d=qpVP5kwofQVbY}Ixs+&0 zJW45{u6e;Z{YPGZW)$Q@NohmhjPtSF@PWf)N|+y8;s%HMuAg39P23iH+8r zwh5=se1sZIN1hYBg?SHWsKL}`Va3F3Bk9#yO>0h*MtF1y+PsYT(+n=;amYtO21T?; z83j_&6x@X}?2ceS)P=H2B0-K)emw&Vm4itlNRUQ+^Te5nQpgFy;O{|QJFdALAYorjk(wk zASL#(oy#S&SZxUQrC`A5>*Wg>jZBoTz-sOZukf?=h{UL074TguLYsR|44Sskh`#pqf7z!=Z2@09t&0@?)w6Rtw5{9~t-?gahR_*8L0 z{IIFqufk{^7Z+Cf$%s{{!Qz!r;O`Jrso`jxX^C#DH$FnuaA=PN~yC?aP%T>Wvoz|VfR7(nk22d!30LCf4N8lBn z8bT+T1?6Xfwj0kisa{7~q`lcSYaf@Wp*u{ZNvJ_%M@R0gBZAY1uCgdiDez3-QHx!*+8Y0d<-0bi64co+q@H23A@DXtoV@nbD5 zoyCg^vs}#Nr|@Q0@lO1kJEmyv5oj!ab{)FGq?+h=>*{UqjoFiSMbRj0e|fOrEO}z` zzFbTQw%90kydWpyMMB!z8tQlSq}8=NiEMAyT`?Y(;^r5UB7$TY&DVU5jF3g=0r}GA zCv|$a)o0x-u~5edafRUMz(lReX_wBT+)s0xSK}P5za4_ghY1~+E&16WupY%H5eg~; zk~xnCU-N5s0)2`WW&!2%&w-29YD*0lgO~V(N!$l&ua=sC@6F~Q(&W@3xz!ly@1mPF zpY&+d7uf5>7ulB4dfT}uy;qMrvn%10zX$iu|6_HyJ`G1hismznOL*$8$%Z;xJIo^j zvZxs>Av_!4b5WD1n$Qq1lyZ51Yh^E-GjIBvC60HqER&Dfmt4#-h4-t1&pwGM(S-5noGq z)YrPTh^1RkGg@q}tjP&pw<=It2)z^{|4QtWBz(+omyt(Nz!0n1NC0iR{d>^{G}AX- z3p07=Ld24}KFrPPanxukOUdg#V?9KNs*IaaS59FXfAgvATP3=xjrWoO(!XfNE4_C4@!^|}>l#Lc8V%G?2~FrX(-~U$ znKTa@erGD6nD90}H;lUpv-fxB&a*B$q%aO~h=Dy?SS@AA2ZpRjg9V@8Izt#=tG=du zWg$1Jw?1&$n;9`lWhtskRVUUS7~?ac$ps7M@gov$?narNLf8r*M@al!xQo*Uul*NO zRHJ%>YzEQt{$zvr)Lf)#+QNIbM@>B|SS(|)$A9Z^#Tm3PSTCNyOe^SUrE?SY%ZugLO>05~QU*qXM1c{U-GH$fBV;Tch*zjgFZ@h>Ec_Ab}iuWN(zO3H}vI&MPit(@SvagbdA;P;*3uA~KE zB$iNeum7{rdi`C_Z_U=C26K(IXyw>>?r*sv+9HlweNl(|v9fUcnFkre!em)2*wRLk zXRQKqt9(xg?^FLj-U0qG=ZW~JtBiY2*u%@^bwYZyxM~0OH_ci)U6ytk!F_^A8yS_k 
zvokjua5rR<2N!84B|gPvxu5X=BBAyEFD{FX-5L1F#Cm>5*jg(A zj3+TAdb?_c@~WaegB?6Fx%pikg*H%nr>Ii5)HkJ}wk9lYUt!jZ9I{2?){7d4-<*QK zN~(!VS=Dijb(-CCGqc;0WEj+$so`N)dJ<>N6BAvugV0c&QmLJw6|~C)X(5Ek7O}xqHE`5^hmx#%}6eZe0TURj0|b{`wD25l@*z znj!A)3kpdIyi@g7U;(b@WW`h?0kbCLmcPX+?}h)Q;4$0+uNDLLUK!++D!l_Sn~(y3 zsCnL5P@abf0hZ3^F9`nEb4}&S`KHDTOItRF#)A9h!fei}k`l&aj;fc(h52}m&E2z? z9kX?Uv2C(qBV+N{wGOH0)zDl-)R}W#d{}*$1&x#Rc{LV#uE@cmCsi6` zCSND%5vQr6>g;RE&~n(4Ff*1&(9*Ikl2nt;`=!AgZ|stUAbX1&1d*Tyfr=)s(%H;9 zyDX6*Sb@@X;p;rE?pfRyKKtTTN_QYOcc! zF7qluQ#^;P;-wJtBi3uiDd}bx=A2??K-b<8i{jANqKuSoBnAWkm~&*$6L$pfh6q6< zt``*+gX^!|mJ$S!um4vkXw`ZBejs-8m{(iNYsV$dPA7yX-e_bU)5GH3f|o}$ggl)r z_1P^2h@*G_Y-5S0RC5jXZgnz7-MmN5bSuGRlSelmO`ea!I;CCoH=}42 zzvZ?iuFB|!U40y$X({;*XP%X=t%pwxa3Wmd$)El%?LoB2-ZX#jWUg{Q+ddf z(u-46j%li_OTt;U(LK{gC^i8h<(Pu2b#O*cR)lgH*9x2gs?feKNw;Eq!a_{U`uf5c zef()%{90|7HKksgs(VTWBFGuRmGk4$x~=tPIyx^GBN7n}m(`(%NWzuceAWeEBRrWo z3Exuh*HY~2VpZ@jtC6D)pHF742QYnbu=1*Pe^~GSd;&9O(iHO6Dum5?m~^(+tZGiL zb`oL3?#O6bHD1$j)fm~|t2nVy)K3;jwi2K_H9KoI6L7^3Vjy1QkaxX7Ymkz~08a_6 zmga<-d>}?NSeV6x<43$bJl&30qII~dR}%uYyYt;rD3Um5X6AB6hcLVKSTo(YThf-uaB?P=yL3w~ z?X~;#aOfDD@2Mq0)}?ijG8BPG!`8&}LONF+Wq>;Z)jxP$oASo%?93E?uHga?&mZY7m?y@1DU&**xnO~`$6GtG$krP3F zY}{;3(L7mO`MZ7iT}tEK9-W+1Zg+WNKD*vaxoM)1o>m=b&4?UIaWM$0xDWd|LTtR6uvw}Q|ex>=(n)8MLe|TEn4F%~~Q)z&v z3x}cw;^5I7MY=}Eq%Mi4WIvMeIa2JPgI#C%oLfO=ryqk!hfNd5t5num`_!tE%-f)F z>8^G4U4=`*NJNXUv>jBsKPMnOHRdLE^sxfc@8k)&3Mri=ptg#9)6h~?1yQ{~TZ?mgJcO4s*D>R$kgtYx@|;H%YGGNb)m-nuq`3+gQD4k?A2P`>8^d=Wq0 zu5R;kJ#}yFcK<6~?dzy`86h3$z%RGTf5XVo>^s9?X+&wt=|V=U$BW7oy8pFd;W2~T zJ!O06!AmuuxUpkijOY)cL#MlKW7qkq?EiBYt=(fkLbNi6Q9rxbJxRFX(B_p@7SiQ8 z0k)l&Cv$+?fgxI@GNH?x|{uBnxIEG-SVtIM}P-&dxJoQ({H~xXn!-fD0H_kIVn$XBf}4NBEh3 znRf3_#n5=4vs%-PXYCzlkAqa*9WR~7>O0<%_m1g&L1%}^z$35%28zp3LmsJB(n(!X z8IA!r&_!EkPx^yDu25Q3_*obfS0#Uaj~;XC5;^^+n&b5`KpUoW&AS|P3D z2a@(}p7G6-y(oSAR#(|6yT+Wh4x&iY&E7Edp@tQN9@q{>(@8QcUsP1mAE7W9G+03nhAg6DDtLob@@*#SBc#Qx(RiP zTOW#J4vQPa$nPUy>IAV`+by#P=aTf_Jxg}o33B+kpJ1b$%gN%y70Z%Fl`|@kj1n-@ 
z-dM=`b(sKp^qEiVf%J}Ln&cHdm?nl9CY_WwMbpQ>^5|Y$_`Vj@T^Vtrb#o>^&+9|` zg@R8?M28CQ|FRp=+{2vcCHd6hDkAtLLI0VFdyI!3@SeU<=##oXkG*TH>NjB80E5{G zIuZR8!Q8AgwH$n33E|!xevN)zUVz6=SzRY~kH_}h;EVCLp3Q^xafS1ymNgWPU`epZ z@Nw(nkX@jSY6%B+P{Us969b0|5ZP9kH@ExpTDfgMl~k!&Cm{9QYICq}Zi3%6_m#$T z?jm$5U`7TixkZObmpWbkV>%Z3NJTr(GoV6NDb3lKGwjgWq(W=P{JnmL6o6s@IX6xH zn;uLZdRvC?(wDoDcle(!-fL2~z8KAWL+&mw??O{H$v2k>nkqT z?MO_DgSaJ>z0Fjdwr{+tSG+^GeV*;Xw`VU$9ODMD-oFaO>Lb4d+iwkcSk^$FbG~n5 z?>0e}eweDrg`G@4U^t6EfaWDW6}Lm|_%faYs@GzjA7#oxVlnx#jEGCVN70am7}NJy zrGDRerBElh4Zb7IE?IPW$mBnG-o9*>%M9gE$M_4LwUim_2h&;Ck7%^ypK|M6B#J)g zb6bC0TU6NFvD^7SUbkbu7^?HTSc8D$%(VV{5`Xx&Kmyh&7B&>cIpe?Cz~*c}>y!oL zmwC=Zz$sn@^KZlU5Numh8!P_7Hm|EVeCIUt+TDvbr6bBztw$o)D81bAt$!l^JqZ-u zz*0B-jDzyO|Ko!HhHy%H%+rm?tbED=EAuEr0Jnq{3ttwz)+Ak-vpew^jy6K+1U-}G zkwo8;J=m#74?38}MLbrDCYM*e+~9*>BI>6{YF5i9mYi_FMIAm8IP5|J_;NW%T08mtdDA}8O|cLzYcUT-`Kh4iJ{~D*=35NCqzMoE5saz zR9SQ#E6eTZqao?Hp~0*y<@jME*zM?HRw%oT@vziebk`8G68RJGCZv-Ny%_N|(EK9l zm`U(M&_RZ|aPUFlglOo}Lb*uz*0F$i=7HIn#WLozrEGmktd)FQ?}?RS&r+rRfN3rP7j;q@8A&G=O9*387`JkuYa(=Tp3-5h*Jz=vQ`p zAG!75)%{1Cv?qDGdj)@N-FK5Y?6fBDu}slBdkLCMh`rQBGLW*NRf)(OzSeQ z%fA9E{L7Mp%7!2~(iwxmC<{gCL}%4prPKzaDV#!-+FN$NgX^#M`F*~Z4}BZle2ug2 zl}`5y(0=>VohSP`{S4jk_K8Dv_KsJ}LSGUIv|4DtAL(k;ll{IwQO`E7|M?=cRr8KA z$Vbx&mLpL>A)~yX1Y^(?{F>e57k%&J3DNwNn8`8jI^heq<+3Y-%Fv8^f{WHN1G2B2 ze|Udex&s_I4fYTik0Un6tba#F=c9kT_t73=`f&B1!4KZ#kMAL(HgwS|+dlI{Oxss} zpnvRK`RGHT9xj<@efhNn6%wIx8>gWAj-6A4M?dqRz@)PJMEyjT<_6`+_2U|PYkD8o~E73pB9ep>7j3h;}1f!W}v5uD;c zR_|Gz15ioA0cYEIA$#0o(s-{wI%BM29TtigZNtwZN2qAK2n^RLY5h!4WdQiF zIJ{yV_g4v1aow()w&V@odmf`nODN=X7AU6wCp!k}pYxn?REHeG-@52vA~@s#6@1)p zrhT0|&wtholhS2IXy?wQv5dnMG1>y_B;ofYW3B#Ljwlh;!t||iP3Y>a2ycJkx0nDe z*-(6B%BJwGlr2pWok_f>YT+CD2Q(|5)1}Bn!AkBu#>pMwUZh-Rj%8Cbe3Kj1kXK3- zhU$3#vJT39AU{n_Wpg6f_&>lg&3Ff~xt(=XHPI?3Y=&4Hrfh=5dMf5hcCe*Ww*Gdn z_++$))T*(i?dpL@y_jv62g9%+o4`A6zzbMvZLu$>M2*F|r9p9>eZl>&S2y}onfSi~ zE{Plo(`n|K?Z+Gu-s!b(`k|WQY>u{8r?o4c45;Sd!a_a~EW*CstnuLZR`zvuVSWw^ 
zP1^Bd)CvlqAna!wne1G>;}YcCT?t5O1^9bNDVRuwIo@h5t(Vql(T3!3?7&BVKt3Db z$wZeHdDAvdY@2_7m|MN)>t-?R&DeROaA_*Lcjs}=^AHKIcTdc{dlzyEuzJ-4D|gL4 zzT82o0y6HH-8XB2%Cmk>>%q{ZnRJ=R&A@}d$Uh#F@~U0$e+Ecgvn)v{s#o}>&xTF= z#oJ0=F5M!GJEjan;WOeRspE%Rw_mPo}m0s_^_wmi7 zFhx7lv)6bY^-U&>VNeN)*Cr>0@+#*)i-|7}f$VP}dL&9`O8qLmzTiM};R zFgRz3#_OAWEAK&XJ-=Ct+5H8_aP5+M{)Z|z+&vfni|wrg^nJe3Q^*lBS<<8%7@aQq zmjJzLi87eG;LsDR2;<&A8_wzX-=5$g9E!;@YWrVm z46>PRQzaKTp-I8Ig|@#IdWD2(Uq8M&IqmjHUP9oqMq5!~ikpl?l0f{!{gTxz_CDpx zseT;PXH#fg+1|esfFv=IJMocbZAK$(3QbdLvou<$K&-n95a8 z!=7aY%k`eHf#KrSzPkQ?rl@*`==+fML2;%rCFN*2gI)8dApVEf#+up+b9d8}+7}IJ70B|`*#}A+H?EUc07O0EOk8jTV`quMqT8WL8s-v@Q?VarNJX6&8{!uP5~c( zykRyx()szvsqzDO_*K*8hxe~Zu?laA`E}Dc6P#R@ls^q`*jc$b*(}9z@d*tIhPh-} zA%d${S{yR=+&|VcUH$a#gTeBZ!C!YSX^RL>J*TN&9BO_u=yYM7^`BY!pS>eZ-De%s zD`ptBX^Zh-Zy!y*j*Pp-use0b{OBKk9T|1o>$K70+T(!?CY`&EQLgR+JzoD) zY=2zy$+_+KQj+F)1#sS%!oRW#W@y~Vxb@LrFMcrU?$hh*dOwQX)Q8jtE`R)wafNm+ z;2nqB_@&7^M9;wOnwbmFT;Uy2-1VAcFOcJp?-UjjvzQEw4zGMNIQRNSI^(PP@&>IG zecW#>k~^8DU3TH+w#<2v^~kZ4(Zco|EIR-^j@0pA^c{E&-UXNh1@|eK5fscx%Wy7Lmj90zAp1w+5e7-^m-TO zvbpE;>)xks<;#CN_zx;I6Aj0DKA>ce41+)FJyPb%MA>tB(0RJ(CL&dVsy8k;EG8prCl15uO{wx|D|KJVIBdoXj}V864wIO{*|wAg65EKeG!Uavo= znP8&%Z@2^a^eN@H%CmPJ(F{BW;L*t)VdkDPRxUTh3xaa@JgaZjueW;U?J^V}t|d&( zOcw5bv8hEwn4i-hC$#}pj4A!URaL~bm(>PRz4|586{Oq84aBt{u~F7G?|#rz92aLc zNo!x>qp|+{$fDIdIE0(VhSNsdW5$Ftt1iPbz0xZ$;6Favo2gSwTvvc3^~opB~(+nI&(wjGcD)Sxz(aMo3pvgO2lSB241Mc+5*U^ zBfs(!uBSt~<4V6il+5v4$j9uha_X$67z+H`KRwgGM(j);TD3`x2;K9B0~;v|WI(sO zyNZf0my|D;EHDVX!1)q329(W+0*4O#K%M+e;JnFf8;E=elLs({WVt6Al);=pw{=Ug zdm2g=l)-h9PGb0>&D%j^j7lFY#;MY`ru?cfU=JcZIICnh)yvA*L$WTr%BnQa+EchT zx;Vn}hTikSwH!G%Q~ooz1i36sq;(X>uJ8$Xvbi%f3OXaLS;|XSBG6hwLk7*u(D5Z_ z@W)3h$j3Cgh1^5&OTUDi%sY(CMvH|*M6G$mI;~s8iA2PW*PIov+p8oCXAKc+*-_p`KxY)tYK zNJ4bK+M+q=s4#7ftb!X>OmW()=@G)-@$Bsm*L#UjTn+|$Tv3Z82QVR|hC zt+Tz%$CaB$ye!n5b4Q=(R-!ltIyA0-tkew^)$n63Kv#P*kQ0x?n}DM zG&(>uzYt}}_FKq@{8Xevx{hrHS}>=r<4i>iJFNRm&`eeQ6R!Zo06@Y3ApnPP_y%#% 
zuK;5}m^?|Cv`v+4z}bwu0GrKJtbqYwMWroX;}s!Uv;eS(Qpuy%D}A@=jLxH?m7y#K z(7{?T?YFGe+U~TsFQCf1BDnBnD{Sn_wsljzYR|R`28UxkWiZS7bjyu{OFhNgk8A!* zpYWvug}#^5C!T{;=wq#h<)wrL&2D8mip{NRaLmdqS*M%KOhqmR_E_NR+*h!=W!S6} z{kli|%>858vAf99oY4mu2L`Owh_XNedr7%lsTL--|AH^tea#CL$(8It;yqp+-eH(w zRsiU><($r;yr<^u#hf?}8Vq8ggu$%PyzTT(F$GVq&009fqA`Lp_k`1klTWN1IJ6bZ zyMo(p+??SGs16caqLYUL1Ka)OTI@V5@mhTkGugIoZ2WQH(`icd~!TckPk;L6(bKzY*WsEMV`06iS6VhbM*=tZmn|x)GE@>%|%U3{JFiovL@eFG{Xbi(HTH6^m1-$#V9oeq$=Jl%WCKO{ zO5t6V;flHj)&#JH>P?dlZP8u`AMQ4pR!$WZ%If+ynJ(g@a%o)*4(t@htZmBe?LjD> zDmlP`J!;RkWin=d!mQk4_KZ*Wd`7UG!aBX?#KO;P-a>bs>+f3owEY{JUa0=Il5)=TSI$~ag=?y!Mzkc55 zRp}tcTCVNEA_F}hA|f0ku&l;qe$V0dvV}ueHMYD(2L-0Ej?g^LGp}~Ja~8WnZN6DcwA!+!dS*J`a=LmRNIrzLe*V}*rn(gPG_OG2 zb~aH)WKmQdUC```OcsK0IEPd`hsP$x&Q@U|U161+%?Bs2O_X5-lPOlNa6(U7rkyZu z3o0I(>7r8U)$V2K{kEy?#a;}HEI^N|3c~How=ez(4;vFA_ux*b%nvi2@n)v7uY6`I zJ3V<_&#<&{v&x01rrC3Rv))U~KLxox?a#YaB|a;PKRdn`9knpbUsRL6ckCsXlVb>t zxrHUBi-pY1ec<7Gw7RZLX%|&O&THM0NcyV^HYfH+q`wa9EVgFz`?jT6{mA|~2iT?E zO?)U)^hC@)Fk>51*aXF0?Ju{BD5D+pLJ#=kE$tP8ULN|2qUbPtf(8(q9a|E&?Q(_= zt7#G&s?#|qWWmlbAcL#AF*4YArrfbG82OKvDjMT4rvw9@-t;WM0WFhpB&4ewakyZJ zD=1S}yMpG{lUJm+t6ASYZ}2^BxQ6*ntp0smB%((oZ2&%00`f*Oq{GszTo>oi>Q-E8 zAyf*c)AB6FHNMX>_Pd-%(g`N4+lB)5r4M7dpR4jld+Vl?wBQOZNu#DoyQbWNg-Uz6 zyvOoRRRQ^PhHobd0wpa&Qzh{xif*_%t4DluS5>gvEIu3I{V`z!42Rt8wFA4^1XQ;9 zG6!bksF1SV`(h}DQepqvHH#8yfnWU{MrkYb3LMCaVt9t}G8W9cFq|+a%FBw-fgxG? 
zYZkgN+QA8D(7aCn0{@VzC;9>|kgAc#^e2j{^C;IC8+n}8H!lDJ^Vg{SWUDRWg&RpLCtIT{w)w6s{#(|oBOSNGa$qyc;#W^fB>dlo2KuCZTr7at#%)`< zZr#L@)5dM#z-|m5a#P67;4pH-m@#WcY*?{v#GExNR%}?sW*9GeBc{^iFpne25hK^F z7`colo3*rvG2_N=IGa7I1`Ae-R2H|Dwk~}DVXlYX3dOY0H7Ga18>-} zVJn9X+czWM9#gi4P4nYy=dg(rcbOez&ELv}vv%zpXK>f7!Cw6swq|hRYzVNYZ#VDW zzJCJ`E_^ug;>M37Pp*9aIrHYupF@wX{N7^47L2JBY0MZfVZ!7J(FkimF5eFDxgz?1~aUgic7hsH`U>FF-k;NvYkdj3f zaf}hhfLRcDp@0K|7@~+E%2C5F$|&=TiYwX@%ndUHqhgH8AcKo9%IKoYG}BO{B8tic zQ%esw1fvWuDbk3{iYwysi;_23P#p$Bl%tI}-Dp#eH{RgjO*aW?bB#FKD71|?#%w9X zmkV9ljSy(AsgOesaRUxRTXbP$l1keFV@zJ;bVQOyI2c2XF}{Sy3^Cm>GYnA1 z;7}M&k>VE^VIKZHLjzJNobYK1E0CIkRieJK)TpFZl|ojevTD_-t++B4Q?+K*)hn-5 zbHX#kY|{*2m~Mx|4c^=*m`B?MW1bMj7(@(r?csofMtcsnkxIczYk>fZVK5G6;A9id zmfnmbj%u`JV~#c1KqiisLVyF#WZ#H04!WXU5R1rT6B00S7nK+s_vn1vPu6fmOL zBhG;XivCRk^Gl2~c#OaWC#12=Pyxs$Y=zWO zDx7eE_7|+MRrcCffWit@K}8l>u9Ah6E55cQsa(c*OnL6l5SpFIGGhRD-H=NOP*%E{ zxeRiH5zI)`f|$h!U`p8d%r(LxgcdyD0T9T6x-|2y&3FbIdz@#FPGNJ;INJ1GDMli(aWfnqFpBq*vI6a);Cx(J!36>uP>|h55FFea6T5o6W4PBCP_MP^+Q2QMQAMtJ56O5NZF2FQTRss$fvP-BZX7)djPfe|sP z1~ecGM>_)q^$HlaZtXMoy1_<;so(72#41X2ug2*CgV z$u$rdQV#zKgB?x#AO8y_<#b`G!du1HzC{rok6tG4N7NV zOC>PC4M3NJW74U0^Oo;G1|k@f6d20ybzTbwu<(qK{$o!KZV%-Wco+;IF#$EG2sAQM zug0OXs%3EaF1#6uWFF*u5ZA%+Er1WdTV06gAKgoI1rKn+};F;s;ANu2@Yz%nqy zHaJ5xk%UFSgke~qGlYt+sDSpA4^gPthh^A5p@6Bpf<4XLs;oeV$(&WVLX9z$tK3Rg zv;qxK1F&cdVk9207(?SN5jo5UG-$)*6@vzzRZoZn4%m`=b%aJ}i#EkXH&8?RO^Hxl z!w5Y<2Yo|!y?{6nOxoR)5P+Z%B*6X$L=`yD;REEr++l!BT}BPPiwDt=zq0py!OOi@O0 z$p>5!oruFWJQG8#2}F2=Mf`*@R2=^hLk%2&3rLhZP1zK z{7b<6%LqZhIE+KKsY^BppaK}c092O{jKjMGB3DvXHe5!$;0B*LVqMzhU2fH&`Nlp# zqQ=0-$^-+Z-N0E%lwD++fysatmB3zw$4F>e256iwjD&Y+V$THD7nvT=JWbJnfh%?Z zhjixGfIt>N&3~L$9FPKJ)dPWCRs(bZDR|ZZU+i zhYKi}H&lc}o?tk!1zV_u^dtif%nDM#pesxT3mj+4naT-xn5m#-_Hd6su>cBezy+Wf zSI|nyA;nYh9FG0U^c=>@REo6RCw7oTI603DBxV5NK$Ve0211bnEYTO{M_?f7l})Eq zmWvQTKn7SPHfZM?enV>9)W6jo9hy)!XoC^h02VbB<#+?Y{L5>+P$AX^-z6emvT2*T 
z>B8uxZ$P5{0Q}#T8K^LP5q<#!GwA2yiHD{^WHBtmfZhOPxj8)s7{Crts)ig^9K_!4Z3rCGD*WoxxE^LyNc*!-aBVAz$IAj<934uaf!$3Tfc-g=) zsmV6f0O^=z!({+`&DX#wAbmYgOSF`MR>Uw^V$DR{Nz_D0P+ZLj!|}L)H{1Y})xeKB zB!jL~jxK1#0mTWBg^S*kQXIt!9H&({MGDLyh^D~GaYa+;U`J8~t&oKZ#1B%?z*#)g zpaTA628;wVctg7GDKj(=G;oWJ2I!BHqO=%;6@3$YYRX`U6P6L6H-y7Ee1kV2UN>CB zIe6I%d;`G1Wte3^3ynhwiH0}`0R!aHHF!f&)tv{a23MjfHiUx>;n16E?bdQFbo6C% zJ)p}FijNiPd_-D!2t%PJ7pO@Klp#=&y%Ue&DWu(nFhIlUsS&L`NC#-m>qP-Aa>#>3 zsu*ZN(rAIugu!D;fQ1z9E{ZB5nU?V}lJX%#EUaanoRhuzSpDInIB-L>CPcK3(EoKqIsS-4 z$mW0)WLF7z!$W`I*b*rRb{t031T&Bc4tPwnAYNS6YXBT-N3^Ru zEyDuYL`F~pEjUF|B!&18>;SMhsTA0O@ul2Dr==He7=dWWWQA1DAn= zc5)CK!kZ8X0S*wL4S>TqkexXYfzoEB-ED>uyoQ|x%-$i)ns#jxEAbNVMxgmBnq&YLc!1bC=~ekibVno`2HVyw6exM zLnc;4|2R*fDnKx}lS*I&#nIL5;6%(OX$dQab&-oTh=WsY$?;D95e*ALz=VTS8Gu=y zF1r2}{sEasA0T+qJy#(!Mpy^fd9bGc3iWS-W0s5*s7Y2y5X2HN?P)^}uX#(p}Gh46Ks3B>=SjLNWl!`00XlVF2r@ zP53PdlAMUQO{bTjPI|R~qIg5;d=3uO9eJ7~y;ht-e2%(pa=%H0cizcE$V5NN4nC$U zHE>CHLCd{1G@=N~GaMIPd4qqpfDE{R25?aeERZ^lpv3;Epfix1s&JS|mXG#W#R}Y9 zRhSqHkjgv5TvfbS1(?8B(3py5MUCNDM70&Lctfn_7sVMUrF@f0REv3Jmw`Tmra4)? zo>li!*-LcGOAP=EM-^x^s}MW^IbwkE(l8tf0eHSZ4WK6m^kGNWp%GkA0knX+gu^uq zj5UCRX24~d9&t@;_=W>sobtv^ONn+sW(HhTIbm=Dn8(T#6J++(cE|wM{HF`Fz@g^r zcYvW6h2D1*0~&$I9S;pDbiiWmKm!m!6m(_>5CADaP2vW5;9>~TL`dSonyG5YA(2)b zEK>Q2TZ(w`6`4TF#K14)LdwX9w`hO~n1BnIK>p{l77dhu1{^>&t$DM}YT=OEFMwl2 z_Z#bULk0}Mj4u^3laN7>!^&h4JvtufU{MROn|qZ*w6^EBUV}5ir^r1f1lzXN=qIkCoY zpr%RA0bszr3d1sN(I;j)a+QD#Fe;P@{(t}=B{|_|8pK2{?4a6g?Gv z5;|$im%ud-w?vpl(Cg^r%~CGIGvCGNOwP*GfJWfNHat!%SHL$#7+&P4O_+qLkDRB3 zMN*uAi6MY>H--2hEcl2I$dt;cY(NFLq(~;zjJ1!3c@L=ka~L%PH0US(C_n_|&Z6B? z6Y+iCu*?#oOeY@GcKn=1R#{*EK_zuf374VpH*f<6JrxIqgE%ZzHwZDjScW*XU2W*! 
zzy6had=NMo3!&L>Mqx;=}=SaEz9N2gAS_XmDXfiD0yh0Rv}Fjb9r#D#P_F zn66;cIBpD&o$u(`&( z#m)P6+uLDwi4j{{cU%6mw|Sen9jv(7vlFc-S8kO;g^CrKRkX^CxpL+eDKKOH%=vT* z&y-!Ya+Tt>YS)yj#ZsoY@UXF*sNj8R(`t>^WV3N>n22ewru3BmA3{CoB8_r!PyvqPoDq<9FV{Q z4LlIR1QlG6!3G_C5W)y0oRGo_ExZsyd!z$E0pkjDMwr5gaYn>qL?kA}5l;+p7-39A z@x&Nq9I+T1=5Qz?fxrku$Axt4aiSo3+%ZTY7n%o$U`mQfCy;jHC>NVJN=eF%K63IG zoTyy#NosxqO8x*0jOhv~vAW>~s;|5f3o59}0!y2)#CmhAI>#z%Ic6LSY_a9=@-r{T z?BcVn!N`H^t!5f46tHF(V zhL~+~Ym1m}-f|2sKH1vFEoPjn)z5Br-Bqw%jfoDZ0Wk0eoO8kfC!2GwA#Xlwt|>1+ zY^JU6ylTmNHoN@N$&XrVt=%ubY4a1uh5!J>kll9OeHY$%<(-#a3h&v_0$_3@1{xA) z6wzP3M6?S;fK?O`nuGoQkeFs{#L>qhC7$RDh9m-V;)5KLqlS-gLgvaTx1_R3o2c9} z%Z@^}{z=Oxb@>t~3%HV#n``VOt4^BhwAn1MZl-gru&OeQ7(El!4bemuJK9gdmiF^$ zsGA#R8Mm5Ih8ET`-Bg9GH{J9V6;kke(^6mi^tIX2?yRzEl4)UTWZL6(sJaEgq23>XQ-JJ8zJ^vi^(Dyko zpA8@I@ZZ9OAxw1>hgsO*+=_9=8E8^uYxaasd^qFYG44I$A}fZ2V!efG#M)wJ(ZPcK%W0;K7VOZ(qHe1h@sCz4ux^g|vl{(d_C_@8kIA>SI&>OJw)*SRraB6TH4h94e z6by`GJ>a=qxUdB-@|;jx)5^xPR#-0cXlrri$_9AO<)3wh4u?6^;SPD&!_rB|0>)s` zzW&v(!szZew(}Pf8FsLL*@bt%3m%asB1Pc!4kI}D1u%Yr3uMG(Njf3f@;Xw+GD0t9 z(R-3ISYm+=0tEw7U!aVDRs$tc~^qxWu>CbQsa7`|c@`rIPE_r1@2iDcSf+;YK1 zHYQS&=@bFkW*JwwqHAEglxr$kNlP(MZI?NlY*uv~S?O(Rbu)~gfVCF8y-NNtx*Aqc z0`LHDe8XAS3P=Iqf!XFd~}(UfL`?ghkRyr?j_ z`w;A6m%8J`NMJJxkuZcXhl=Isid9VK6)~cbCv{O7$TOpqQc};Ic=0CmT;s}&hl_ar zq6gIz6FGEdN1p9bdq3mjoW25%uWW;EhWgX|p7xiDn(t78NerzH`6opYs(g~7R0?1L zi>+6s=+E(u>~3xlwbv!)10`a5?B-zj0BnC8)5+gQVAqSoRDhq)U3XPRLZ>sZOkISmCRO#X7Yo$9#xFA}x2 zo80u5GX&No3YVY;>y#7>>T&BP%f3Be9D@Ug2(eMP{Or;`P&Lm~c{$a`sW5Sj zD;Kr;qq(x47rp6KuYg8ZptA<0L)`@DTvg{y*rf=Gbe$c*plHr@_Enu1o1(-xHl>{u z7G?HKqZn=C&wQRNCu;oC%ZkN-Ski#{)OKn7b#*#2IhiM3sj#L z#f)R@@p-?Xo^+124LKcjEOw+LWxK~= zJ)TlAdJ6UY@FG8nqLO_bO>v@}1DR)31ICy6T5KwVNg9KyfUpTw0ttvf*t7-~t#BJ| zjtomwE0@SdzFXex0v+ijh<++L63@3@gsj!Ce&d&kh@7>?whQR4fUVNLo>p#OZsQhzA4FhB=BEk~u-JW$hYKu?Cbh4%Mwi ziK^DjxW}@6#?q9{%DjCOZ_7q?PvgQtpakGpOPy|g!m-rIJq}z?kJeV9ZY^WdN6U+w zdVao9g8(%1++i2{*ek^5mE*{9_!MKR=B5Y)?BxF27u(pJZHUb#z$xF~mW^U%f 
zDsGQF4WaDFvIJ={lICef&2B!|Z>}IrVg^`%S8J;0)WUBUPj<;@0=2nBZWaCuO zCdXuh27rvGD9Bfcj5&@E`Fc*3VE$#68m=6YC2*##t0pI_q~+=CziC1eqp& z5G62DYZxSBf4*jcd@D5k=WA4I*J2NVT(I_RBgl3t2q_2%kxl4^uNIBL5Y^2amg*ax zulvNyaU@5os^!Y)W4o}93Q;I>t_58H0PMby8mrOF%r321XRY*tM3|^x)~<=d5nvK# z{X(P|@bAxx#P9e|V#Hwo{?ri+z5q!KFrJdfjQ)^iQs(jK(ExX1dMNDSG>_u;=(4WC z)6N1Czao%kC0CjTQ4ndhGHSIrv1tSaYX0KIOr4O3?0w=A>%)+{N2RHFuA@dS<`$X>F7ASi>1>{XP@xb%V-DeVc&Lma?^%COFH zM+%K{~iEU>}?F)uAbuM!3G*W?V?B#H66%mv&!luR&Xf`BsQ>uX$9>`ZH$aRM9=Pr>g z4uiRZC02@#mX2k+U=ymOr9OW0M6V1tAqSX(>DnfTT9|1#XOu=k2P%cAVMJt{>`%{B zBt?GIiBkTgVAx7^-YoC%a7TDVARGcoFM>&_(;*<@A|&ETpL9y6v`Vow9%>*MltC9j z;)_rwO8U@b5-%r=Qp4KB zR~E%Nly5CQ^ppgK0}EqOm5-+?v{z0u1)qTwU*Q!bX(V4!Hc*kqa?ZB`Xq3hVG*naazhB_feqr3 zNd6iEN%P7|E#{mc=3R>fNAm3spY)Yq0}f=wC?$~R zUvc3EF5uaOAWrfq^S+`S@<@B!q%XPQViR#JE;gcW5ieXdeO$>^JS$cggJhA*kz(a9 z^yIb3VHsX+K}9koVeZCMN+S;x6(w_QGD$*jgM9b}n~Y1jWF$4=f^`7HLuW;I^a2Mm zU;vKJmR`tlx=;I}CHktRaBy>L$7B17wHdL6aF!)qsuf$)R&5u=DFviB)lg5~P*-v) zr|9Z9$RSs1hf!c9SB&OZeao3{oHkEy$;OaRpa4 zF{7*^Fsvea^#ptyNqiwi8&(j;TEkKobbVg|7Fy~uehpJf>XH~VQ!KT9OT|;v*A=Sa zens^%Xkh0;3!<2|(F*>Q&@!+Bn&O!2801fMPwOZ1V(Ph9Ag-U$H_!?)J0(a7)Gv%Rfxd` z2x3RzU<~k(Nyp%bm)Ihd*ocjI3?KpzEg}wxn1W2qnPi7pS2w;m8}}L~UY zDIP9S8@LRW^9}pr{l4i#k(MoIhT*9Fows3{J*_#REhrmPuZh$y~ft>z>!8g!3oy(ael7l$N zIXToBp50lU%~=YvfD{OVAPPdC3&Nk{APe?cpaB}6_nDtb;h+t=p#52(^O>LP0S9hi z8m6HKZr}z+A{Tmq8hU`DExMvBdIoNw2SWOz<#+~WKo>k(rDvd`MS2=iTB4~z7j$8x z1B%jaz#F`Q8gkkj_=2bTf-i8|8-&0caDbUxzyXB20g9WrjT^a*d$^OExtrU$pS!pfzydD7wlCneuN%9wTf1-Dy0@FVx4X$^ zG6Q70w#nPIUHbvf`@GXzz1!Qp*W0znTfK4nyy=_1ZM(kb+r4AkzFm6*F5m(xK)@^D z0ydxm3_QRC+`tL^zyF)SA6&sBT)-84!X4bfCtSc0e8H`|yk(oV`MbkmJH+oB#OHgr zrB*r=AhOGrs>tQC8HX4_`01qOC!fl*Gq}1k{s)Cmo5!ye{m=@?r6asIM97JJ$b)=C z1mMV#ipZP%=$stNqa4ZuAOOO9YL)zrlpLu#NXxr?%fTePxg5;5Jj~Hb%ZYqyiv`Kk zd?uG%&8L>l-CW9>%7UEOaSh@Sm4~q!Nkemh|Lcm&IP^6uUydcoR;=n z&mY~%865!pd?tay0HgpMhVmP*;~PXf7{y__f)#|xqv=S!aKM3Cd((27E}KIe9LmA# zrm<~%oYs{xoFN_8bA8a|yvUJU%8y*qD_zZXo!E;#(v2P2lU>=Do!N;!*qa^N|9l+E 
z;Tm2&2|@kUMH|$UFxy4j+q=Enxn11;MVs5d{oAj7TBotr*F89IJ=)(L-s4@~=bhe@ zoY}Md(aRjs_Z`oh9nkxI*z3H@v3$(EoZ!!V;J-ZK2jJkb{K&@{8X{hTprIHd-r~2x z;x9hqCEntL592xB;w!%5DL&)1VH+f<2E>isSAHq?!QBOY;KQ8DXI|l%T+9*P=J9>z zhg{6-J>QYM;cZ^%Z+_@wzUB#@&V%9rfZfdXozH`vt${k(nr~Ju% zo#*}C=ZT!s!`{w?JOP5CQ1GsTxRT;q2b*q3FVs+WYRBzzqhHPt@7%5!WI*Lv-tVaq z{Sv^tVY|Br-|!Fry9s}~CmjC33EaA^o4}{LyDNb3AAhB})8v?Q)0)YDg9-slVU;7&XxWQlh$DjOz zTl~*I{IlQt&%XgC0RH1&{^y_m>)-zGAO9zS0MJSSf}t3+0U(G86j*H7z=Hw}A|zH& zpfiLI2PzyGv7xYHH3Z<($FZZwk03*e97(dI$&)Bks$9vkrOTHvW6E5a4}eXZ3VKYN zc0=cl8M=DtqNZz_(Ec7brUCWYBNx)0x_}xjnpEgio<@aI>%o=l3?9>H@tDz-M~|^z z$%6gbd8lq2O|^`8q)5fdB1g`qELmpL-c4#Z?%r~;^cGJ1 zx9>Q^b@x_28#wMPSgu6BLcMzR>C>ramu}rUmhIfHcc%_7-W4sYQmI0f>J%ze>s6^b zwI2Tb`s>}_$A2FcegE(QXrFxVt+&bqHwjRHPz(*W8hp@`af%ayk164V6(*d#y&V9i3ypl3=` zi!^6?iRqa;b$V&1)6Al&rnBfN!#r?+;c6CNvCp za!Dlqk6a?}ypO~yuf6l;d+)ybUV`tv|HhjMB$=Rs;7u6Z(9Jf*SV)XB3PDH_gcJ`{ zu`m${BeBICi#QNQ0@%aRJ~5VT^2sQttTM_Zhh&ds08}soFhe4fB$PJ)X!FfG_voaL zJ@$wU&^`{Gq%z9z!sybwF#6@9UoPFH)KPP3%Pxyv*|gMDgSpErO=G>w*i2(B=h$bP zeKwxA?2?PxY_pAZF1FlBH!go!V3`FoocT4Dv&h-$;9kN>c&D~ZBTL{~V!f)aVT94e z7nE0S`Q?;{ak=J|cfmR4po1O;7+g@X#pzpAu>})b48gkUs&A3T5LLX+dbPEu9{c|3 z(q`L4>Z`BL1Qkpazs2vXQ-MY*kxbG^B#uPVNF$RtGClQ?xbZ>vcieGA6++W6LBf#107SO30vhmu2uz>?_3^U3 z=|UMn^AVDifg}kAjephpPKT0PpX%1OAF{1(hr-&U+Bc~l4C;kiVw0y1mnA7u@l00K zl3!}^nqE;(SGuyCtHc<_T8Z(CU(j3_!B|Exo-tOHg985~!dBcRBA7kZ?r39wA9cG~yBPs@EkmImu2!Vv?7nq$WWb zO6bYU5tldx`y!RFVL-!v3lRo1HpZccVdz2%fyONl(u|4;&>k&YpfHC?%wihzBMrQX z1#~nd3TiND&)nb!g+xK5We^wEbW$_1i3_E532cme)0^Og!g87ra4`(0n4orxLD_g(louMbC*sH;pr#LyTwJ&^E*?6%>g?#0A#scDEVINwaMGCl3|1 zD6I9M0!&l@56;x5h}yPrTbgK_v;?P(&MFM8A{MQdg;FrKs&ZotF67L}#*`+@jLbc* z=A1Qzaq&@iOlU$H($EBHK?_SDe zUXJ)=y^@-}PeLM+^~zqnE@?0TL`p%yXwd(9*$fatq<#_6C59d{A!3mA!4$y=0Mz={ z4tw~+AofUH2T*}&NQs*e6p{vqmNRVjsIY@Y5??pyGZ^0tn}=GVm_~@DMb%TwVS+QS*XQ=eDT1>KG~&>_Z+N{c#emhBeMTl7G0lG?y*38!1zW^}Th zDpPOC{5WKlG^A+sXtK0W#?gJzjmKqHaPwR&FaYUy(k=0cmwe(_!pTfuw6iLte{ zYzo4rOJ7ezu@l8|Xo^+%!DRzyeIXKo6vG(Kva@ 
zu^7GW70;|i8OY%bXdLOCGlyK{mUQ89?6YzePAp%sQI1yFLdJWyy4q#Dcz6V)x%P;= z-U+e_O&C}4_E^V^_jGxop5#+Wt<;*!{Cq^UIaEV=bAJ)|U}-jT7(lNkIx-WHlV6nsWmcP(H4Vqf2*&CT}6j z%W~aGOLn~PV-*-_3iduC6ZU?qb6V7MsaNZB0W9=(by9 z<5<@QPDwTXL*aH&eAU1%%{;5FuJ>IbmIsC z1$7KYcc+d{l@<_{Zlwhglok~f6cp6o{PTbNym-!Y&VA1P+~@vY*XQHZ8>4Ut&A6V+ z{I4jQO*exaRoLinsZavB{l-P9L@%mdB2rKJ)ud#-e%R9F9Tg?L`@Sk-#RhFcQW1^@ z6HBh!E2^ByZsSW**>3c)4~$t!hpqJHZsxbMumYox9)~dA}D1T z+ma%wIm-@y32^SnFoA=o5JbD!JBVH$^2 z{UM}Ft@}1AEu$`^9);NwNxSpZOoh`-i%luAQ=z&rGzw$xm~WnCudTD^Xc=Wb2X%6Y zx;nL7zpQ6Y^wO^EPzr6vGbuE{Avlj6_KIp@1p}I)YOB6#m*w6Jx7#jQYOAS2E3HYr zYv*AfS*2TQTwgYj2+OW91uAxM|MfA~4=KgFD-W{*tsArhHWJ(|AsSbEN286qS)KbI zknPs4&9}O^+9<;&Kq&{7mLEgDL1XD*ESds3we{XdJWC3?Ttd@%++b6#neK0;UM@5F zE+m>NG&xv~nKn_0$Wt3GPq9svDKbS|nrP&E#U-0-Dn?%q5OWR3iz5jQ4$gzkYGIwIEO5h#Tl2aZvTj_ij{R$>75EwIzD*xuCb^n77hy6%9kE=5D(l(0*3 z+JligKO922U<7i~lu+;2IJRrsBSSLqbPbYIy4p)r3TS(~}NVv|F6F zYio`4+ZF3$j5<&;qCBdk@yyV3vRRegM!Uu4MA)V%ikk1YM&Iwqf7$|LcmbLrTC?P2 zCK)&aBxp{V`{b;fmAr6o)~UxgNLJyqMrk8$maKz94Xx@(Paj_1M7DOe);>u$iqR%d zJRmidXpU~eaQY~0D#AM26#ktKgGMLJ$XZp@DD&uA z;n0$Ik{wr}khS(*S8ZUH86OqqtP6_;XpCO3BG7DKspCkiUA}Ur@J$ z)PGxrz;O%KVIgj1BO<9}xmQ{UrR*5`{w(^9UaVr+49aC_vH&zlyUE)LO>Zs_fizS&Y>5lXO;^9~(vd zgd|gAa)#H9;llzIQ}E6+0fg)=x-es4MiAcLm{zL?UNPM~%M=g`)bBuMeGfLUs(qP5 zzMouJbLWshiFU64>8qk(__BOSb#2m&SLYziIUJ8uQB3~gK;f?xs`6O1)H!u# z_(I3$JW+9J5O%LhM8j9qKv|KqvG1(EBATF3lTPr0ig!wge5eps9TH1+wq&U5Z<%Sk z%y729{U8kRkj78h1+MHig>&ChcGZMN*(pmwpl&SlkNjK;w4wbNNY4a?b#J0ELT$?# zVm7a;J*e9FBeP{!WdWoAqPSSdPwE5>@i@sURiw@sRR6F1B`#|C7sRxN&2}DY7PA0BfA`0-1dyH z2xs&EZqf2(bxv(8L=M-q=A+`#Tq@N8o`{-~>WmJ5CfXE_RE@L46mJww<*!=ljTM76pk^tE{x!eS9>$)WL@?29ye{6hv$4?IZtGs%I6TG99SL=PQQz(tY|_!^7(nw%J1#p7?kYF6F^@Ja>*GQT;fWv%sFEhcMy#6Pq2UoW)A@li_3u zI!{LSdeiiVXS>~+V7tdkO%imH8esDy;q+aC3zBjy58`n}(02-*b^xh`VcrD~{T8`mJ2-;_;L~zCuRWG;VL^X4NI!`! 
zT(J;(>={>#oh;6whWy|w#Zw=u8`Wjel;Gp$lBv(*$uzYZHf?DyOr5b{EHlR-!j3&< z97{Wdr5(i5LaL0}ProR^kCgD>$8So9If3mQd!6HPvdJ>&g*=2IvESlXXy+>Llob?Z>$!e zVk>j40v$KSwt`=~>2h?OH$m}oV@<{|QSA4TMqw!bS9UB&D)d0mpn*xa{b&l3FUNf~ zqk+r=EaxTie?VgCDd{W#kU;xZd0}w%i9E#lu8+SqMOD5QAFkO5NXkXafMlap$~niq&uk1r}gu4xl{&y}=MAiaX;E5(^Yry zLd`FKls-qHfiO*=papd)LOt7px&Z;oesVb}8o2)`5XhNq5$l0)2XZ37qx5uiBDBx3 zw8}YO*|0!iz#sSyObY*pV++*F736MQ#-;~voIRet-$NHVbUg>iiU)}R{@Z(O+HV2u zzX=j1A+GO&sIb8GY=H3mRY5vcF#@T0AeTsIrgj^+a}&gF0Tk5)JJkq_z`xSq09ya# z4V3BFaR5Nrmd+zba*yw(c+5Ovm`K^MNl0m7lTKZdMxZ=~K?IJa;Qi_DATDWUR+I1w z#Yb-*IcX^R%7=MCM_u6&j|O~TtB=amB>`7*Fbo5s)7McaN5B`t+(r9354u7Df{6sM z6#6iVq|va&%$M1=G+L(UF>pN+7UJVTyRY?UE>!_iczLOGQkf5C*p)&*%>x;ID5*dK z-@^moo;WHHufh58Gj>{@9_(5FCXAB4P{6sK*6?bSlME8T%J;%<8nTK6lsNPaUw~8C z`3bb81R{Wf1FcCx-9fy?x-*h0r~JUzIA1K%M7NCP(<0KKQAd*F^yV-@r5LGxXYbIm z{yt2H{FhnF10!OXFUwi2x&5$`Q4p;>`~jsFbGjv}ZsY?Z;XCkLo=Nfb9A2^p2rxUk zQ8URiMXdhHW-n7uq;;CCqJXh9R+adL`?o>pm{CI<9w{E}y6njU)?RyxAbh!s)X6Gc7 zZWr^JC=F8h#8B6Z^N02dfGPYhbU9rKTE4|D_o$Pq-SOKH6Q8J3A9hU0?azr6CMO)Z#`!F@BH+HUG9ead#B&uSoB ziYHiGOd**~q^Cx6_0GW8)bwR<*X9~qFrrZ~8=k8~uD8J;kvoIjHw2=^`G<{prd@KE zx`kd9-G;*k1vtF$SJMif!~kDi4v)yBvDdBzKtOCY@_FQvxxV_X6??zUDY2$5Y!cSU zU@J-emKYHDHg|{^omK#JW9MfkpPdg%E?srqV-R$O6!URk7i}R>Ok+Z1 zHrpm9pPEXRt#ckOb(*H_jAo`B_ZCPFLu9a_cG<{EEUi*rYwX~-UiPQ*87x|f+8iSJ zoUhOeo*`k{T+N6IvDpk6hn!MYj0He!vX!PPl`>#wTejHIgIZi7gJsRXqTu|xACUTQSI7f@3sB)3MC%{hg+r2H_p$1v)84l=s& z?6UXI`=TQSUaJ+#vAcKhX#`>5U&*;f?Ciao@-!G&kZA&2vUq`IZ}kg?dNkC!kEVGx z8gncIb5uB8fl45X%37BwM%NpvT!A%4lv&Z0qyoL>~QLqEZZ0s1vBA6 zYW^&IE`t(FO4Bwbno=yy=}_uA7Ab$t1ZEc!$ewZa+520t?u_`E%3pwn*22>#(qBg6Uc6_-TFGob6$2F-k^O`k1SDXI^0gjR zwu2a;$_W9&LIPfEBe0g7(1YMtsSqO22JkC+v5HS2C z!&knh;5AlHI?MtL?M85Rs=*~QwN!anyhEc-4&-rWz;V+6Od&HRa*r%wHdr!Mr1vK(U&t8EoMfkoybqKnMT;rjSa~_yLiND$J-E z8ia^%g;Ck#0TgW(L@@^~8VWp+HA;kMlZAvv<28X;JW$u74}LKmIA%EzC>J+s2LLicD8OxHyasT7TZ0|dhr!P4WFaGfW)Oe| z4V5Xeq{UKwg;H`Q(Xw2n_ghnR(XvSprX2jOhkph}CtbG6BxpGpy*O8Vo}{B}zzZv@ zTa@}S!>nee;y`S9KmN)r`&Mlq$nbD5#t$7uB)A{#;QPNYmv7D 
z*_%ePaSs42CiW8?gefH1TXm_P3}XTSks|Oys2O}r%!;v6AVv0*ku=qh(l+l(;SylA zk;0;K$Q&CL4-LbeHZvB~b6d{8R82UK0!|lz)8-32rQ@)00HZfl<0P!aei)oLy@9fq ziKmvu#WR}(x%j3AmM_RAq&SArhG8`+1r34`u&Q84G7w$!|kYAM^uFZ7ssJj&bS zkx%80ldv9C(V{Bdwe~Go1qa!Q=||^oVp?vf12|lUeCV))tR;gK6^;UY(8=eVAHPd`{lUCeTI>2JPOL2Oc=4?wYNM!EjAO6MsGuhdcFS6tAhHrsyvrQe@yIhMYEq~D zHr)n!(v-D^0swuznc-IUj{Q}m<%f*Z4C?knoB}SMathC_#Pi!3$~A=tHv;cPK%Kr{ zcaW`r{i%YB5fJCl3mYAzUDjObOHS&<2T)bONJ+PcA(B^MU~y$wzNkYVo*N|!FUtpf zsefD&uPN47IMbjd(7>`sM8_%{03U{(HhwLIxZ-Gh1yEUtVLl~bJ&7o1h-fQmw6;v? zn56#GhjLyct%Y^y%f$0xi^>~Q%O`ocVyNt|{yl5>zX3&*&Z&htDs3|)-=>bxKTX9o z@szv3pPcQS%*9AGV5tOqjRkeTGkX(ew9BFTK3S|9io^5lfO`7bmA62X<<(F6+3yPo zPR+B!QFua=j>-?YzR$28>{Y|^xm*V!ZwMr!Bq@f%v(HyoR+Rz~c!`|SE1I*|q=W>3 zP7)6s%4;Q+mX>mIkp$>9*1P|;Q5XWs&r3Ac`WcQQDC~N()|r6Est}WT4JLydCL}4w zlCmwP$Gx``cevn}Uy_c}p3c9<6baR?F8aDoQBKrcCu01hcn%A8=9I?sQhqKWnC%oP zp^+FcUtfTw4gzZXShAj#GMAqeT%~&h&;`zoX~`u`v;?u*nh_c$Re=Aq20&wF86@a* z2veFwTra2+N)I3bGfE(iXcndX<^T&AZ#BDWI)_*q0Wt`i>l8$nfEf_@Vds{~bg&c( z&x_KXN$JX%%INWjElJRe#w430P`()i#ZnVWJm|y+3FCodt1Q7II8Kr~Jd(Tjk|FY_ zhq5=`j{{0Og@1ONeAV~k<}^s6cFgs~CHXl({jpGBSh^folCD5Y3HP=e3Ngg8Y&q;b4D=@c{?7|5YTb0nxNfxrmh{U(lu z-SkOvuejxIsXPZAXtKsGwnR3M)5^G8lEfK(etC7FtR8&ThAuL8A6Tq1K~-p z8IzbS@Z5-sQzJYV0_uS>zaIg;{9H|p$*8vQz*eME65Ry{v<}9?{l|&R--~H zle_S&CJzFX`X2YjXV%6uE<>a(0bC`(KqY00=kbq8ZQLYY6~n&p-%N+V2T64XshU)5 z{g5&p0Lvj8-yuzehxoJDKJ4i#yv~qWDSiN4IN+RWV@Oq?o04KnRr@E!ekg^^H^{~{ z+mYv4RCffV=ub;NQG%2(Q{7Q1!LYfvQ(HG_S6QgmD8=vPI`+U8>N=h|S{8HQ5v1m% z8iWAfBro&HVw6YK^CROKQU-dGFk~Wu+$rV>Y!RrYZCeLU@4=*Uz;{!@%5AXr#j0^N z6P>!Q`9n^L6j1q)ZVak)@(xI>kznlnYCV?oULRp@kPr>e9Q6dCRcamzNX|a5ks=YK z4Tv_TWrhxAD|s50WdPg3L>(+qCs3n{@y1sK(zcE3lQ!{30(TEETMuEk&}|0fs2FaF zzaG;i5D1z_QAeQtcLJe zu-DbnJN8F zIY~@7y9?cTHfI`Sve)cI3qpwZ9J4pf)D@zPr`a83G^0%rpvT&gNz{@ zU<@N@u=MU)PW9{>>^i(##6Poi2=ok1N42KwLN-xwspXWRk7qw@K>%tP3^LfMfQ~bg zyUbjq%XcDcAcc9tKJ%u{t;ALJ?Bm9S4#u30DGF63hO;d$6RK?M9xmGlcx#6np8E`+ z2PnLc6>$)1YB z|7){)x_=OQaKN*`2)F;-hkjkuuc6a*Z7=fYMkN 
zq;?0J>vX(gYJ?upX^n;3X(_5S{`#1SA-EcZYegZ5Q#mjpjNsVfsN4lXnUp{e_nI)0 z(0`X6zfh&|qfRI03XEk}V0>%EqfZVICly8t)_;b-$o5r-`x6sMl_cx7MB&PAzR z5Jm348k20Sy4}eGl14Ij--0=49-5-uvFv`??ouA5#g{ zc)O|^VBzMV{C0>>6-XXptW83THQcR(2If>MG_|QU%6O+bRCPxvcH*^omI${8nK=%!ANUU?HIWZZpwQ>qKIJ zWV?_mDr_9I1H8~kt0{p7_uZ^%vvNWdQ+am6|9e&w&T?&&rIV_^K1uFHL2*mC{F7w>oV~06kAIrbtjCIU}A_qd*)=uS)vXVG8JRsda(?sdO^46`l6IAhor4NOxXI zvaJ32B<8usi<-@mZgGEtumO1AEAg<=))>ug4K@2@T-%YWas6X=ZP=z!&I@C>^rv8r zZsiHf@0;9Y1f_2;Jmyci6{c=vDuf1@bRbpdx?cC$=S;^hm!jzne#DosWWN9A%+%f> z<`G)svXac(Mh(l)Q|)8Pcu?r9?7BsS3X9$1MC*DKu=sij;7&aJpoS5*6}b2 zMU|=xi;|hPbbLtUaQZ95RuuCo-!qk){;<@nh_3vkwq`39Q&c{Pu~lAY7@c`JbE|X% zB2&rVX|dFXO%3;;ZnJ22y}8s$5_pl3tQ9niCPj-arW+JU2rZ^}Yb1GhVAE^am z9W(kDys4i(h;f&&Hjchc<7lx|9YYg#xoFtr51(6XL!_`qv$t;v7ncHj#l>#3qII ztu%2!9y3a8sToi1@olg0nR)Zr@mc7U_wAXhWRj%&dO=@av2n3{&}L-cBAaQg*3I>1 zg)erGovj*%H;7$!QY{uC+hvBf_EIj_Q{Q*}mStab)p`D6pj$%x6(1WE)Hts#`ku7( zUE@{X(i7{Kk_}@O`!)yCjw%7K%M?xZ)X>-$gQc`0CLBucBKN0;9Xk)Mxk!$X#s1ru z|F~2ooNUN@ARmZ!QM?}}Cv3_k63|TBl&6|mDAHf*;4xgARwzDk)ibn$D2MIN#fsi| z@%MMp$G7G3hG67Mjb64ik^eq*-o@Byp5QE< zIT*^i$ayVVnJX{OBucxkh#}0e-cXIY?!a0?{oKat=3|xyOG7cA221iy*@J^h+^~G1 zL(iV)8@p?-D6C1IA;PRKy@)YXtxc71kyAJ|A_ovA>jr|w)5SKr^u9F2b1f4W%wxNk3&eM=MUfCnq@ z!L6&)uQ7q?GT1#iPKG$RByq!(qEucZW0MF#rioLW%s6MzfXoDMY(}lRxf5vyGwof< zr~~sk+n;vuO_ZH=#CU(lO!_xvIO!gjezd&xy6Hz*xU5|P`9T;TQM5u{Chd)ll>lV5 z+DYL*l#Q9b59_jvvg>(`+$oy9o@RqJA1lFo)swG(a97;e@5!S;@8&6S&dr)`5y#et zb-s>|BAt`Up7fH9qJ41|j4sS}-jpGs6OQfNU2_WWM|Tz!oh!oMh!r~@2U0aPuqRvpag4_+(JJ`r;~QdyNN2&vJi2 z?J6PrPwi~s$05>H$QYShBvxL~QR-IhBJxVawQs)^Ddr`+a+__XXi;_QQ^NzyJNX7v zBT-@7a>e#r)=So$d-rWh>=Y7snggxN)9icM1{;@$t;i+)0ER#-m{V^mD%hI4QM$LM zX9~+#ar!PmPy6ZGmXuxjw)2NT-C^dRGzzzOGq@XZkGy^SuGZ~!SV_0!<`ypb_2=z_ zfm@G)e5C2t#Lc308dAdFPPm-jss8+PLO0a!uGleCb-(B9jW8 z6iV%r<2s_LzIe9DFDoPeb*(IO{d0)rQ;Py;jVYE!O*;HvurN@UTI?s$N_u5CUUG80 z*vhr7D9Mofoy}PDT*V17Jht$C)VTz9S{zqMR`#IT9we?>j60;}n|0v~bawr}s$4H% z`=~Y{wdZfGYDyxonT1h{EVkU*xxM+tO!UDU%hQUSHHLfS1YH2tAn55A&ynx9+vJ+V 
zGH3!BWwKVy{RSJMV0;ZPbMrh=wyR31`n{%}$cqAt)NI$`_n3a9hO}d(iTDiPuf?-T zGclsM$|uf$?^iXhZ-2XYVNFw!){{0PRMocK^&g5nG^# z&SUiO+w;@BD?0um_cW9KKS_UW38$=Wnd*Ktrw%&JsTi1NEGhR^;2+iERoK8cL}u3P zqG0ii_tjl97}vs~-=bR6lY4lk0#q*dawiIHWm7sO-M`4z2stUR3KOoE1k4N{);1+w zvV=wi7!MqNFOeE(zGm4g;c^)mdSCT`(pG8sRogah!;jt>+N-fTCYZMsZIU!gw6?a zca?3yl(>&xf(iR#vwfu8wE)1ckDBEt3K1m2O_J_N(2(um{PZ;iIsFQM-+V{hz5x9r zbfWb95KC!^j*#ro*5AQ&4Qp?Eth@fTTTKxfEE+^SdTcEeh3WwEwkM}2S9xHIMmRzK zK0{RFtU~hR;~I(`pOkj{O~A&jQq-@=y>%q7c0U^XUg#^#U{?6uAP<)#7SW~WeGllg5E?}0lX z)s2&2C(k2jfKOLmt^~FmZ+;H?Eb+<5SJa8RjiNdtNA(F4c z>^5gaNqMMxW=uoNwS|QTABWVjS(q;SgJ8GoQo9+jX zpAQVps+8581egn4*KnMkEbwx%S^grp$!sih`$m;OU7jVIQA1RcRkfIuh?OPF+ji*9 z=y$QRRl%wjPY(+3JXLR?lmDS9js}}6L+Wp8WJGVP8RKt2Lct@DgrXjJ_v>%z?dm{OjylQ&Cv+5ALTbTq#pdn8T~nSNOT#@@>~{w_WQ8w+u^G z{u-VLSuOmy!{w#*G%9y!5_Ok@m^*@W^LoM#6-sF;8MFDPaWV1%nd|Y~Xi+hBDtO>r zdoe1Gd$VUc@QYK*Mv?i=2HA~W`x}venx5)8@6Byg_Ak7981uFYH}-s=MNB4*M8OBH z9wW)a*Enn?mvxMfh|!gF($5{;491}9dTb>*ai#2W@ekroUyaaZwT#*am)M6Kjo(DW zYN;I};klg@B~jjGyA^!NudLY!kf;8o=)RGsLrEEZP^hNVSe2T^Ln=xk8xXTS z9c#{wn2@b<^&}pOe40y6I{|CEM@eziTr`mNjW{6Zpw?ZntPZ7g`t78HR#tkaEc~sF ziEX%x*z4|%6{Sr5SB49-j1S$K(od#bPSioI`?)`)wA<)1Qa!3zq05}7W{O!0XOIHF zOaY}Np%p3T%+gA|jkQZ}n~U1rpMTeMUh(UPBo-YJWKnO0w?E8v8S+E7`0hCyKQb?` zEpT=rXA16Y76&j{beD^y<`vbA)yQvwY)$K22JxEng1PMOCKh`#D~&j$Uh}KwL};5o z--ncW@06Uw?NX{xR_gHgaSBV_O8Q}d7#U@t#`Mk@gS~F=g9F>Ov!u?y7u`Gy< z*(z6Pb_Uzdb^Bxkk1j&1O%l~o6z-&;Ps|7=MT4Z2MO!;oa=sR8xl)#}<2^ghH&i+c zYx1PT+1d~M`#*b^C-tw7P1fDz*fLs! 
zEsRv-)W7!SKYQ82j|%=YGF;h~LZ#k{^VSN`7~O&t0tMK?d+O9Q5=vn?r_DrMZ!Ky&GZ~WdyYilx3Zkn)8(H( znA}E&vy^b9y#6Xsg>2(e`J{hGe(lqRt@4mHQLBngYLa;Pnv=0$u`k80d4Vh+RigN{ zjDX*ciE1i-ZK;uxQ4l=$YP5(yz-DT|i~rK{?GXR(w;67EHvHAD=A(&sKZ|Zwmb=jl zHcQqP?{AWq3!G8@S8}EZOH)##Rh>(Hl;ak*OUKU*tp>cDQmrxDopV zkp3X#!DHM5bdwFj#LT`@3qM0PF;$zX%5Z!d zn8?%Er^DrHoWB0KQg$x2$$Dop74=0S>Y>9uv2HtwiQ2m3m`zs#Z_jD&AEG(W3qQli zpsBy2bAxlg2W%Emba~30n|!!5~6*yxn|D+;?<3FxwE4#;G`vj^GqDji= zRsX}WWl2M7T(TULW0FRsCAVccrA|{J%C-%&R&I&NCozp; zpc!V-=?iGD2H>aK!lUit=AbHpIg=gaUwIH$mjO-9`x5R#xp;YV+;t$ zvlS$TSW~;cJMX8X*V12i9v@=}?(OnO0YT1JOn0Q=9fbc;HI=1p6X z7PP@EU<)CYRa2dfdN_#Py^xgg~p>NQ?{V)qaVyTN6um(End zr_?9OY={wQ8~)lnqpFM%S^Fw9DJ^lfawlb-*%(BAJ@xpce?oOC^u7s3WDE9PZ~F?f z-sSe@mSRQ{(zQ*7YIs=_WHaj+^@RInSQa*@ptm8YlK+m|F$TN*tF|FFPzQq_HE|?5 z>|L|m!$?u(NO$qt$rGhDuZBAi2gweA%$PjKTRXMy#wVDiBZvv(nAznV>TS!zJDk=g z?4~iqP2!@wji&BTK!ZHI{tbabbxeMK*lllEeimS<$!C|5=G8U2<3SWL0z=~MZmh(-FXEXUBr`q|5G^F4@uIShKDEJ3^#*mME zKI>pYs1s1l9HurQr-p-tAt%(BlbN>2Kh!>UvYz2?{OVM*k7xRe6VU|S_9kGi$stCn36x!1Z$_#_X4f}r1VFcw7rA$R6jkwXkrNJ?_Q>_SPo64X4M66u$2R=B9h9KR zlAyRXc>9~Y1-lYYasXfHl)(w;s`jRz$}q(Ejr%^1BV-%FU8l|I2>mNnJlE?DC( zR5LDE2eeb}$EX#=sAXKJw#KBtj7fjen?4bfGNF{7-IQ-5+qMzB=;z! zP|)m;Iyvmh*%Huv{ywt1l5&oOa<+b7KC?1|i{>!^Pf-9Wj<}7Aye*aiDv9VT$-gW~ zzAUZjD@(X6F1{>DCx}rDBd$i9JOlwaNMQN$3indU4-()ZAhs?APWr@VFG&m-fxpU? zFwjHMe$vNF#D?;NZ?~&HD5Gfm#R@=FlYlEOZ$m_XL-L)51eK=r-;K$?8;XB7)buw! 
zQE8aaq-_IG;sCJ8{??`6t-HS;eNlNl*Z=5`O52A!kAB=~1OEZ-!Jv}~N{QOa^0CVJ zkxqS8C6hm0#o*2xs$JHxJ=aycB2;_qRiE0ccB{tHaSV2GDfL@R^tj=rshBSPQ>90b zwHT}}xN&g9u{^JJ>^YdWV8~zgj5H|k|k!GH+H>kgF zP)D3S|L|w%!=L9LKE%KK0~9DAzy_J4?5BH9rXhI@SJu|hHyupeT zHxv^`PLjc=+%HcV?w<(V_`!Z(sbb_C?EY2o_j|&KG9IkxaQ~<34W+d~t;@*%Lf7U^a2*6Gf0nAT3_F8Js2aE9+26>tYkrFAa| zWy1itE6IHt1HV?{#hG#>%ECg6aKWR}f0kojODN+zzHKA6*sPv!C!EM4!LNz`i}+5K zi#p=J;k)p__D}L7Cu8hto_cK&6RFGpUC+5SD!9&y$J4fjO`=AEtKC4aTu4&k z=;I=#qLzuraN{i7%hSCXajQf&^|Hu-r3TCLdxf65#0oH91~{7DIVAYau;ZVoKjjJ$ z-6I|~uH~*u>q8-{uN60Xx`Wqp9y-%M?@{6go_tz*Q5hTk?ZZ$q`;7;2w|{(GXt00z z;BM@%Z`%z|ia#^N4iK->8~w#WG>8jM^%QkQ1iNr=Hogz_%#NZWblNB$O`3#xD&gBSfh& z%C!uQ^*ct5LVcg7MTHAlY8k2?d=`f{7FjV#w@*&X8h1|abQntwNzTk%TTw}u(xDO4 zrR1umo-NFkOxI(xe_!#V-G67GQ%3nC?I*L>EJ6Y1YBk#eWs6P$!re%4;AMSywt&+tYwO&UWv2q z%5Gp#p~@iv*1*F}Te7nHbUVxWYu~5(@;BcOYMu}{zyI+W@&T^Sy(L3epE$Tg3cz2! zWSo04q_}vvgOEmYJrxM1GxE=jSQ#9k6Mqf4OH$|``|CWn=@8AuCvTLhtMO~aK8opP ztxtW7waX_xK9@i3EF}83%|^uJCwbGm+*udfNqvDajW-TDyp6NT6UfrN8+!CVvN~z{ zgiPtxn3`&aJe7|UXBBIz%6=H;iTRgaPycst-Q2>2{Abr*Y;nHi;9=kr`Dvq$dLV>* zkjo5>;CASn4i12oH{(aT_wYg?C75!EhcJ)BRC6fr*%qZ12Zk|_(&4OqP#(1UbW|o3 zcMn{Fhe!6lm^umL{LJ=Ii6WPPeT^s>f$y_e^#%F778FYx%dkLR%h-bni5gj^IxRDn zPNJf(FH$i@OfYeF2V;RbcvPZ|aJ9Ucp{4k4UKx8~N>#1|{t#^UzNA3%)ffYAA#c3H zdjkD7H~Y;I9wCpvHN8(PDt0?D~w zYw1+;^Sb5&K3u*6`HF@uhZ%^3EBP*ASH(HB9^2xImKh5vUMh;O<8X=3&-SmybD9i5 zWs#6hC-d5{w;tEk&q(p!T2R4MT`B+S zYd1>w+`~OZjs7k(N%eelKi;7F7N>)@JVU~blsWS2LGvgvNfN*|Z|KfDvV0!LYJ}w5Al>0EYTcekjqwHM;fPb4AV;03EhQw$tG})cQtWE z$O;4W`(BzAh2t%qxy*Gr%Rn!Z3g^X=DMswbS$3S1)?HIj>0GYL^`mjck=w7Y2cz*< z#^V;(^UwCtw!;+;_?xpfMe$^7#JJRJ$NPWg9IYLCfu9IYp*aToy*ZD=?gnYA?NupV zuJ?sR`3tgE#aF!-7L408K*|vOGRph~d4%oQepMUlnaF4IotZt<%r&wqn;57@WgAqP zy8U?LKhTU5HVGa|4|=xe^frxJ3OfbU{wDY?$AgwJ*@Br4%Sgw%9EmRGeCWd=r#si? 
zZC*xe7{MK}k(J_YUR1gyQD0P6{+5#9J`aFzC!K(`&#_+zZJ9hI9UL>SWbAg z;Wkv4Ls=Q5&D&FfRsE}+ZXzW7FU{UaG2xoBdYF)iaxSw^0h5>tR9Egziotu^Mw*mg z>E_#MCz{;tEf*!XVP0OjhN{BNC-EkT@<+kP9c*i#-*gwCt({)%IcYk)HR1Zh_a$af z<$HIw8Bu1cd_F@oW+A|Afz{n>{;$Y5sn0y?Ev=*(+{Rob^I7(9{v}OE%~Op*?tl^D zkdl1=vtgqJTivczw%)^l0fMNg(bPhiQ2>*B0Tb;BYOv zR;XW#`zrx~{@;jztzkMX8}r-bw<(l~rDGWm1!k;;jdz7FyVIPb1sT=D^c1V+%zooj z=D423&`K=&RdzVZ)ASkW8ZJ8U1*(T-#~QDI^MkZR4001MO{4RyeNhozqqNig-=m%? zgU>^?Lh88;zkEOe>5Sr&!vjFynxbyJO*`2^0yts_VYR>CI}-w5xXsj!wQvos7t}!4 zrp_j`_`==y6H2BDB2C$jt)5aSr++dE_ZPW5=)NduuP^&15&YrK?A{+4w?U7H9A@dW zjSnuGTQiMj8@T7FWTf5;c5I5h<&4-H;~8rBRu39@=<=t_Mf8|UE6BSxqX=c#acuv> z>T861wuxuppR;=Q$*f^ANKWS2uje(eZ&kW_X15prY`)Xi2msuf%Mw)EhGkut-LVe@ z6f9?T7AamNedfv3dVXvk)~$3)1%V_8*;QGDwX6ChDwjw}*8t?dIj5hd-AYD(dl4 zrf`36X1o{bLo;GVp_@;pkmXAKh7jsk+cm>ZZ+Zfs+0_KV=>y58cwDmrJr%%nWL_}D zX7qq({R^Rg{JS{$D|eJoaE=hqV}X_)nsaX)G~nc=5j`gpk{#ZF!{NeQ3@C5T-Asc} z{qlNbV5-?n{pPE9gs;>gJtjQn@v!tq3_fM0!lKP8*;G1Rhh^l?+SDH$B@&yc)9AnY zeoQxJ5EOG19S52;`68A$A-*v`fhS~SRl_U%Jlj0~&3eXq2gT0rZ8RrK0T#OdqP;H| zHc(pLixd<#npYhf^oHLUhlK?fPDpY@?!cRQcMrJx=-XB%=;chonIvAetw0C-%w!>u zzHz%tB%#PisbgE&!Q#6(!wbF?f;j#4)+*}_m^J3+Ss;!hRc6VC0r0cF6o=Ey(`DaE z(}dPKlD++}HYLiMTqk+&i+iDME}55+2HcFE7wcPeID^PmNI0%h(3|Ailfr9?bnh|u z^-b2Z!o=;2$s7{Ve}$3bbJ0{<>r`4|ga*;agfCeHIFp_SOmpF1wj32>bTr!60kmiIi|k{ZOdLd!Oh#d-@n4JvY<4jz^R4a|}x;v#9bIeou)@cl>)C48Ptf zr<~L?o5^UeMOhvHby-yQHw9bFxP^GlrdyTSopB2T!QQW<0N~uc?}?{6sdHyX0e|%y zqNe4Y5;XWU`9`-120lu%{_(Xn!Sm@9AA)Q#IYIY!xp`0o5h?Me4%O!oiCMX=So*Nn z&A)n?ZbVg4Zqu;i=6~h@lj3rI#XWICVj=_yf{VJ&jR5+4(gKMof^RxjLLXg9Czea^ z@ve=s`VJnR36CwO3fZKwS{XE(~Y06<);pDDqK-G&ZyM>0Q^HgZ>{z`(1VO z%yj>N<9`&LWmuDM8;92$8!@_Jbi?QnVT|q+X-9X52&kjG8xco`h=72osB{U^2#PpD zPy_@-M8*AI-fz$Ie7}$9y07E+J5LE>=(0SUUv9sf`PIACKmXLiL$%6Th9z&KskVTc z2aNYLQu&w3%P^Z%j)ZWZ>2rl`siorroun^yP54j`7*nEdfQU{>;HV&z?4Qqjr&QfZ z`=Wi1#%ZL|Cb$S{pS-7_rjEXPl@CKepjFHIapy2uNKM&sCFn z8<4VPjek5(j&S+PtnohLiV6*2KM{ed`?+kLTsg=7V{tKLw*2j+eI5`|`#F?nNm@cO 
zoJ)~r&)SmP?Yuo1xnD38-Ns3?*nXJ5HzjsM1@B>~<8d#Iy55JifmZvwFCl`TY42zZ zeS*2}-)$5@xx*jr6-8=r%gVxw1FEmxQtUwv=>B*tANp`j_+;Xc?lAh=p_#Sf*BrqZ z8^fzebK9_WCs(;>jQOEdgYdDeoAt2%S+M_SHW?C}K~VF3c=`Mfds<0{{UZAdCRVq{H)y=WzV0Cbi+n7R zQhfOE;>XKeo5(Jq2zwfw?}4R2w+4NYfS(qYyB50}in{Uc3t?0sU`)?y*e0@uDX!I^ z1~k!xm%AG!aTM~N+G}F4Njzy&(o9d%%*%J$bW%>39xEq<3y*Ez_9=e%)PN-P##{K^ z#K*eohQ8~<*9skO!{p1B%$gKsk+kG6@g~~0$C9%F>4i^5{kbfJ)GESD1Vt0?Y>Cc| z2F}1ZDZfrkK6A7iodRygv#VS^Spvmi*rK%H`FvONwaSH6SVQQpaW7T0<)v8I!{TI=OV z)wctx8lRntt%%w}E}SZ*k?iV&;V@9K`SDi5-<|%8?j*XKZ4ATg6u#2WNbj?Afv{f! zgDU_{``&hJ>F+jTs@)@$@RSqmG7qc2{K^-NP>X$J|NpM9->Dng5O_+xnJT)6|Bv<2)s`s0wQs%Wc-`R)Y-3#b~jXo+f zbJHm*5jDmoOyujihJm(O1m!9}qCC{v>$q92-g<>^OB&IP_nhC3EPbR_8e6Uz;0n9M zs5;PT&?Mnv2(7W?;PpaQFAUpg46F%=xZjbK*&do#Pl0V-vJUFQ+Q`UG!edQ+8c|7_ z=g*c-)KHsz_h>-LzhWM{28P$p-g}RUtdWTeGU1?=n988l{+$nx_8Oto!=o{o&=tKJDMS?`FfsFcZ*h_2ta3@$!(FjgbObn z%kukx7!t^&Lj)p7K%IW37?$ckzi;w&J{_)Oz_~*BNdVz~TBpSW@oMybKiJKA{>_Xe zHE@{<1JWVUo`Ym4{3pgej_|#S!#K=a`u-Uj--h>wGxVgHtrwq6h%=x2-c#LPKMS<@ z;~|`E+{on#wdf}*0icLA1r-8#?e<4&@)|1%WJl_^^bM5l=kz!(>aK?J;DB=du;FTD zFLv9AAx^n|fCCQE&d8NSf?{z%Cc;K)MG%e3mXpxK-tZAs9`~ zY^AtLS(wKf2u*-_5m=*LADX^`m=I=C`_2g8HKOP=$qqYRbB>)PFcJg7k-!KHvP+eZ z)CW)lKyYL*jwJnrG?o1dh`=DQCAe?$U#5QZXuHkGkawuKtF8>f&(|QEeIUtuuyZa7 zw38cX(l2OhwEDhIS0O_7K^b}-!K5<)Mi3wdI1m!Y2-_fNa3ff7^ZI}zRcBF!cpyIk zLL?o3L-VC3kka72KfcgeeE_rIj;>+=3e^m_6iLJYZC}%g2nL`>UPb|rbV(AAiSzSqNZ6<%^cZJB(kN4L!Lua2w&6P3SAuX<*Y6EiM-f zkQxI9;JIFee3ldSIj%`gfUm#0{Uk-otv6(sU#}H$c#j5fetPQyL6N&$nF<h=1=2yGA?}y6)G6_QEAUUlZ(pu z?j$Jtvkh({Mb~ zd8m+aAB?(f|C;v5Baf#}@9RQ2&0%!*opq5VQ8fuvyy__~dBWK&yFPOkHYx*&`dMI6 zw^|&5|7;ei5Ivi*bZgnqVcyR$bEGzbSjk{S4;qFSHGptB=}2a+1kuaw?kTqhuT{!}v=;nGEvZWsPJ^MK#rL z{RlplhqCHDom`;qna55`k8fLnKv&m@Zz$B#WTj&JS2-hZ>^=WkX{!g?jlx{{sPjgc zFD=9%po7h_9_v_kb-+oQ0A3}I)%COBOB@tl5l2W=Ws%&cja-2Rx3jiKZ2i_CWi+c zPMhEXkwm<~%>?2m5nNw}HxOe`!65k}(d|Rx`$D%L-1$w#kcPR?l`@FC131}OzGEW? 
z0yZ+?m8c+WRtCfx1I&Q9K#5#D-2HZGHW;?&N0R|= z7C4WK3^7APmbR&RFr1~G#e4!M9W__n$)%eJRWP=i?<{#)w}H%hHpn3^aEYqeKn9`9 zoHYOmpvxLDhB(x>5-B5|Cjn5b%%Zz{o-!4(BhUW9PC-;efRQVdWsDt(q*Y4%kf-Oa z9-XWZLj=7Kpa`>=Y-jnYEwDRW>Wft7%rLK>DAkRapcjCHamY~K3Mdg0Ih(Xf1R%8m zd;nZoKLAFKS3BKDpAR@JBYeAYM?f?YoEI~zoldt}4Tu6Hf5Jg}09pO$Sj+%`h*Q$8 z9HXtCg?zvt!(aX5E6Cn=;DcRP;pUQMDL1CwwJ6kKHLNwLZl?LnGnVK)PgE4|?H_d7 z$Pg9^H!@3@FS32ACgDr~cEsvRShNF~&dnlSS}y;^Hyb{bVW92(^T@R>Q?lnoC36}x ztO}bS91RB3q0{~lLNxqM1i8q)Jhd*EWX){t2U#&hnk0)vIDx=b;lQgTe$t$gBtWqN z<~^WDXUt{!Mbw^e-Du6IL80tO@Bu7}Cd3-&8Qo6o-prN`%V%Q5^h2M`>O!Ezc$a^Z z4g}RV_F=%mHxXJi!M%70CIqS&vVjb3-so8ggZ|&Sj6r}P7Yx!v1_Lw+zi|LOmW}vK zw^d`Yn%cmtecOoyZ~8hw72+;uKZQ|vre!MWu?OZ|gDLTT=m$zhaV<7^wd4zow^<`{wU#tqa#Ze*{6DG=bu~x@G8$`Ta7$WY-b!?X z4EyeV|Gs#ufu=W{YgGlV(sxXx-WKEB=pT46Y?-4Izs0dRPE1I)AnNPta48aqz`6_| zUjoox`WDU=@;;0`z=H)Y52IKH6T<}k8F=IAOMT0%>lCl8CLsq z*>+ck*_l@#v~KL8$DH&0bwg`3ecXW2TwKEDvLJeZ)-tLIyb9R{GQ^bt_(LSam?1%$ z=ex9b?W8lEPM>Mj>Opf{;-x+3j9GOPKlXOCjoFfYZauvD$wcn^-FG(el&XnYyphO2 zc~BH(C7WpgyI#nmW{ z<4_*636NdG8gzC@qfV75=Wy_-`gh8+nY*uAINoH`sipWzyAe6POjjkACX-EK2}O0M z!yqbLzd5A^<#9gQ=#&~p!?p0}X$U(g&^NKXxuvwz-4y^C2VLKCWs__bl7@%^AW>LA zdrN$4A>YFX!Gb6UlG&pqShkPSR=FMKW!t=+90)BU0KwoCQehVZAakx<_A`(N1Q&iz zM8LQ|ELMfg3}NQe21h)wP)?>_Vo4YhcyI9zy!#_C>F&i5A8u+2$apfu zO)ZPT*zgzkFl&WlY>-HwMsy^pPSngY{c6+~eI!dCG{{S>3{|tn%_G=Dn34*(qok~U9(9fdMgi~cwTS`kAbN5FM7?WcZGRSabR2r^|2{(L0H2u$1$ zr`<)<*37%%8WXRKCtek0XlIaM?Pmm?S{cSiNFCwClTkv7G($B^p7hl6IPhhjQW?w1 zPGIT5v0P#ojK?Bz^(?U)Fh86Ob)L388VW;0{L$>rN77(Iia!~49j$HI&XP_KwM5hX z0f9jBwlO{4v1i^-a9j!3?U(kAxKr?H10Qx?c3WNNfW@zj*|LUFHZvEMR#+r6cvJE%jw$D0mhmm_oxM zV#yLp3x(0rH-xKxNmTSLRGzTJ!vQR4>hVqq=V}xO5ukv>T~+lo4h0KQF2{5d6Sa-? 
zb2O7s2td|?MX_7U>86wi5U&vdy+nQ4^j`}>gCHdBkoKDibAU5BHhVM(NQC;%LIeA+ zrE7Ux9eLi`VY$=Ga#Fx9z!-ghPJnYuHdkJ79!qmyOO{)JM&n5Bq`_Fd^LmLeO_BkZ zzz^Mt4Y`_jgWDMW@PDyG=M>xdNnaW9FxLm zvY*Pq@dPx^RJK;E+T_Y{*Z?YdA}hHmE1Cyr54}?f6rwb`yNC=4E`>P%_e%|xdgBBk z5)QgzI|%9(5T=30Q%{84Phq6ybA3|_cq>5r#u|Vpqb0tQU={$Rg*TW4C_e#C3($VG z5WB>q?6eo~d((@)7gO`LRQXyXgRE_-xlEKvJOO%Wt5|7_7C9h_<&T)`s+rXDQQ8%# zt0ecOL6lH8lh;P_jd&D>2_F=n0!4$;T`zBWUY6KW*~QfFVXXFm(sWrFhjxI=PHI7Q zsne|Y9bs@keYC)WPwE($32Y|nk)lTHH)w$jw>(c7l z_>-+nv$HX|S+(8T?psstPO)z*-ve4vVbKsEe%|!oXQAWzkd7fxsx3((1PD~U(a#vK z8V|H0iR`1rAxEHfEG;4h_?ei%&n_;L2Zr7Q1M`65Y*#n7F0FL=s$>h8QX<`wnB>!N z*^CbMhaJ+uB-K#h0twZf2ZWP=!Wf(^wvOEb*q4Xu!ALsr0faD5298jwwjf~=v-D5Y zyMp5R1g51hCdYK|ynHA(1$Q~ifoa-hu4Cc8w`9V{+4umU>&y^)k~R$hWN!?((F~36 zz3GHy4+QiS3^b&lv0HANiV}diH=CGe*}n>Nw7CL_tf;LFkoX%;<85}qM7a{x@~b3L z)vn#~0EVYMLy!yvqAMCOK*$k~f;NBw2NEO$x>PD~H~=jU)V^Z(G2X!PGCfTHeAS+R zqa&l!R@q~rx8Vp*kOvf?AWTPw$ww{p{ed_P2uiy2^R^=AUwlkMq_!iu2!cY1*wF}} z96!eoFOz|7WVNh7nMrCTghGQn6%}&(EYj+USPViEqqU2_+BDtKDO;V z*%XC-N%@vZ+BMc=6ooJi5I!DNOU;5{eC_N6z-CCoCIRvZKr8m^65&jnbt3W?Re;BJ z>>L&RUDRMSu~c~0aT%CoZ!P?<7S)48IphN-99f=|WV?x|7g#V=9^f*ZR(KP5WfUbu z0w69)eEmh-i_Cqb$SXfl1As11AU=RCEnvKa+5$jh(H&qXWBUeXLxl1YhS6wfVu#JX z6~JZ^A4F=1J!W$k!Uy57ooJs2K+bu8;Ip|#4lox_VcELi-AY-SryGOW0_EZYsL@bt z#lo#=*_L~M{HSq&s-HYbWFQoPOt3&SVEpMV09C~bjlY{|EdbQmdspQZe_2v^<8cmk z9K5gLK}0|xJpsT#P_FgueS8XM<_8ibfs>_5m*YPFrOS#Nu1h8Ye!w|6wnkaOLiy>kbEI7BdBMNCQ$j;(vkDSETuHd7+Sih1gZ-l9RUF6gJ2Ps^d|HNU`r8E zajA*{szu%`C|KZx|HZ->EvXuKPl z6#vB-WKoztktEn7)(52GlKRO~TlY#%_MD4kMa22l5J^TDK2 zuXi{7_SUZ-(RzDgai((mI$UL5Sop#l{b9lP#yWErX@t{;CRvopuv{z`vQ8IKm&~hC z%j+f;7e|P#adgPgMKl?-tXmo+NAOnk8IGLCQ z^~d)aJG0jqA`|Ea#Fmdfr1H}h zPwitl-*OAmA`*4>t->8rkGQWf@{(DDFbqM-x8Ly99E>j+XdWJ{QuQSd0VVmrnQu;2 zX1n}Lb2(2x9N%8GEHGI^e*Z!{z0(~mZb5W@CG7O-^Piu=gjZ%?)Cm;{Ymqr?(N!9O zX0uUd9kS;%km?$SYEdW+Z8bmOH+h$H=9pt?_R2GNyipy~{g3KeU(`P{uU@VD=#2aL z=%Zaa6E<0@^l%-y<7MUOrD*-c`TO`J%*cfD_OzK_r6hK@tsv1476 
zyJUL#a-JW$RmYso)wdFMD%+R)7`^=hM&F-Zv3M}RmwRl=#k1c#5K--Shl>3(kpG<7uRV16RLR4 zoi}56^G@PQEFJpp1-0P0K8ZIGLeI>v(rz)Bf`H>I4bZa>lIvA;3YRP^B<$O830ce% z4w8BEExLum{+|sC=j@(u@+}CqmdL4Q6tQI2ncB}q_xWAxx`wPXwC1|zVDojM-YlQP zlcRfYp;kg1Kvn|`sRXRHyIxUdaa8H@nMsk)WLdN8*&TNKdzLNr{<-_7gcq?aLH0dL zF!I9nl_S^TbY>Z^wUuM{@q8ik2B*~%|7;D&(QZ@Vb@I~#+=nF=yawZ`{sP5+ec zIoE}L{gdpVB;(xXS@`yT%+JRZj_->RmJ6f39O8aOkE;ihjX5^`NR47aec30QiyvB_ z6>weo`F&+co%OMB6?Ms+Pj`{-!>w3{Lr=qdBO1$Dc8993ei=yocjqi&FOB7=-Ck4b zBaK2*k@h)S2d%*scQk-Q_|6V!LsSLP2>HsRA-F)+kro{ldxy4$dKNk_h4z+cO$NW* z7fd=bAx)Rfzi1AlV<7-3~3(S2%VBEQ7+BvxZJ@G4+b_Ce)<<{)mg!U4taiOx;xL)} z`d8gtPvgJ$-Z-gZg0KFL?gf~&u`t!;qPYbHbz=CKEc0T2($Ejb z{5o(SX8d{8V+cW)o>mZl=;Ncu;XO5{r^%H=pU>vX^d$#xG^h2!&PQ~DIk1^4>{XTD z=0I_|6Oz4A!hCbQQP6ams-aLP0czU1l9jRB-1dikx|Lyj?_mhiQ-ZT+Fv0lIvp*i+ z9)+JOJbctJbL-6>p3?DbhtoirIX0m4jndzMF7_W&3#hQfgQdB={DbayCfYmQ>rM;0 zK~9J3kCyxXQgj!|6_JS2D zYLE4tR&C!i|G0FerQ|A)IFS`Vg{Ozmqv>|s3H03C*r9dQ@^zhcYeAKf%^#+rZrQwl zhFsH_=cBi>=xWm3auhRQEDy@!3>*C1EWZksgeu8>5a^CfPPzAC=J$?pc9EV>)r<|J zPg7iwwS;-T##q~Hm1#+0we{j2g!WbCXLE3u!_vM7i#8wH>aT$#R4Q7F+6xA#nvtf$yu{6>?$(e&<-&A}<-bPk+w;n6Ju^%g-?Uef^`8j$_ zW!E)#*9)fhoA@vSmN6<>QN|(D?xs5u(#^h3bA2fuSNVF$rFM zH$ODmrxN`gLXV)I5Y?ZPNXyw9SI?df#jww5<#}i&UJrO9=#r6=AJ!vOK5Zf_)?p}c zUl64ud}91~Wp$=)v#7;`q0o(i(K@R@Q{g2@jMENRnbhC-gw04QQOy|iSgHSY?@6)g zw>J(ht4;b;7%Pr#Pnp}ljS0eDfe)IAmlf&3*WA|16HdZ%x5Mu-9qy^91%S~bU>^TJ zvvyYmXDEv0*SPp&bRt&wmWi-M9T8ZX#`jxpMCznY`{7jQ&dagK>nRC`;nxi~Eqy(O zXun&P|A4o^BGEucM&n$|m9dtd=V#Mc%K98#5y3!D1%nbC0o8sk6udwi7MzsV8ET~0 z{q42UFxoTs@$y6d!%Y~K0h~4YIKYK0w361pg7LJFYU2gwgv-YfO%TviBkJJ{q&>sk@ zJtd7aPH8MLFq`M0RHBc+fYD!KUX&RDgF-e3gjydG?-zH2tFogtL3 zNlEpd)c^$Fg1ge^2xjPi(}kd`BEJk>^pAzz{wk2SNc9x)o4zQmC{IbgS-?L^zDQ@Q zuxh`~JEiMbw*F55|*fv4aRPq>q*>pcvGWS%6+x|Q1YusV~Dyq;6;+(&6j^a zHd zdcXYCK-H3$%7l#F?V&1RTHDI~W?jQY@Wha~0aF4%=cT%2-&jpPf!|DkT?948$QMw} z_GC+2AVVLAHOYzE-X9e|t`*QtyuBCxsibqUt{@@FNWfW8u!G6**Y6%v$z)&aWy-15 zMLiMxPXew|CQR#J29|Pbu891Hf3xRu&~&@Q!LgrS>8Qbe+8@?n^NfMxcZ_2BQTC5( 
z-vU||4HzF?uF8nNTUAf`ms0-LH#p8%zdei2(Tq1$_%c8AwxqQ&P0rHIMJW3n_1L$} zGdUj*u#pAP%r@KeVwVs7eiT-0yTl$Y>dXGTBj+nYlY=>3AW%x4_nqxKxuJRepq=_$ zb@qlo64r&DeiODf45q<^p}hQ#X6eGh=oJ+U?i2gm12n zqX5t;!rS_o@3yxKT^=@|ueVgw$hrNf7(7@_4HZ)8?3}pKt@f~OIpOHa>k$gWFSgLg zCn1}=prWJ5PDZDj*SaM4n>XmozbtUWzPwg**1OF-82$I+U80=hYLctRQfp9$@3-ie zvt}OdcaK)`ok|$_z6tI=%cja7(684W)e(|JWh6HWY7`j=n7kHZoGq)_Z$K&gOu*hF z{WwH|1v8!*G49zvH~HA+FZJC0JL57Blo(g!c;N5-!fTD1h=OPbi&uWAJkYpDH>kY6 z`+1}-`irT*l~&rhkoq>6`ergv#lUh;Svz$3;8c2J3 z@XcHE!_wWda7PmS?!w-E^R1TClbV-;am+Flx70yCX2 zKV+7r`iib5HmN;N|8ZX_e^5nozQ_*SYXQrk-#Zvf6c*FUoxY}jK zb1Iw6#?rm1{B@QOYVz>)LM1m+1y&dD+f)Lpg(CNJ6Roi&X*EAdy)WlWX2jCRt#zs| zOShZ@bf{W-fsP6BT0HjZ1cN}*F?XV?=uZB?hszqAe^X|3RnF?wN^IwE1itF;%X?p` z3RV?mCR9oOyam8Er49{|I$qPTLGeZ8_9oJsF*U#d<9DTKCx$~tn+bF>*y$)8vL&r_ zp%(&q9RA`GGb8Nv{>9&iS5)ua>sQrxYI^*%B_}Y0tD6JItJeR0MR*u2!j8!koyUb6 zssL9@1d**JDh8;4bkX^~xw#T_R;d+RzNMt71}2HIu9c#&*}Da#XFk=TW@noZQ?z|j z#o3z1`hsVt8e-=NWff&LK&C(N;oVv2mO5t8ans-h%aOo^?((BwUu4|Rf9@8cCehXW z7%_CelLLFNzD?jo&_ZULF-ifY-_knSkf9lwslAw{>aZax68_G?*ss(0BT!H&r7~x%+YWJESb5gXs2u#&@Ax+gx|1zV8PG&oT_kJa6 znzLWl2q({LUOfLMzK+$usZJB~W zl(gZn`4ZXyZ1T7Cw9MRh8$eWVqv;RqZ85D$zkzrwEg%h>(x3ZDa)zpS!;b+w5s zR|NMW&*4V8G?ghPv2OlC^_S`(8fr=!f3&BKw6ka?@o{*XWZaK`?&hupILZ*ek{U)H z{8&`spVcM{d5ul1S96sfuxr_ov=UIf_w*r=FrM5#5ACeeY@g&@AT+c-7k`MPy-+jv z)arj7%i@1BglnZeQD;%cV8iD`kIQpG6nc?&q1bFIx~;-=P45b9A9K?g)b_Jv_ z-KA+#rDRcldnz+$s%3tLA}mAGJv?)Er^wc$>UiWse?35mlFvhw|7AY+>Fgb6LqMB$sUp8-5ZuKe8Hpc(y^YDB2w;I@5XrHN-JY^b= zT~j{p;{z$sDvyIW$X9{s$-d)sh^%DVh#Gbu1MtI|i01wR_WSQF=|COkzP&1@FSzr- zwK=B-3))u`I2KX#9n9$#ddvulezs_ar*7K#OM8VpMvHVw{(^g!*Gifh%EgxQRLXO{ zJUaqDP?q8?Rax@0d(J4NB&Y)Aoc+QPz8#I_RGCw%Y4jN)YG2gw`Pejnrvv! z^e3XEhhb5O)F2PrmPZZ~(QgOSUh@#Fo^8FKKTq}*%_giQJEavbve7uG7F%o42#+K? zu4LO9W_Nx}c1AclwI`E9z_%F}f|1v!*{!=zWnNy*) z(|=1Qor7^rNr*tzkM$G?j({L%>n?=l<9opsg^GgDY#i(u3Xc{`LtKs~Y>ti>`NfvL zKv(gd>-fpVYmZXQPh2V=t=eaIwiwLGhECT6D1Dx<2*!dM%BAtYQ^GCawcIFa>eNU! 
zRFna{GX$e+nfio07(9L>e7tbSEi>XA7O4V{!r4B&O+e&iJ+%g~IbMOdsx~;*H@JE> zl%rF5Sll??Zt(AHaA>#*?6`8!ZbpXyxzpUls@%le+$3JPNxpUC78QaXr&=MBtq|^( z=bKnscPvFZSypLFjy73d*j+JdOFm~yxnxU532H40wMHagVM&(laaUl0UZsWN4!5o> zyDO}3U1jmW*?L%`ll53U)JNU*cic74;H;#0douJ^4V0s-#5XFj!3kZw~H4Wg4mTwh;;G(a$1BC zO}0$)wqk=u=Xl5V?8dzEzP-L{Rkmxn0=a#-8`tA)W#P)D;*(_NlkDP?66BM5=L;n< zCH|Tn)TYTN^Oa9lgHP7FPtKuFZrhh@WdO%(M9~yCZtT|rv#+9Fz`_(a77O1Zjjy6g z@dbB$xk+Cu+I$NsWNzXHC()H!+O-exQ@mIFyl3|wQT*Cz{X2NRwF-ah6!v!$g?CHCZ&~;U+xd3~`S(_R>%QaP+vDF; z_3ddCENu2$?;u%T+Rat=pYY3%I4w!#Yi3oDegzf?rcF{U@0z$pk3OaFNeu{Jcll%L$cYS%TY36 z1!&U?F;odQQ3*E0em}MS{v##nr$+FPl3>GrhygL_tR(4gRq)@O?|-U-Pc?#V&_DpO z6loV<c4QJiV2+_<2#Z*=JY0ELJ zpOQtT0@WozblV}ib`ae!r^eq;b&eu*4IpN8Kh1U{EXN|u%^}vmB5ZaejE*AoNJ&Ob zU;ruL&D%(UYp=t@i5A2_g7U!!+Ot`uB(s$ObwrfsjkCqGB+I6F0^o4h40eNbcA%GX zupH!Zl)QPC9C-I|3wHC6Hu%6SDB%0q0VVLDEsDVQ{iN!BN6!S?lo8pbj=+saHN<2JZ7P)NR|xI*qqLjR`TZg3ze2JepVMkG z*kO*@u7?I|9x}sISaOeF0AWG!Lmt-O`KiZjnxWfg$7SXzJj%C<{yPzVa3Y->Ch{&! z{O*b3;}em`CxN|T^4>6!;FD_a6REKi?A@3^QUzW)H5{AzQ4&RQj)0Zi@Hc={mO6Ex zrZg$S+e<&`biiO{Owx;{Ev&I!V=1B00LNm0@mPfEb%+rOVs$gpGC0zb_ov-|e@%bI z4wU}XAw@clMLOTO;6M0zLnP{*-kB%enTPVh1aF|3XyClpN+Ug86C5jJr`KH;&nOZ;^MRf^Qrj1|0{& zgO0D?-KJ0&9*0)&-raM7)v^AO8N1c^I;@`ekIcLO9y|>r5=H~c`U9tWXatl4_y!Ze zbbjDe8b`J89Z{>UsN*%AjGec)udu$|9LZyw3G8Ay;TnrrwsSC6K6aW4|2palATbh? zY%Ar{H86Ub5`}*tU@(5@cyf}Z6CW$w?s<3Yjb&>KqC<)x!UP9Q)But zO6o)BS=3(9kr>^_=eLelrXSu6t}>%U|B909Vx4KU#1envQ&z6ac}x`iwu+-(+4ks< z18S$NhRF<%t9iezER)-wC^K~D`MCP-33n6LupmNA>EL~jkZu9Q_gCP0Uc`SeTJ`h& zwI}WmE84C5Q<%kEo^2i@OCH*`wLR<&`lt5m@PqTq$@SanzmFGh%vr$p<>W?Qn@PCv zI=qTI|F-pXCeGoN+vv|N$1~%&R|@j8mu3#|v^GSxb{3Dk>OqbN(RW!S(efx*$io5? 
z$w{=djFxI#_CMy8<;odpP2|cA3NwXXA*@Vf;&m9a`2rz~#w%Gr`ptkQA(Q4AOu8Nx znOs6l);iMVX@z>02_9C)N>ERVPzA(dNh2_HwZ8O50((Q5dx8CO$)7%GDY*JezW%+) zf4bKz#pdIfit(ic^VCeig()CHg@3Z+@OLszP?oXFQBra=z)J@a!VY zzWvvuUE}s+s)y_yrOa1a1WImYGb=OvN_h zx>O@(mQTHgsl0x-4d(4;FE3JdR0neZ}_d#z{YRgH@vlr^U5mN z@Af;!GEliX5)K(!Hk$-zuR|v>->G`m%gv&rs{IXbV`d=tEaJQVU(i%RrUW#=+TiIM z3V#l7vjWyE#4yCDNRNMHQNWDvZfLRI)d?mcD^K6f(*^4rFMrP0a=mkk{aTqyAl^2= z{wsP)74cT`2eme~+>o-v`!`Fqi=Cph7OOIk;>1l9|7*N(bm$ri;-yU7?^B>X9_ zm*O<5oMhfp$RKqfQNod>Wfsr0%@-moidSc1O;ORQPU{nNk&#U`wn(5^n?_{t9hWK= zvDF55r3oKDFICJU6!HuR84SgiFQ&u8q)rfja$ z1P@@j3;sQF&`ar_JqBuDszrv@6zIK@)6J;0ffIZ%3_+v00Og4!+S=uz#b4!3s`d$aT&I{`EO((>%H?#)nL zU+Ep=W=x(7Ti#k1c=V<$hi}y?Z%5%NY2lV}IM0b~>f(}gDE)969|-=+_NEN0eMeNs zR_QCZr)rEFhg_>=rHV03fMhBv(IS;9&g|{FJJOG%rx+)XvNqc#<}7oO&Z63!9KsrH zR9>Nb>(hHWmM;)HagyPM{Dz@ybCS~qCgEInIcm#?so%;~uvh+<_EQtDN+Vxwg8?NA zR9fBIYoMv5F*E(z|Hyt%Ip1yxAlaMXuqSt&d#e6ce|W;{YzyC0S(e)n$PkdP&F-J- z@*Ng$(SH<718%d^HIZjnJB%BGSAxu;1IJ0U$>FJ}_jyX>yp$(A9Y0QW=wKE80%{ zXa9;WT_A@pO+s}UKrhAThhdg5;}-91dxUH@AIF0*6i$=R=+2{w3XpX@&YE`DTKO6| zB*(GmgGJu8CrQPva?$snEg^#qUkcrFJ66_{`jI>|?6;X?ILIs~9rg~i>o|~1O2PU# zdoOL+e|k|JbgdAx_a3I=@JlC$Fyyk|WT|FrRr~s_S-TpCq>({FfpEhRC&Ic_Lx{6z zoS%~1Q7p7>tzyEIvs(LvpGP*`I7+xRfU4$$zyaoW!u!LGR@3`f>gsbQ+|tEY1*z=Z z*+Xkl&5z5YHB{3!W_0;PZFDk$+_bN;7UI|_b!8fzLdH& zV74H4s$}cuQI%RjW{HJm>usf9jZ?|PkZX$Z(5{RU*U`7pWkwQ*cPwm|#z*oVIk@<_ zj+Wn`nU38txi-Y!5^_@RB;Pb4l_)4U0}DeBPPm42RV?wTE7UmtuJj3s6A%(*r_z_6 zA7+n_8{kUsdQCeuod3i_M?D4yV1NN+OM3s= zKRcTD&nhTk+;~#Xb2Mn6Ps?uq@@RpS_Eqx(gVC7nhfmcFJl#GbG;CllPhj}of{^m> zqKErmLsgi$>_KFZ`cb_^=aa*_&4a_63nw=RD%c;M?P`#@o!TkRF207vw<+nhd$Dax z)Li^&6&u6f(KpM6E+)J0akwEIbM10sLf<1!-&?+RJAM30?tDw=LE2CY<7rA%r>s>3>CKp~IU&ra4hmB}?I?vR$(+tv?$@Sl zPAiYtH|Pu4c$=wjG?ie!4zzWw5Vp1cFrk71dEM;vw&Qqb3Dq)WIso(ZTp!CtK#BoP z`AR+h!xM5Dd_1`Ean|%dt+89Zc;AocKMII8o!8s%Ea-k>00%YMItUyEA86TwbHmqa{MgwrH8VfyX>nNTb-rO zH>mBzhB5JD@#MdpO9}F{>q_!Q6?)UHKxKsMHxC2I9jmr@wO^O_c7orA*1fI?o~9BW zNP=@T{+^)w0=u(wSNhv80~k!fJ&eF_K>^*%EgJ-E1AQZFKzZwJKzlYfmP0=&+v`rw 
z=Yu%V1UrxVw8NGcQv`7D0<-4-rqc+<(Fm&GKP>5iE{}=xYIu@9Om|3P>i(Kjssed3 zJa6^I?|GQ>^+#p)*ydTcVOyEjnHp+inULZ*6;muEk+?k;E1LLseAY9XFSe2?eijYo zfhbTX+R3ibT}}I%2bLvUAa+)hFo=|-%@^ZleJc5x*g1*Gw+z|q`W}h&9*K%c+G+~< z>K()Cie*+v6O}A5KjyqPZjpYgDoUiaHv z(*QgTYsx&=p?g3R-ygBn=3ZAp(b9%Enzd#}g%#)4CgLU6D|(x^Y{E!eY5U)ZQAr@X zWIsOg>!(PYp=g`Lo4T1XSR*-6u*^8x$ncE}b*8FWCJ1S~flZ-)IcCr^W?)3y(|?VI zK2cCfTRtyQens$UT&CUq93|Y)trBK?dly=E#NCg~L}(d9Gp2TW!6vLicZ!id#6hY5 z4b$ymYot%Z2QiKfoB@=|2-WSE$osvJN6*Ntd9T>wv zQx*a|K3g~1MfvJ|+=Lta;z1%1VP&OS$~k%D-jRuapsQDzy9a36ouF5JG3e1sM;6p> zSINF2tc|_2xbEA=g)m?tU=6uaK@WUDfUnsAuZ4Za%9mPS9*goGzHs(D!4h4s+ik_!F z=!O3*uVwX1$ycJ?DBHTlz+q-;J~W% zKw|xxvBl>MyCmbOH2TYKpODYk$2X5Bhjsc5#*S&Tjtty3NKN!JKlW=<+k}Sg5`?hJ zY+C-q2vQWaDBMKf&>f0yse50Y0V0x#XLl{e@YR$+qX!9|D)(E!=h;0Fg%-d*V0QbDc!=d zbP9q93P?%Ai(fx&)nB_p2KeR zrg=fNro@FS*}e1I*Xm?p&IKNf9RRn^3!LBoZ-@J~w>4MhYFS6*=bWR!*J62ZFoZk9 z<|%V8cJ5=5pW3~a7jauyu}?t;8eRa0kVM5!c-B>{T$Bl;wN z-kO;#Q_nNg`EcE(hS$Q=^j!Rg<(@MgS;OCoE`qer*QS`nK~Juu!9MwtvkV&By}`Z< zkjycP$F#`CbOI37YrDeGYYu2qdXH=+@DCeyMcxS*|6rig}Nqj*K zSG+ub;jp{rJ$J_?L0=6b3X*nsJ3UHX>>KUFMLE{^Zmp3u7tKUdtz%kLNG!R(Ct-ioip zsBrL!84N-2_BYP_MzLU8ktXMH@AoC8?>Tk+#>M+Evf+2b8M!1;QT}QvEL>3m1h9eH z@K?042WpH8UCYwycX=nra&it(pAaO{1Y7E$Dw;w~OtR@w3bIi$q*@9bdJ5?#v6Lfa z-m~(kXm}PwpVjcEO^WY0g70xSR3$u$E?duGKK>;uNJ&#bzIoU%IhtuQ>vL@3u;TRD ziVV$NH;Z02fnYZa-t-DlPlq^k;2ART;V7p*;#6g59V*~3v&f)SXL z5h%v^u|&S6PpiFoOMw5efCa`P02QFyDd3Buk$}}d+_Mn#!9+byC|$WzWhJEy=zQ2C zCAPd30oq{jerpYrKNtmQkT3#a5RDYSNTFafgb{$q0%X0v^dLKOAGICYnO#fUjq|WD z3mmKqM`gcp!wP<*9UXNS3(H1R$)@N}p}|NjC2;oxh

lx~c&dbRJSSCD!KAoJL24 z+L}ntVs=1*tp{Z#hQlwE*{8v?? zXmC3&tc9hg_>Pu6L^;3g8>t2bu?}aCr7G5wZx`2Y$AS^)Mg|;Y>W;2PlG63k|Gw zt!K_V{jdqH?D?8mUlDA9<-6>i*XB;RSOXiG*#VblD^EpGKU~XL$*+8LT@Xr`$I4KS^ zfooS3v%h8gFaC3eRBK@(4tzbVUVNXE|1`Pha;7IZuQigBv~SKF2NYw7!?!|uroVTv zG}E@C?HF4Uw`5rf}{*5iQHny<%G79i>bZU;)g9d?k9BDERtcMM_U*C2Md-;L4-5?=`e3aY9vU~B_*G)Dddor?QjIb95 zDvL$|BgmWJv+&coGA<$G7YM~mDEU&gS-Dyco0+|NX+(&k-$D%u4Oc41&6G$GmF zD_jgyqfMX3XazWtN!8M#rkNM^)lww53~lc?NdayI}tz zmXqrJp+}EpMS#Iq|1@?5F{?}1@&{vQ>DaSwhfQo!(SSv_o+%ha^?U-W^zE3U_p$S4 zn79+%`;)xOXJ>x5N5hC8l%pL~PPtF5q&UV(M=eZ;@_l?wkx_q>rCbVJVy)-;VFeB@ z2*0EHSN#qh!a_yn@10Uy8TLD_(Ito|%%?k%MPsa);fspRQu$ z(bx~cD3a()L2b|3K|v1xSjN+0-?2e_^Dr@!n5Io%Ta^mZLvLrBPt-+cYSCLqXBxcs zIAk;ifJj4;Ta$*-<|t4Wy*9HMKb`hbyUeVLa4jDYcK(}T4h`eXp2JYxWSrMxvdx~? z;e5ilpeIn0y{dIm@NW(3$wAsKu5R$B6yUE(dyGBQ>ti>*cOvS4LWAQqJ|Ro&Jxww@xe#EW*k@R64!KIXq^{kT(q3zpL& zg8!_PtA0X>sy3a@OAdz!^92qbKoiAkmvNJ%`75K2n9@TmS@;8IIzvn0GGhY-4`Ehi zx8cl01az{va5BpM{Q7i8o%=754<>6)g3823+)~_!Fp84`Lky;lG4bZ~vh&eUI1ov= zDvP^$O+!j)k3*f%#;cx)(3I^#^^~RK9jK~Rmb*c4F?^fTB?eBKZepBTPM>|g8O0Nx zaD#|0BR>)Y$)ef16URz^0F}a)As;N^QDk&RNCx*4>;xlEiCn4at_m4K-Xqp&#ht=} z&l&J3(+?I^OqA4W0z$fvK-A+|ZROh|EHYMXuY+77SgmwbmL1>Ew~sYYQg0(h>|^DZ zG*ca&ao}}ioP45L6lemSJ!i!(+dePPfk#4)dB)`99U`FoXb`iThE#kDlB`zO_et2? 
zlZBU5l`I|V4bk zMvEUSK6IPRC%3pHO?ExP+ulpi;y;!K(dBE?>n2m!2(^s#b;X4AQEWbtF3$_o;I+d( zvSOm@7&Wqe$;7L&N?t)Hy;nx$5rk;VT4DjlOPrjlUJ99nOC))*J&aXFV=5RYRWKvg z64YB@)_A9wH9!dQUP|L}mrP0J*lm@`S`C;Hpt*xy7S=rzY4cg&3|!XIz;VP%5w+NT z8kOt@o*-)8Eki%m;an0u97%6Em!oqTOv#M^f}@vGtR>S8~-Z|4jOkfq+ zT%#BJaO`_aa}@VZz(3gVpnb_9IV#CiuQE-2nh79LT2{qtqNx}&q@z-_)vI8($w=pG zCxCN)8Ga9Ak}DZ)XQ`23;T4$>TN7PO@J)mz;~5{vyI!~z3#on+2I!afQbX39xc?z4 z|ICr-$}uH^_3%|;K#U+<5^Oc|%Oj(Dr}o+-JOYWkQ|_u;vV>R=6;#LN%!@@iUTsWrFcYchRZ&#?R8i7k3ifJ>TWlQDxPffGFEYDHS$pzgWd)$Mrq^+lm&s?T))Jo$yV{h z!tFU4WHOZ!&+#j7nu3f&7h+y-ffOS28I5=7OHIsf&?APZ8PoGogxYw6t}f#fXH-f7 z^}HI0^}sCeYKc&5hvJ3cc%B3Mnx|#ynWeIpWv5t(0Ufz%HZPdk9&e(tkzg9_2&F`7 z3t=KjZl3c@4MOB?NV`Fw$7w5KJtyGhuXeC?q}5gX%O>5BKpK-ITh~ z&kGZ=3Y_A2`)LMxA6BU!IA)a&e&;CI-X$^P?#fKz=r5M0SX2j-K&P-HMWe-=KR#|) zE~S*DLQJJ=z#P*#SdeA)aSfK;9f3oIM)fM>rw>0#;%)qbTjA&t1V@$UMJ!_a5Kb6I za1o*ri5eK+vYYpChyy3ztCt#xXiNtW$J`_-+-Pu0BhUvLI3cWWcg$W^ra2aY<~@dx zqRF`^IV^mOjeLVFef(mhhp2}^zHxM^K44WeW5amx5fHXnMXy>0!0iG1|A0u^&|H%8 zBjGTw*6=@jpF5BPCM%4Hx7h$h>JViwByR3tKadh!BIqs0SNei%6< z@l3WH&?{pA^YCN0`EDmxcQ@6P9mJB}Kn(FkS3%-RJBK6uw1ZE zYLdzvtWNC)YbMWpm!l-P6Sx@F)Q|buT_I{3!DQuNELfIM>P9aO+IRl$K$$2LQT<<`L5J4X^D+MCZsiZ%hx85^hB38^C04CKU%;gfl&(w=nWocd(kL5Op;=u6SRy+)K(YA({l}0z7r}zaL^j1 zwE-Roex$lb(-V*^X-BZGKPApog>Tj+C?J zN!lb&Y8?_k2RBjga06H-pR+^)Skjk{zQYX^l#voYpCS);QgcbhyR{g&w1`lE;8>!2 z50F5@BEKh949eTXnccoZ&*M8h-by{7s5d|Zc5j7hV5%P>kbE86Oi-<=3`p5uMjYFb z3R0?ixMsQOhJzsAIeAaP@f|!~_W(SYP?Z@By;nzU&<79QlI?KQzb&jc1ES zvua_;eKgv6IBX<>7{`HRycX$^ZUc|DV$}w5cT$B#k2wfXmBAH90w~diY(82ogL>2M z`qHCAJ5}_-x{LH1G2Z& z+&(^ZTkO;LQF|GHMKa3rFd~p(%pe0bDpF_ASYeRCpn}K1f#w4L?P>{fT+JZ1$EWPT ze~zYk*HCltU!4U?FrH7=6aPAs5A=IpC|}$5Xa**IhqyC^!rj%i7m2BQkva0_Sx3>W zg9p-s=YSxCy}vxSaU#KnNs>GY4D8(C*btiovw)&RCCYyRWnhCtN7La}I~Qjgaaj(& zb`dNr@*5P2mNp%R8UZk5l=-_FS+*o63-n66|9cxA=4=PI(621mN?0f_CUgnf=2QJR4>g zrnk#Z3e}0mLl}|TJPpOMRmE1aB$M<8(e2dug~#n9AzH~y>-=;Q8+)lub_e#ssq_q% z5r9@1VWeps^>w7&i_+u(pcu{edi&mMaraoJxHY!GiB<@9_ 
zyue#tFcCutJ-@hh6VLNBhG9DSJGsA!1De$!-ZmD3D%<(uj9)Q%x#}ehv9Z6m+~Ohl z&b8H8IPt9wFxka_Ry%#bz`a9{GKLZcnGTWmHd09h4d@XzXmC?r0FS5Ow@@M{}6ivW*{2!U}w31PuA;e@IygnKMjE~ z?|U5QK-n0$i(EBwOi0=sBHIZwz@0Xef4qlOnWItlAzf|V@gSUO@Q-NxbLwRzl)>XS zdg#j#-Q|drM%X zF1;WY5E<0hMMJxcpnyHJw}bu&=?g)F_3WWm4x{a|VK`^LBaVB1^Cfr+dAiG+jDj>0v+)PoPFdbW8wgfo`sxF3Q^hb9@!JXnZ95(BLguFDA*nZMw5NzHJMNh<4838 z&CmVOs_{c%BWU|z%@t`iYL1uXU!yir8LVELHD8#kKDlC09W_5!qzQVFQEv2z+uAlB zt{%4WiH%9MwC;8mUnzT_HODQx`>3;Sh;3cunc^p5dhf>fiLZ8GmSlYP3!C#?;tXKX zCX=50uMd}kmmSCHZZcP*s)qK(lSVp5&}l=XgP?)2^ob~(+5cy1vLeyN-&*I8ampH zwK?zZM3aBpGH9zd9IS4PvY8l=6&o z2zF`{h(U|ja$&*ru>lP@>+X4Q^_t|P>hMkRcw{6j61w~y zcxN;B(5-N@)@p+F`VDR6CPJ-d=RyB$WckjItQefPs?DnKNt>)$R@{YZLEtNeX9@*T zd9MR$I{t3X<^3l7GI4q%ce7|zuzio0a*l-uIfRe@3~=~s#rvlo zSGVI?F*DA$q)s}UlBnM3qKb3E{CeqWMRrquWpz=LozX8U%-uUy_~v^Z4V88^3->W@gF~d4yBXn|h2K^oj{ob}{ z&}j8Zp^Z@EV0C{#j|<69#)u%JuhPqavSmZv%lIuA06j+2h`9Q@uPkf=0;Sk=Q(r0S z4990Uk>3@u(ED>a^>sW?^8T9uSdx?6!zt`<@Yoqk>jUKw_}R}SRI{^6;FWT$SM3dF ziHjer6M`L_x(@c z-p9pnymXphP^8S9c`WM8IpG8Bl-L+SNhhPHMLTTqZClY4afPG+hXP$S7e43o2^!TyNF zjd6TUMM^(^cTT#KcpH^ch!mf&k`G;0ZdJa)J;c4d&E?!GH+}LH+b7~XwGM?>FhYv_ zM{HDz&#rTniCbCU$9rz=9Xo6f%M19}8*;3p>fMG8-m}S%#>&h>`l$dLfh1X7j-VvP zxX(c=mY(1I)4V>}1im-EtN-|7%k}up-pl&q?xT*wrYC=Y3q=UQsHMxfLTFpZkv~%v z?PD+f6dj|1uSw;xB>!32bCqP*2;G@X-e$TrMVV+Mjc6lHw!m_|zLOer8sJzO@7QZz zI(hVFSj0qn%lYjs<;?VKQ-esdX>tuBd=>ufXih**v(`Y1;SPRSAE{j#sqJ1`8>!1< z%P42GfV<70E0Q1b(@;)3E2%spsJPNn`R^&k^H8wzlBuEG3zX^2i~K!Pmovgv&<#J~ zT=%*nyQyUPx%^P!+EHk!m4gc`w(G%5xXXb&#yQGZ{>VPaL~-=?g|$Z(*`c*Z)9=tX zp1E=ItZr{!M$Odk8;#Ajo|ZaJ^5&U}igzTsC4GLq$Xcz(i}&f_{Qmwu9s138!B^L( z(+c0O&-O+Siy-BA_KKhI&7PB(QdQCu;#iZr`4`A_C2oj1SMO`-VOY+tnI5ah@~DL1 z>t!qUFGPU`;>;rFvpjf=`u1R(p>u*uX1;ZHDLTb;bnvLw*vFtu+_JXJZG zPXe!GB_+TRGrqC%=eaDnb7Bj}!mxH_A#eEg!#jx?hIZ7$+N0T1;&w|G_jwtpmm_vK z8tZeElpl}T%pb7#4cWMALD&}Nru^&6mwiINvg1v8@2#*>A9qx8d@&=jncV06(a}b_ z9U#O#msG_sq(5mO{hvBMqg0FIBq5TM(jI!i6?cM9I2J}Z1n-RuNoU`Lz#?->0lL9+ 
zG^q)tBcoy$ooVxGw>g2#iDcADT+5itv^^Z)j73uTpYVe0f(t@tJZ6cf60YV#amB?xY+pIZ|pS zJGxbJ90fDU<4MQgH87_3I#L}7LdFB4qhyNTe07tqW)I>xOr8qG0ay8nYr;eHL)Ot9 zp>d7c%DT04)(78*ayYk>MS(9ZRhFGo%T1v_RSTh5D5j;^vNaQ3A~_c$=xh+4aN0n2 zWhzF+Kh>oi&rn&TA$ePek2g^OOHpE__E$Lj&%WDp)r_eLllGdH2~%vn)q(+<|QgvPSiIPuw zn5sAy5yR~VcbX5@+$Eh-R!!kAd#=X3+!#{xFA`-gzHzjg?y{{@-r;3XgbBoIs@AAg zE6%f41(4U`clDUk$D+3G3K?yX-w(ERc(ERg264Qu4jKnCem361yXySi)7{vx9 zzl}rcD1>IHLrD3Vx1l-;-41_!w)(4jZ#;bV(OdQfBUypcW`r>>sY|^D?KCmoM%v!M zAG^bS!(M&f3bsZrTA(%XtI0hRA=wpj&O0j16f2UtW?`C^vn5_odK~emosw zidO8VKBL9#%K^tL=UXb}W);w+N_N6k7D~wfzGe20`wn8=QuZSjwMP{SBTqbT=U2GB zy*WCf!~JqQ$vvkjJtuY5E%t8FYORs`9e2mUbN|lGR|dLgcI%fyxVFkbB2KDNW2mr} zgo_18-#u&!`+Jy6=?PU{EmK93+DD-3x$F`yerUMa^Eie@LoL+8e~7-)HuEyo(y3wF zk6)8NdRW2u_7Mvc`Gk^0TVh#4Vy=KQy~RNRI4t*j$!8SO6^G|9`HFLVe~u<(!37??}GBNU>Sp@3rA^;X25$R zr$<)GEk4SAO#M(F-A9ykZR#7AMuUvT(y+2lb4ZeBOwfu77#R@Xu2Ga zl$_V5V0BI?_aiw0IyoLQD!E5;jc7;@mY1c?v@=gL38VR^ipjq<+TKb1R)i2R!sqx% zIN~cynzTq(W{k&tcp{HmWRU+1iN8$^v#X-;mMQfflX9V@d%4tueLSneHtSwa!0?dH z%f;B;an^kjVAhX30Y@O6(TE>_JfqsNIQ!sn^h@vRSA04m;d_MsFFZF0r!S&|Bu626 znX!;oWGEI%&C3h9efMIBED%NxIVDr1m0Hc^jdtX7%#b#c3jS?w(|-pw6V2FKphDfRyh~izw^dDCPK+A6w*)GNn6owN$ei zo&7cAZ!>lWYHDZPa*@%5-*Nrp6xK&855wtRm7FqaVaM8CBi{AttQfT zcuoY56YUrb_;^>e3+z!g_Mm%Ehb~0N-^#t(2}<-u{1dPol;S9m7 zWdCu>4zf%ZtdSvYk;(p*{r5tea!VRsZfzJuCJspfOhKZ}Cd!Jrl2Q!f4pCOjRP)&T z+Z}40n(_b>qd~qA*=Tk*)KsZOt*@QZ7o@l}>IO`@nQS4F2i@*i*v07Ey*SlcTNcSL z7rhib6*&zj^BqrQQnVNAY4gO6>KOO1YX!$8+renCu}zDkJ3_{HsqB5ziWQ zGUuf{KhTS#CNsMVihjlE|Hg9zXwW|)Y$3`37D-Z!@Gk zn|c!l1f;u3w2oKqACu;B4wtMYP=>)EDIfUBK=(8t0(4v`iMb}7M=FBU2{U!xcNN;EE^SMCY|1@+x!R4EkP+(hX;G$HhpJrb( zWjL$*(|M}ojQr%>KH~1EQ&Z{-kP#L_LM-k-$}PmG&Q`i_Ev5W&;wvZ7K`5Il3-9ud9pf zu3p%VJ{!CsqcF#kbs-VFj)9{{4o68ci~(aSkK0r+d_htU&oG{TKRi-O|4lR^i*WyX zQ5}_;%`#riQh9%Jw0a=@e)qQbw+`t%ym6kcv1m=C*`{;W!GGGCQ7BO8WkTpSV_75a zc@tLFnn6%Vs?0u;zA?S7Q5hrR3w{*I(3qEsHuKSuDu-*!n{<^~OO*%s<-rAlM^7kc z=BTK)nX|z<3e_>}%i42SE8!loRB@IB`2$I zCNFBHIBYJ*HPec=(6P7Bc(x#VnyJ{yUzqYP(NZGWDHTpyc<#3fhP4V23|mn>tpbLv 
zOU=@Cv~AL&Z8ef@@?mX?MQut+WciY9HO=I#vH%m&8IlfGm5dU_L7ZA;Wop_D>cKLK zg8D^Zv!ZsVigvTZcEciiharfnFIW^0S)`@BiEQPmrx5)^A#%UfEvdD8uEU+8)i(@+ z41)wELB`TMLwh>Ii#pfmItxwJ7L&MkL)GTl>y9H5VJjK$rHrl+qd?q zZ~adnp1ptbu#d$aVDRnVE9yV!=|B3^fBdI^_pQK8D~QP#bRGs`N*_3{A7E-8xGoyF zOd9wTHUQWRfQtvNJO}=I;sDit>T4V=$Kdk)zW4VBKinTgqz^Kv4k8)=w(CKb=|Kd3 z5YaoxtUAP6JcKv_M5l)&dWR&}hFGVEWH^RpIff;ThG}p^wD!Z28~~vOctIPuv?<;~ zQPlj&;s=YWNz99_g%`c-qqmE>C6u`O4~wjdN3AJG&<&$jYom5+Vs-W1gh#+;z!-oY zBb1LhB#(NnjbRl5b|v7HVm#<*)Q>U>XeI!D#iLS+V=l#G9!6vTa>m((0YBBT`kt|R z_9(*qc)ZwzruNu>;p1+_V`*NK`Ms0wlw*&l$H0;k{lCW&!zZH}xsvc0B#B`2GsXvv=xs%IVz$cDtK@lI?GQl?`86@s@%)#F)yB;D&}^0F*$oZokuyd zpgOZ;GxI)tW-c<4 zd3T}8eV1}}ac#z`Vdm<3nlSzD3+2>5n>qNCInt6j^3P)RlJWCqoK3Zyy4Na3z2Z2{ zqA2M6Zo`Z9NAI})&L7TC=Zei;5jdv!XC@e==qf13UvNy#uZqIfPHt9ZskkPl`4!s)#SmhcDRKOcmQK-7sF#>|1o~ok%j8z#=BG6B9AXV^Uqq zx4p;vD8+M4$+GRqo=U#YHYJO(;Bfp);_6N2EdDh>;dV*SwPUc_nRTCF9!)J90J4b~V>~HJ@rV&wDkg zafPA`sB8pY8h?D*2Qa07thW8wWc#t9xQwDPMk+eEiM%>HH@C z&F7C@-k<&%LE$wp&2zuvZ?p|JhxDmEw6x3uc@)wpp|>3Ga~ZAS0&m?qbl_UugBnk zg{K?<1Q-bkUw*^!w&!|EGiM6BGWsuO`%(CE*Yy^?8I82s&c!s%PhXmg^c|1Voy+DO zAHt3=*REgx&hPXcw^YJTkPnS_)6V7G?vtk7r|Ub<)b=8%_n!LfMN{v^OYDU;?F5(7 z{Pv{@q}~la-pMK5$>Q4oPhvmQZui&Reg^ga6Se(Z0udCkSHrcJ+`d|b znexDy>X$RMe`jd!bBy}A)`N4MXXkp)&kY97jlP_l{5v=0{&`#dr^SPxmd}1#J^y)c z;HS-(pLYL#+H+qxs$V!ixNv!P;r9H(W8lK;%Z1Os3t#SEe(Jvh9{dV=_AB`Luh4;C z;a`3|{r3yYeHp2K8U5fg_St3p^UK76%j7SYssAq1xqoM>|IU8!JNMb|{O7+52YwfS z`Ca<&cNzCph5A+1gR7U%u4nd`%qm1KAP*?Y1A;=da8f}C0!2wL1qcX|h|7tJOYpKP8LBAK=u=A=!zpgb zNPA23+l!mIurtZ%3rpTsQ2vjHT~}NBiMC>}mV&pgk}q;L_g&&Pr=3~B27<)YeGd^o(Q#v$#n+Fy$Mw92~h41QRobjt$i$9 z?0GBG+#uUWyWdBz#~st_uh;eX?(0y~hR6SPKXRzMqcZr&YQoO^?cK)<#t&!idraT= zn6V6)y%RKZ&vV|=f8IQJ-YjVTUciij)}Ys&v4_@U&ejvo79;Lf<8DN@h5M9K(3tDv zv0#s>K(jK1+7{Wo5`n$vf^XJ2a#wE_t*RETC}qEstX~uB{w&!2S)^l4r0SDw{U@o) zkIHo`id`QSI^Qa_;-wol_}{K`OyfDHHyIaq=nuA-cDC5QZL{s}u>9C&J=~_4UKe<~ zq;S0?KD#BdyT$kItLX6>dv=pnYP-upt6sr-jocND!Vg+G^On_%RxL{=dCM*dQ|_f^$cVZeD`VSoGbY*pkwc>6w|ysnO}#`PuoIfB*hnT>RYG*};F= 
z=zUj0N&tS5ap*Od4MveNh}cXtmk-C$@);ECwN#8IA!Iz(CR!@T(~z2p9Qv(QlUXS9 zI-AMX7jN?edI~qTA`hJ}%G3;#m+!K6u@cC`$t1kms2nCl> zSM$bjJcFp+bXUvQ@pL}JQlmGm+i&w_JlCh+wC%nt(@f$r?rz^-tTC^*o9TXi_@UY9 zZK-ij$G1;i{)g)`J)Pgb;G!tFO?tb2e4WS=eK6bm=4^Ma((t)SU-!?$l{U{Wvwc0k zzT<}pN&lY;gzR2lykO~hYX9f#WO5*lI2G9ZEwy{t`EFp~_B8>9LTc}W_A(<5sA|d{ z9c}W#Q=`06QGXLH%JkO3Zj=*!bG58LI(to7+gNw$`J4y8ndKA@aEI zl1`n_g`$h4_eMu(5T+++)s!n_QfoysQo(k@YvmVy8rInk6- zh<)5H2=c(!!2jze8tZ~jm7lBMbR&AJN=!&`t#4EFH)4;ORB(VI-h3ze0zW(mc36jq zz{si;cN;x(KbaQ^Zq7wTQCwDnUYSOs(r+$ZG)n-Gt*>0L-xY-w5gGk^d!gU!?l;6j zl}I;UE;BlB7Ti?yxIqH;E!&QM{qaw8Znc5l2Aac75j*B%S^J^|t|`2)#+@4e3jUWe z`dtk?%NM6M;^+D5W^%$1CP+Q^hg#T)F3# z`om2laSa9!Q=!smW@p)yi>^1N=~Cck*H08Z2I*;#7b%(rbWR6lgwA7D)GsyuOOCeC zNX&{3@)SmulLUcG3>_ zqrQg;3y#oamwD{r7n6@GJ;TUdL%g@7nT^jwpOZU#6=+G=KPorl4X8V5i0$>;d%vFe zZbFW_{Z|Z#OC>v1Ka4HaQ(6kE#et64=?*AlPE2136vXbr+$W0ghP zP7Ttdw02b@tEkNXPCQ5Z!EkR|jO;!buBmk`*W6pJBvCyaguzYaC?qBgxEAkCOv}D{ z7n0!1)=KgP9w|g=M;kRBhF?^D-G8z5E|6c6eF#(2wYfj*N>~k@-`~?6$!MRnggh!k zhr7{$OmTLSl=jO&GiJBk@Fs~E!(vdbQpCB>?G?U>G5QrWFg0xjK3P4tlPwRyidn*C zd9d+vjZW^~z(Z%|=+oTv6uK=x*GjV@zr7aLtCm~;*jH(j^a=%qXAQ`C96&wUJ+d0~ z#u+0EymWpek$`>3VL7hya!~w?$-r)ZKCTiWG;;J)A~s)zab=99cU0dObj_CuHV$f` zAw#>9(9sPl#&=9O+~&TJ%xy#MsjBftgAf1k5e4-f*6S&8&>tHgGxgDWLE>5XXgz_z zR(+k;XoIvR(mcUQhCd;GA!0TUz#Dn0!fX`Wbc8mhNZQ9V0#9oa#x=j28jnEA}rbeQcTa43lC5O_kGV=McfvX8%I|ktQZONFtaU&_b`0<>{u)JEWOdA>^|?m4xJik=u)P- zwX1T-h+(RO^h;sZtot0o6Fx4arGCo`aivybN|T4&Em<6$Q!eSbp98(v?&Ud!_}<}p zf^y;r{$R@|C=`}~&gd3xd|r%@xt^e^v#liDy{q{_R<-|I&7#4o#ji(cFjC~h`+w61 z>sSEO+`X)eA-?+P5Wx~lV>iv=B_sXT=S@${gUICyB3@Fg_!rjdBf5{z`m4JRm0!J( zQ`!uB`nIn}(A;r^9*Dlqzy{5eaz|_~1`F|H>)()=47SFpU-gvUn|<3!QKOTflOV!e zlXzE#Jf~?WdW4P69rKcxzJWZ_B5JF$1w@;D2bM`=2Ez^ou*H|4d>yGQCaNh&vBv5J zWy*I3t(IEfhc?}f5fjIP_kR$x837F*`U|~OtTzNaua}OfiDYFJ28SC7b~he%_OSlW zjJegt_h{_hp+b zUU&KDcgq)VN(=r<{&4H8cJ|2GFU$6($q&I*9@E(K+P0qkZ20yZ==3wfBn|m z&d_?&uvMbC?XQ=QD|^WD@#`0PYkN*fTFb$N*4mP9TMud0J_wL?)-P6kPw?u*?*?7w zCG`3BPhGuV7c?9G`-82p&Ce7ODvWQc|Aq*i(s=Ft^ry}g 
z+?R8@vyYnmi~rc{{=)yBeav-U4zb-+q6wV~TR*FyxXJyyD{kpK=x41tC%4&v@>0FT z&v1LrLkpU)59QQBbzx5qt-f?GJ?Rf@66NG}`7dmBW<9W_^X8%1ukPh>yT|ok`VPF) z{(K>!2_7OLoP@mU`S9fUSHrE6qqGk_pXW;hpPcl4&&X3c-U2}aGQ1#Ak&$zR9@u%6J3pZdAp#?>|f}I0KlZ!i%!D3zW^=v4>0%ivga1zv}oYZm+u5(sS)c=g-khm zJOEJQpZ;Pm`+Lrf?mD>f&;6S^;bQmMBIX$}<4lGGz`g)kJeKIKSsBNYuVCp{uoQ?0 zhTsV1d@MO>1j7WDv?hWH5y90J!KfEWVHJUlkL2x&{3tjSjAQ0+oDfx)Qyq5Q7Phz7-sOTpdkx zZjAeheShrub|@xTS{z*?5-b~gtw5CM#9qfeIqrNG-2Rk3AM4x|s~(U2UX4|Ai&L(N zI zIH3fUShbf>u$S;$4^ml^SiG0e)RkC>NEAB(3z#Og2PX-jlREN~ZlIIIa7jHYN&Qyf zf%v4p{G^HCWU-SZTyWCRW%3*%WkE4z$tq=j0xSbS%DYn5R#Mh4iAdqpk6kGebFiHi zvYq_YgRa!0mDJPjZn@dXb-Y6;JZKIgPNAM(9clrA_}GpZ4dHj95Sw z10a;E>D0f|DSv}$l``n`GpK|!n1mse!jKy{Fj&$t&Wiq$7AV1z#`}^9x@V$lGsWC9 z6MV@;3kaFw!dXHEnNqA-vIUt5Xmafns;`pNpYc@drqt+FD)oITjo(=t_Ss))slVc> zZjoi131@#!&%VVX>S}(9dqEevAY(N@L#dz(DD1*gB>v9NCoAX!s7CB5@BJ>s=u1exIeUmUn#Ls^Zi6QaAo>g>$t!)yU3|%_)8UnPvUf_w^Q*-!v?Z1HB~ygS z;#sni(z24blf@t26#tK+^A2jFYomA)AS5I}LhpnsozOdmD!qdUh#; zp~ZOU*QK)K{jws!)6=E0BFxhw{L^3iW#0;)p7lWUH=Z8tmla~5Z=Q>7voMBcwr-lQ_$s`CD9CGNOVU82fRy;3i_Ql*H_0Kl+}rC%n{FCAA|rqVkU zQMPIaE`}Sa)i+bCH#e$pMprK)YMfH(Z};Zz0;qzGVcnsy(2M*~r=qZfn&+t6NE6t9 zskPAuH6gvV|6SCCEY*_Z4r=dDz4dVR)4-E(YRbxK_? 
zzP^`OS9zl-d*c7*g>F3TC4hc+mVR}XQVVKWpKWNmXy~|T*d@|;E;qC_G_Im@2Z|c| z{1^<~Xnhf2KO*>5FLdfeNxPuH4-f@Tjwvt}V@>d-T%*24zqaRc;_3z~z0;?YnV zQ)rPDF%M7KLrZ4q)ZC~@2snuVXP<Ql8`hv zI)l&}JJZfJt4<1$^hO%}bqxk5E=nCAb~n1#^G|+1rY_p43wu!Wz@!Ve+|@nUh2wgD z*Qvl*S#{%MJKyXk*Vo z!?TOV=3e@~i^f7&;}g?Ho&ARHhDMT8)0jlFAF;P-rPmAJ+pyBx2-DngYl8KS zIW_GRL1`OXsJNjo16m-BEe9sVVHgyD4EfB}dfD4wV%0yZ(LV|!=8*wV24`r|ES-!S zCFY@BEbW=<{r0Uxn0OjoG2q1zMc;6KvD4Qk^k?8oUzS zxQz}Yf6AbxxVB$fN@B2uK%LD{?mB=z@+ve z*YHEhTC7u7w8?YP{%(9@U5Har42)Fm*N{us@HCoKa#5q=(tW3>uA-o8v3VxpbkT$Z%?@Io=|83_f~oUHh>g8X=gQMewWky>evZ87GISeqmYlrNOTUTE7lGH__wNdWJrAZRY^lQ`#Y2&jb)gz{6($X( z7xg`W+E~~K-npS4KaE+fPd|KqC%WO`WZg!|^kYfV63=vL5o}-szK$8K=<6wO94Y4> z{kGcE6g~68X*ALb{7${(R?{dTqwn_Rj9U}f%cW(gag4UFH7w>;uqM&}a*SbZ>~-I( z>_Gxc!0TKd_($&Xy@2*@*h^GU`!V;pK)~EaBl#`A^f+JIvmyNJVlyV88`(HCBo;BK z?>|@<(_z*)U)euTEd{Ze9Q2u}c2H+nCQLfRCRttboF*op1y-BA55QaD-EL<^f3!-k`1jhCnUD80zsS^F5h;Gb zBi$}54Y=T*=Wg>rr&d;_cU1Rxqzpo+$(Qq{#ZVjpQiY~*DV}8LpIrO9=9M~hFZJD2 z$&k=d*LCx{JAWo48oTZm4@amMMKzOTVm>6?CsnF-7xvcSFP_&flN8OmWtu)Fr#1{g zXIi;(lXw@~iyCdrW}d9PG3Qyd4IS<11LvDBQGMxkyBwX4?rTfy^>*q1wA_04a_RhH ztm4ZU)7sWk`nQ#9%Zz`PnbTH&z*b*Mw{K65pP9Z^c3t7S`En}_KH=70u=;GgbXB>6 zaZ;<&#B^e6VBSh*!mX*;h(d<9VW?&x%=AnXspjDM!c<-A z1gR+cLs&+4Lf^-G&h@w!lENyir*I>)rIGPXqm|3zK<@}Xcz?xhv`}XDNons}uGyO< zC!LdhO|xUK`o>^iw(2sLCp5@!>%R6A9}e8_AD~ws&+KpGGN0p-JiXHRLf!RLrup!= zZ%(E4P$i)KJYdzhWrsS9ks32^msvGWKGLLLpjth%yj2;8{o+yf#i;qqD#Or#Rh{Sk zF20f}myBBfzE7XDex9|gN2=GxlzmJ)AW>xCauX78z0Q}n`#1UH)8(I0LEq*|8jee5 zXhV(@wPp%6_7f}{M~L(!iRQ}CzML?P6`4-}%>)?w}Vl5vIB@hQx>Fz-?7(RlBRi{`7Zhh5t=`waDD1Aox~O5N6s|8-Qa zu3F!kD8%mC5~y7>C#}i6lWPorof;RmcxyMvKSW}tBm)1s4*cD<7{0eWwH88JmiicC zULPw-3f0;>dqb+EYe+8ZK41S?ZF1bV+{47snD=#~aB`!-c@fYy&8-q^cvJ;dkeJp@ zq=3TwAa6JIQ<=mK%_>C~o|+b-%!^o{*;Pg&SM&7?=XNIY@Pc9C;@Ms%MtH@Pj{16= zsz-MI@YCG@j#LwNf8PAmsj)uxSLFE;!7_R1HrLkL31N5_XA(`5$5 z4qu$Qg4SLna9{uG(tG#I`e^a>Z|mK^KzDRm#AZ=Cu~e+eF{)uG!xXlwhL?_Q8{>Jb zSrYB^E8arITK|zxd7KoLXDdvL`AVHSdR#k#dumu$lw=oMZp{xT{E7VP-0$&nZ1+!O 
z$YOW!0GMwj>f3rx@cvfyp<3(4WXARuE3d{~*O{l=u@+kFQQzm{Y(Lz6czM3p6RpGD zov{&fvD*9YN)-R2zrh#3Wy$tmE zxG)o5G)24`Z%1b>!fY+^wu;H4LAcgLQD?N)JzL(tp$@CwAW0y6PZh6>9YPkm`Ra>a ztH?E>D-=#bf}3K_W93c6YpdE#BwI(qTiV}zhY_{Ja^UUgSJbvdeU3u2DIIj5Vdrd% zTF^7qs}W=crCG&@XCD&&hdO#>NaqVGy+4m*Wi%Bu)<;;|A*-+X7o{*4Ic5us;)5GJvl;%=6`|!Ooa`1f zM0U_!<7<}qK3*HCnJPxeb!T6Xeo2?sB=wI*)eT20+PxwDeB`sN0ku$**(hC_RGSW1 zL24_R6zE7aU?MeHJic1P%&mD!poUPEJY6u$*vcjaS(0|&f85+$m&}o*@ z`VmXkxU2q?828oocF>c%@L|wYarL)7&dF_Pg70KiJIY?XTl;!LWC1|L>z`dPs%+&+?LgsghC>?Bh z(1u3gjER$m+Jih3Nue-(lX{+U%U@6U`71H=cY58H_o6e0?+jW)o0@JV7>rNJ5EuB=~jqcm^DP(Fqm44@X zzmmUnVg4lAwb!>kWIy4+@fWNw@b|x*m&L$S1+fgP2}gurf(#EEU{e_rzhNGtkPgG^gEF_4p;VC?d7a?2GSpxz}b#a)xH}?kriUg2_@!La-&};msPg z`kK@Tru8zuPO;S{Gqrg6g>V)FttFRMZ&@n4^32N8YU^5`yzg&*YAZEVbJE*en_BYC zFe|BYNozenPf%zueEMwjn!^n$?nfIRlpIID;a81Ta8rORG7^tP+|Y|t6fajwued=Q zW1K>1jVv4&78TI-qL%c{HpVvQWktQcIwitdS$BN1^T_0JzD?DPj)-fYC%b(tIoI^c z+HAoqeQ){3TvA(y1@e}OV4?T5z9j+c%KC9v)`2q?LCbI!t$9ujcaNx73U+7s zUk16I5z+G+EKj}j#jfG5JlN@3h)DIWclJ%Cn7=Z$ra$nS>s-kkXv^ikzX=E3qj&i6 znydM!*60$x^UzVD>ch^dBp5mw6jo^zPo0cRgjmu09(yKTj6H{H&sH}!Z6vw$bcc@a zT{&zwp?xBs$F+H8G_+7pC;NG_l7~v_mzB3oUawuv3mSgvy%7-x1@lA^a3p)Be}U9b zj=)G}Zr2~)-v$pjgL+fAn}M09WweIq222=1QYvB%tl#q z35D}_JW_2(43+hn8G>?Ix&MxiDOHJa1#Mlos6>ADKJy&dSlEgl7g~_&uNy6(#P+_@ zcpMmAuCNSAvgG4qy;~2(kky+(X#pdD&sXsi z=&%2-hYB+;iU`oj9f^F7S~gA&X40|ig&3;U0W)GwU=1wBoae*rchp{`yI;mijv;Iw z=MKSJ5X&=8#+xrPQ*WAFU!HTAD32Uaga{S=he*bZ9732mQAOg+$zt)0oM8V`8!5yA zFPw>{4V_U~0SU^k@IJo(kxczfq5s)8Z@3u+cJtO_(=9_fcyj~uHkPZS&OkcQ8s?(9 zm^P`*qEuH}yqzZp?(#KHN>Sm_)ne3&xw$QKr~K%QeD9h~$7X_b(Yo2AFVnkG29OG)0;>VcUFS1Ku)?+h~g^u2wHcTlm;9k zBl6=nVpb}`fFE4qmBIE81=ifCx7m$TCuGxs8J;Y=@d_6PXoB?*Ej>NgM{KF+oWnfh zKa_Ogr2DID!&BZM06G=8b!gJ|pM8Av?OV}nPngQ->6_^0tLsFH5jHG6#4%{&wD!^E=ck=bn~Q-6Ed>3I zv!p zVhIk71*RwvqZj}j3J?@aAYtNdFeFei067?^kI^InG$+s%ErN}6B(TsqmX?wU-VF>F zQ=SpiiV*7x83fQc0s}Gn=2Em=XaEg5)<_dAX+wNZp!qZ}=ub#QQb4iY0H$OhoQi=H zje}vZba%4g;J^tCkN1ZWHi2)#u;Pi z)q#Z}ir6Ldlx{bb?LF06>MpCI@1(sc@iVBM=8Rxj!yDJQ?;H 
zue5<>qI!}u++W=F?Lua0$dSF_bGg$1uX#_ zOp?`~>}XQ#U7__cN!PWaQDXrjkFk8}>0d#enh-1$1ws}8Ktw$qF>2@@NfUu96iyh# z^1j9v-0%<#y=XOY0h+l0;b<%;K#P2!InxKE;=sZoqciAOQ$_6T9cHn1S#rb%GhG@E zR)jF4BC6G4S#< zJT_wd_RQRi+QNP+)5Fe?8HMv*O*p#DJ#D+PkKy|1r~2>NQ;%_gnI{fzgylpho(d<2 zJL2GuxIZ1&&Ue5b-8k00nV*UPZZUA9n(F28OC5E*Y+AdLY662806C6hKw)K66D<7O zDQHiP*;$QoAOZqlbn8(<0O=EE!D0YQEWR6vq8C?SSWU4<0Ko`cfeS!$ws;guIgJGs z00a=gTss0$pcf2Vk0fMp9AEyE#a07ZYf)K1GCjM)9>j&Vwk-?f-d_`~p z4(N!R`jd$M0iKbh*XNmL#n`VGRs^GySd9SmRsfH4i5VmGJO|&powAb{kQ$9U%mH5a zRE~;M=1d0CBLJKQ35x~b^^R$JRV))BaZw?+M}luhKGX~4Ph(o4*VC}25Aagc-4Q+}i6>%D2S(9IK`RWT9wI{{Bq$G3kBe0CB z;MjtfAw(dzuReEO(M?X(f7{@=m8M(b(q>!od-PEs@09M`9R?~Wn zcz_*1$?kad#UvFOK-AN1UeFw;wyaLKWVN3HGzJ`lZzzHOTqLrJ0k|N_?xe(}2LS%_ zsSp6Yr$Q_(z-k^&?{S*G`C7}B0FExz@)N?XHoadQ&$L~|A;+hE`pQaDfUrgE{sSC` z70dP^bJsy_zXX@F+vwp6%})dDoxIF@_gOs6upyOgH!)cH?hhfAV3z{$K5XXGVB?u@ z(``55U{$cosAJ0w9DoSC?F;@r*!D~y(W5SwdK`N(55G|e1fd)!{8b&px;yIK{-7Ze z&JggA+3ZPSBnki-LD1N%#opOOTT!{I(MS*%vhNoI{F@oxvX%aMTh#Gd@O^&KBDJO? zRx_DAgRD1Sq#pU@TGKbg2&Q8FCNUM3@!eXp(b982D9-5BiLTdCnvXE?6(h%Q#ah;Pn)I_dSOog#pq&DbV5$ zCm%l%(a75t5cUFUz?FPzb4MCFN2~+ZnC6(ekOy1YX-x2dj^3~tfU@6c$GJx(mLn4D z&TwkM*|=#sEkhX%t(6YeRG%ntD+i7mi>qTktgvF|<8(2O6VL@84ZdO0`^q$$uBn#? 
zu;+-^XR{MrbeVA6>4gIT#J*QG3lInv(0%aYe&?&3Q`aLn$bs_cpo>J>L2Sq(JfZ+d z<%@H8*BR*M1xLq*jPE{sofvvN!UpiBAp$RL*43cz<=bgss-OLy(YOlZHWhM@l7V}j z>t3b~jtlE217*S66^8Z1V?$MMD{RE2Cf+wOM!bh0f32f$eKctg|)aiC+Y`o?|%?6v@JmN|Oap5wK- zQ9RJfxU??b%!CK;aFnIFzwAGC~p6aB!u@qW9W z=H5f1Ycen<@_mrE;h{Yrro8r0RnC4j!S&zABhC-V@#mijpIv8ZzLfeL5NZB12l%W3 z*;D~R|2?+tSwFd7R0m9ar7k~3rbu9ft8vd3S!YAiB@#KYxCtKM->$mEJ{<0EBKeRt zsp7Hkpj;<1Khu;-oucW7ZrW~8MB}Bo20s}Mn%p1qES7P$2+C&(Qa|4W2*zc&C73~= z1LY@B;)(eU`69dmtuZyjXbB;EC2vF0QQ6Nr1;`1U=2F#chx1X zu}eM&xbKB?7-5kRja%3fPfVhZTjDJ@AQ0~nv!vi2g`;u=x}PW12MAOpr}ngn+(Nsk z*Z%4KkS2F$mBc}9j|b>Wr(eB7`59Z4{jG>JI-1C^V-#D^hg|APQ|8@EpYKOAY;dPP z0-|(DVS_o7BmfzHoJf|ZwR8c_&5K45M;Otda5Ts)@UBod5Nz}#$o5p;M!_e)NQ`C@kWM~1m5wnn*^}c$zSBT z52r%rO*aEhD-{>a7*#jmVND`0CStegTi8#&uK-4e`VDHv`HStMcdkGW)y?3=kPyd} zOIk?CYB%ejd;tp0daXN8QssD)6vRNAzv2*IR*3dns8V}3I3%9o-Amma;cs4`R5?e0 zMx@dKk+ri;#yN)D;nSCAhs)>4uUAHsjM*zPmm67)y; z>uT?Rr(eJPQ90fiPGXh~)>OH=c$s}jHEV25z@&mjzN2(l1$C0uX~uJWXZcK%O-7|U zeI837sZH{jkbrjt%#-mnOoz|^E4PGejKX#Q9#*jc8I%{(ZBc3*Zf*?9oAC;9K$YaU zY2{7)vsjSABC#CEIg8Fj(s^1@(JOlrD4$upEhM+9Zi&daCl(gJ5453kN{MAzM(r!!!|-NmG#Uo^eIjJ zl>J%yr7CG!nepCsM(ituZKjF>JTwkdsJ)Y}uo&v059V0J`l)?HwOehDliezNZaRm@?iCq3s7!12g!dn{-_fQ=yZDmRLywwI+$&TX@ zis`lRW21!EwvB{7W4^T@8+O|~xAqBgGVfc)&eh8)LFiffQ<3#8ljXUD)IL=eYmxiZ*|+W=inP-T`if-Vj7i==GE00e z-{KQ*h&o;u`JTswc|bm?<;VUrs7=O7cI{2mfqCZ5Rin5udMY&jwhW123gQz8gcAno z1t=>sP~z3hAsqc}=BTs}-HsNr&JT^Z6@||Bce5U_{T7kE!S%Wpo}hdK2%;U7!2;T6 zDxwQI0$+<{xCFm8E$t<6Qu)e_;6=0L_hLMV2tQ%SgU5zW8G}@M*j{<{RjJfzu>t1Pn zZ+rZ(YI?5ZJ+ud^s~Lqzld!W+g&2_uEKkm<_z-e@h80^(PcR15JOm>_1QwENW_iW7 z{HYm0EI7Q<8M*LQFzAk+og{i#<;4k>=58EV(J_lYI5duJFC4(>yQ+EzO6BT}0`#7Z zr8&i`O%+x~^fn@;qx55W7X`tr-B^Y|0V>Yf+iZjtW{@l1++|^lM!0|ps!Z`N;<0S~ z1zBJFtNLVc;$@%aX|(HyDv^|m3Ys`bbDLm(Rp&96hTBJ2-Rpi1#|^+(nhul!4N|Di zyf+EsKgPc5cU9FxA))plhtTwpMGTN*aUqd}cZ5N32=to8 zIWsu55gxEXC7IoxXMzSX!cd?Wc5hqE(Sk_p%?Z)ysC=4@P3b3uL2AdLR0oJV0Ms}# z%<+k6Vstyfsc`X@k@9G627zlBTR#>f7w4MZ1cq8+;QlC%>`|l#>v_VYlS9!$Lsv$v 
z`xxh!y2n@Ny1Kdsq=Qepd3KND_|Kz?9&9M{G3sNe_3;4UcLGG;45sk}0R#xUtwN}= zkS7>@!8*yI#VApbKu;9WmB~qEH`oRL8P_R8b_I?c+qX+xwHJ z452*aZFG`~HX6WPKsE3>##*hVL&RAsik2NS2sS+4U_$Q?b-HE4=js7K{Y8_V%k^^n z8OV9+nZ-byj~1yzQen*(z(G9>`lOq$Ai$$grKx~V+ z!m=ycM($9n{&AXXTI%>9gvS8C8xRaf$AxojqsA8P+#QIqNQ}k9>AgC7I2IQIpXGi! zx3A20docO919I1jM4P5Ajo29kQg_F3YT95!9Bc?I*pUcgO9z05eGBDpV*#W(Z~3aF!ch~U z!okH8pB^oh-udRQcRzgM8~iczs7@4gV1i=IHlqngT0?##+(9&Am;vTZ{?-&g5Aqlz zjzB^TvClUzHfIElxUyL|G^FP{GaouwEn*ig}!fn003-K|lnGBQi`Ltuu@eroNj!C8Q6 zFube`Qmg0)1PdnNC>>E8a6d?5FhHV=i_f+we0*#FQc{yy30_HvqvfC!$Pk;(UVDRt zq0t3~+08f=@i@)}A$w+!ftWU@3FkN>^=eh8yY$Y8sQTZ$Xb;e@#!M5@?;Aj(8H&+* zsY4W9egL$>P>1*_-K==@MAW~66~!Us7V2O?QwIlooQ>+#6=10#sL$Io81I7ebk;iS z#TG64fK5gYPkfEixtj|4wDsR#ejmSqFU{KU!3ZPsJ9|lyxv>&~8K{kGpy6qttFDAm zHYLp@YAQ+ib;ct_7w}ld2rmbD^@B<}6~(Ukh}>gToJW9Ou*;8@$cs}_QyAdyjil{t zb7G8h&>YgMgp+Yy_yXw3f-U_g28Ll~%djHElh zR6?(>!hz#WPBV$0wV_$IEC33`ni6!Jpw&3esL5`)fvJ9xIY)w^QG(6lsPp*jsq`p) zC*t}lUepQHE0rJ`I81-H;>_S9(-$XL^XOHpDfEcorF9nCrI^L~EsK_+^w$cs=NQZG zmT$+@o55S%_3zPv;J?r~-U0AGrAo4bwvggnBFcdN4u>0Q^jZuHL3wxu%!UHzXMsj55X9@$|6t+Gy9=0}# zb=h#T_Ky`tiP$t$oGtM&hb1suPx%(Y-A1+?E^g0sy!8*;eUoe^Md#svub9Vg*Pohd zrUR^CDC9C$?LvM#Cp8_SMSk^2Y*;1MBg${XHMo)l^d{YiMX~7KYfDxp@Msln+*THy zdh4M!4$jHfc{MWSb8f+ts7#&I*QxHW)66H*QY?QPt>xSlpd6&( zHvL-GH2ikHne7-T6;`X3nPgVwW(kLWEf3|IZ9#nIRCh%Ryabc^&@$Jo?^9Sxx0X36U5{2UZw{IDWB|P-Nn(6Ek zjS^oNZ4li&GN~lVYTgs~Ts{{pGWIe*adGq=v(-)&h4+f3F?r7O3NI)XvTM%-ykI`w z@Xp-I@q1CDcOjN`(=JWKJwUtgasH;+7k5(zpU_X`p_w~IAfKA^T>g9=F(V(#(05St z+Ph@^yASuFTBbcNv3624(eD$S?p?9Y+!99YGG$yLO^r$fC&0`{2hDUPFP*S-+YVpN zZWMVDdf|>2^`o;NFa4??6z%$Vyp=;x7{3iw782p;Y3*+3D`F*XCcd`}eFf30@ixA9 z(uM97ez@IR=@IRlHtqqF$hm!6&ojR+-%S(~oT7QNrl7!Alamg4aFDw0p_7`z@j{ft zdm*QJt%^mQ92o0fYR96N`Z!SD^4SaKYG1o*3{}|NOvv|5J+u81bH5xqAB*31;oc)w zP4NXX6$R>74%^C(PH=(*Q=22Mn}})~VFwA@A>T41_nDh1CEA%Rd|1v!aHA7%s6u*4@0{+_3x0nF>bch+^BZnG`MI;= zQ zIyJ?xRnCln$D^A*1%D;uj&AxlxL$ZgneVw~N=11I?f-m*B4jfq@8Jd{8bxy&_#6{L 
z8IXcO`N!Kj?`^NHH{>_%ef|Ds=1qg)JE&jc;~e+bx~^3j>O0HE0s-;??`yn&!1T+*?j#SEu9H=zJM~}fFBq-X4As}(MG>9Z&?;nbLipXf&0%biq~_AhrGZ} z^)4?pzFXC2^`eUNalKW4qZRn;`lrTqy5PU-%f<55yb4Ggmuh+p)peV|0Hgfvz;tmVr(^Rw^w(l0!uX#>8k%WEllGRX%8SqQUy3k?1w+UmeaJ{I$2 zzBZA0+pfA~=Xrk6FD|@=u>yNUh{<|+?mfMpM4h|Q;&)^OReVef^6j~{ciM7s){x!H zhOPhB!~XmErTMjN(HpszFkQ)m`XLD^EJ(~*u-)H9>Wyb$cd0c=xkZj?d(5U?SN4q0 z>+5F|`iI0O{Zp?ySPJfNBdT7e{_C}S8S>Nq79Q;=qbs(mBf+@t?~<^pgBa1)PumHd z9pCuAXs>@wdElM4+>mK;qW5qIuU+-sB+Np#bwNqEv*yi0Va>HN#;)bfrNl2vj{

#IW|(gj~D{x&un!I^MA!Z*-&$E_vfOpso8i_R8h(x7+5i9)4uqTXyi# z7B9z%kNyHu0XG77_>Ns?_`A&wxs8cEa*FQkk+JW+#?_OC6C@0NRU5>ylkOf+fJ!2> z(=1p$*sG)4-d(#*?fPfB(mOI~4x;8B-r5&O^i?#gU)?De`A)a&Gd~ACQJ?Y6$0c!A{%p~N$ciIEMlKkLORs~I(v1~)@rT{ zO1v-1zj6cC^72z7$!gc0OfxLsB*iBmaNark=|{x`a@4hHTG-HEy3+ll%0nu1id4tv z=usO@^i)};E^DfjA&AXyL!V2e6)1Z6cdZfSr}m|!X2vy^=6z44n#phKn%nmnwky9T z*06krb|$)02Jk{wl;L}COCi}#Wlp^@@A((D)puCJuPP|cEU|Nc{_Chv<2j+bF~h$c zPhBo+k!`Ow-`txxd7Xc>u;??ltToTP!o4|I>6_Ll(NV34(P-_rw;)1)wT^e*SkEl_ z+&+T4-a3-|sdTH-a8k1Y0FQk5s;A`N*k^}3bw$Bod1V|$RSdo}Qh1Q2sVc70?NQXB z`2ywrH51A9B}Jf$QMS~NS-Ls#Q_?lMzzap@U*YV!VF6q7R{YOux#`{k>3-;j$k=)Q z5Zhn8EvFPBr}RO!@#BPYk~o*8O7_z-hi8ZURl`Lly1Ar?8?PeB)-Pmj=Ok@J*<2A# zt-uwYti%B0tS1^8&SOYt@#|Q4mEw=df*;ZUp*V=4&%l74MtTFPF&^P5ntsBileWe_1t=|nB zJ51b>pAQ~e^jkcQzGkklsI8RSt8Up-m0_yXq8Mi|8)yF*c0=U%@0NgW9yRV)5jW-) z+BDQ`$J%&ocU$(Y?(ysSVKf~1f-x$g_kW2Kn24kK_nxwCqm|{W$o$&yd0Ium@>O!FbfaVClY*qyNP0a< zeB{oGbyXmrx*&rNr-x^-l(?^tb!z&A)jsKeuel z_)Lq@=PJhJl9*}Cq1*iZXZQ%C(mP{;dgt7#)t_kDlXO{?P+h;KQ}<=%A4Yu7Lzf@( z$EFcIwJ}vlRkS%rEG4>{3S;{&)>fB zdYkvV-n@Y8t+$2}SvB$lZ#?sP>%Z&6dJAh2`xC;XC}wrD@4u7xlsl|Yrg#5+Vug2q ztp z(EMQefI3SH+%}RuaKYu-`f}%B(p+b871oCN6rN+Dw%?H^^Mb`XkSDOct*K)|uvkh# zS3{^-VOBK_Zf!u4`&{hmU!Tx<09MGcoI zamF6Uo@WdWn|GGJs(uenVC_F>pX}^$r}UThdCrkFj$Z%@MP`=(D{;!&zV%87OmP3} zF0I??M2{hBuBurSR8s0mId0aGsv_0y!dK8(Ih%Uj=u@?)YxHjE^>t%0aX&*&k6hs~ z6J@{MYL84s`EkLt%aa4hj|VJ<#i5~Zsx&Rgn3X9E1L*0Xc+TK41R>2nv;OEfa&)T<|$Vds<9t*#{>r5^`} zZ&Xf8I3Mc#zX=q(?N@q_#j9+rU;;H^W#QrEHE}|LTBoWuThr6{Mbx)`0mr#aZUo<+ z=doD=v3KqnM08Q9x(i%UjY-Q}waF>DQJRzx`OH0J4=i~Qlcf1lY%E%oUH{yuQ7esk z3rTOsAe8UHZ9r*<(Pjt75p+-t+2pNmCc#lD`FNe1Q-W-bb$F#S0cOsIwLaaBEap6Y zLGHO^ZlM!M>Tv*55x&w`)Ea7jxh9e&y#<{H%QV^+yIn^=K3 zsvVN&-m@JMxk6*JR7Nwvo<8a)!o)f=JGb1ETAbV3#J(jwuNEE;$RKjkIwe{=Jah7; zGcp~gREiV1=shM*x^6h4l}J<5asd%$?@;rx(^GcA>A$D^q->MgKra5dIp*vW_=Fe3 z49$_dCEucauD4Y<(>+q)J#(AXG7B_RTc@(TH=J|zEZyh9k?~CQdd}1H`p?Q;NP5v= z54uPQiL0exR#MrhoX%j6tjjzxn`og}4oNWLyQ!F!?hq@I=ZG|wU$ADzM6I~`c_7t|6WJscK!<>{U-Q?)uG5{mdNxR=y+G`n7+ 
zys+`>2cb4O_MHdzZ6?*S`=p#|NW36Rn1TVo+}D$S`HdSGqfZQM@)}rj!g)i9Ev!vJ zw>hW4*2&te?mQLyhhCxzlez|?{)=KqZotFIn-XaG;}~aI2ir8WYHy|b{Y=P*{2Rm@ z4$lHeN8WxXU0Y^s4ojQmA3LbJZ|pX!N>^oWjB)awct)sgQ}mkXp^@M_R2Ts7)y zBZ3$CrN={won+DH8Ke8O+*C@su~l`6wk%b52+vx$h>hU6YJt8*C6f3Q=B85ouZUIp zZ{ln_J&L*4dtvgV&kKV|9}9Pc(G5cBF4teBX~w=lFY`WY@hr%sUFgjgaV@0;iz$9cI0eo1Jj zPuKC1xPD{uWdZO>BitM9{Cn~>QKz6eCQhQ5bK0Q`_vF4n-1gT%Io!ET(6PvllRX1d^|`dnGbO$ZKYs3^w}!K4m8`t` zR#&;WAAjwrY0dH#nvLVDb-T-ZaD3p{xBRZjyR*^V{IScS`upC8cN_1o9XhNmzaOrl z<=w-lnn^h$o5ranOX$Ctig7Q$ZTTruQcCCR16rBR?|LSCa^&N@vOM`BQo;%dVxPr= zmEC^zZSsDLoGe~HiTrQiM2iY=XKM8C&tET9pf!NdK^nDjouRPM&^XEd4cPVbVI6#E zoJ!0liw}-OUlyAB89+^YOGT~ha%V2SWJg5yp0b%tVQ#An_ZIq<=BJbJh<>3@@=s}} zZ;{Ci5hInKRFdw!@pyPz`m5xV&MUPy+wKg-FJt&#E{=u=|IlU4jL;Z|-ih#!Qu%() znY7sB9b%*U*=1*o{Uqwu(%Z7g|4JTw@qct}Ws@A)H8b!f{Mw)8L)Tx=o^A8acs+-u zV&Vh=WndY!?q(YLe2Ck9E$&*(7LxINRLJ!htJ1$E-FiMqRmKo--2XY{I7um0Kr@NZk_|}GEOZqJr8unPXCak?&}1UCzzW(r z3vEL{8xhc_8_Fyi_0`zI__cF^z(udfMj_5xhb0p$M(s2ysK!!v7$ZZhg zmrw@NBaT&iz83)a`Ud@r4cN#a{m2Gvb&#Gdi~%Lb#MlO7dCT+}!z4})Lkfj4vxhNp zp1{P~m<6(!MzJskB!goX>-He)_6FOAWVJ~wyz)HX z)M*wf3oGzjj+gGczz28KpA*!-Fy1$BChmA0shTE|n+{wlx!4Ap!(Z+1B!s$J1zsM} z4!FUeZa^|OphUc=`*+bGCOTIp*gYoE(m~O%Y|%$Hkb3+Ta@(YK=v1DYS$+kDTmvw$C@8K`m=7q4Y&j)+X3pQ`7w16!-f#OIV}Hji{)P*1h7bJ> zo!<|&{fiw^4p30d+1!8Ve)MqY@3O4QD+aay%Gn>{xEJNrt0UA&<-FiI6zg~XEG$S8 z4_Xs|iiJYO5U3#q=!nA%5Wy<_^ku z=JO@ce>*0n4(UIsrMK_kXt-vO1T?%Fc<(lgp>2nu-zpcNDVpRdB6B(@m)OFP46E|!XNsn4ZfFWTVb8`CrocXQ#=iEQ z9hPIdvSGSL36gfZaEP$jXW2-#<_B-kEVr_Um$Tm{s7Bb5Jx?vCXVg3h?Af-l|~cQ*s2J4Q+~I>pf;>PEMt8%Ku)2oj2pP(VOw2|JLob$PF-q8ZyVxry9cRNusR(VRRfP#7Vh6hjWa38IlYw5I;ylQWjvNNF`Z=m_Z zkU-Z|ZnFt}W2w`R(?dtn2aCFj%^bqLPnf)s%-`Bj z*+4g2q+5w7e6`DPzG&Fd&V6Od?Q4|lJA|}43wx0rE4MW({}BtnJQ7ljH;_j%t00A~ zS=p^wE**LH63uQOnY~0>%-bSjT67EuVczp!H*nLV**DcChBAP8gJZ+^%LO4Ine;}G4}NibG(7sJ{ZUul=Q#=rPB zHa#?cgSO?6A=rj+dnUPSN-XIDpgbdL?NkRe{w;! 
z2(KX-meoW`$DmTr3|@xEC7(%7D_H17$3$E;_f$1$QHu6ejn-3%A^nUE=FA$3j4nEf z8;P|LeH`okQ<42~Z2w|x*}GVSCsAcT6*ps*HZhjA<~L4b?@ZFFcu@UDRLKx^wV@cY z{}YuPKvcXA*X~;#0dWanA7n2TroG+sb31yI$`KK)MI-`hgJ=%Ai&B9 z3}8;iCY&BHhJjhNJSX4TC^Eby&G62}6VP1|F;{V=q+;lT#ZQ8M)?x8J%D(na`g*r< zO{Ymqp>YmUDe7l*1(In8&$O=1dOJp;PL*T)BsT4x_iQ9(Jkl&JhLZQwY$Y;!S4|Pk zWx3sn`qfh_-DmpT=T9HA|F6yjc%1s%pJc7ybzLIpPAD%YECh%E&+^YH7r4y%_^e&rAxoGeE`rkntHSX@ zDSppU-b4j(d{%34qjUPBPmh(=6|TJ75o%YKcimOgVt}|`kzFwh>Q8kvUT$pR9yYM! z8a7$DsWqUx3--+Ux5e8uv2mccwjtPGJ-%GcdQSY3tj+v~ynYyAtc_!^+4bMET_;cn zisk-Vaj5&fFT>dLtG94;y@&KBsPGqL25?DO&M{r7)AJg@^9FRP6Y5^ZG5yB@x?27U z$r4w|cg%ar5u_gKogTNna>EZrt|L*@l`bO##VWye3zKOJFn7%Ml5rCo?w54NJsTOTMKiD80sitQDXIb4 zpZNc{;lZuu@k8~_-y6boyo}ppxg3Cqr+`P#ZaL+?7CyY>!CusEOV1D2mbj?k(p z)?z#hzI(%7kKI10k9Ujhy2kO9dor3GfnBc}i#^e;kA~hp=iKiM3?bBT24VQ-T*Atx z^seR8@&9~YbS@W`MkbUU(s`XF*Wq10{Gk`tq92#z%GQy%S_m&&98gy^q}7bvSdO55 z7Vg43AgJlF!SAE^9VyuU(&?%RaCyi-AsZNPx>DFy5jk8jXIitE6Q!ET-fmIykFr!& zQ=_z4qougo6T7)9$+J-x7ypRp_c@RMOEn+A$_2c$&~WvD zi}+j9KpE4*fjx2~jk%Jp+K2qPY2Wgr@E*r)TU;;=Zz6r?7fN06rwJ&@dt)G}xJE-; zR`UAU1QhY_xt7w5RnKPs@^m-XeNLrWkF&Bny<$VZpTCe}$i?fMAq~*35jx!!9#xT6 zs}Zxff%6wmkw;urn|QX}vP`#d_sTRoWA%7)(yyIP8l%+uB&;9aN&R0L+v@bUr(*I*#_*2kH)cx5%agCoY&Ar| zN@h@gk#i&S`Ud}oX67FnTzzP-G{^sbz$(VujWdqldJl|!CvDyK2_s){^10pJl-{N7 zUF5k1Z+zTR;op%DajPBd!K#njZ~eV@*DI`YfBO0PkU5zLGR-S+@G>a*qz&DXu-!{k^X6wda%quhEF55Oh1W3Ifza4X_;Dq+|8d^>oTr=vDWhqlAQLf-Ju(` zug!uT?mG{-Pd4lY+q*ivIF1i4dMEuL?D}xhN1;qMF{@h5I1uDfLI14^STsnDu->2W zpFrsGyXzlb+@$5c{MY9Zo9wr>X}MP+rJtu?>R-8`0#jD>duy5{1J~%9up8R{?Wb-$j@zRfjh_e|;2*J5~!_6EZs38Ry;|&Uo9h)|m7CYgo4A)q}q`j#aN^ z`G(tjnNfTa(`7o( zWL8UK&lC>#bkF3UJcomNY+5t|2K+n6X1Z_#JA@p&`W7OcC8diD-^Sa{m?#$)PUdWE zm9iLW#7Dw$I>OsbIAs%l14~Wylmc@dxH=oF#OBeLX~vzy}RVNKImQcXIdU-U@*qF#3H zNrZEha^hk+TtbH?QkNC)$t${q*3>NQUK>IwaQo*h~;ZiX?ag*|#e6bPw zs%}x6o>%wcSCnb_+E)7g`P!EU2KW*VC!2U~UZ+mKB5XH&GK}^JXja;6VT-}<(*&O(^Ti95d%(7nj+NqznU-~`ks#mW)dsQI& ziLYyKsd`D7L!R(Em*AGM1(%T384elMw}?CLIfndYKAE7tcfOX|uS$Fij$WRhu%!!p 
zDZ!^ptL&=0OpXo=YOeKj4z22QfVI};xv7NIE&Dk)4#{~>w1y>2uSRd_jC_dcPVBCV z40Sc)Lyb<7Dx;|yOd;XN;#c`Pr}|>vKfGD;&h1m5>E#D;4_MQ_Zp{}PxyOV@-mvO> z_=4%ie#cwO8;RV}XNatm^FRFQ60NA~nIQg7tlam;vnknUu6BopFz&yCDvM{OhdCd$ zL$-${e%9Ms@EMiO7MLo}+x>JnKq9f0?!6swzt#+nY4@?Nok$s~v}MTrfh@U<;sHHHjY{EyEp8OvJySSud5Z*JyJou* ztm`Yj+5=S%HFH^VH0GWI{e&60ibXEOX46T2Bm&i* zNjOA*9;$wmK^w7s#LqtPedzdX#HDMmmh%POE4jvCH|WO^EgIs}SoHNAdXv;|ksAmU zyFLmMR>#ABEX!gdB*5vJVbnp&;n8{|yab>(Qb~k|0TjsC*6Hu=9rZ%>*}S`sw5;Y1 zg_S+t^i~r2MQTeXIV{5BC4a|87k}n*kEZy6k{REG^EY-)&=)_T8J~Dk%ig)@#c8|r zvLX^{_HccS*L^mdhNXbh2qL05kub2IOvzFUV0Ao`>K!xGS|$;!*Cg| zPz`z9E!iZ?CpoRC?Rt2qd#Q?D7deIUnp?A0@$O$#Q~E)T<&B3?mELid*shPRF#>5* zZaJUQhNl;HGuipR+wD2)3@>m~$-D{GZ!S;tE?nm2ObfkfmMtXRVq%qP171DgA3uDj zOOv0CB^E$26LFz)AheN=6>nYkDm;7H{?*yPh?T%HcqKFbOl|sYKO%R z@QJpG;T_@)cM`t1Bw9ZAN7kDMikew_4A1%Ja#0|hZW+E)u7RRYUO@~!!)B)j1w@?v zd<8&C&Xuo+8Btp%@}$?qBX@6)5FPpF7c$zRWXFJ)(*F+d06mO_d;%#xkO6nA8orT#Y0 zQu}s*>r|k8)HP~zyh2RM*b*%hf&6~=STRk426IXL>%*8AjP4N+?A%WV-yLb|b0i_I zH#(BM4-R8z%LmgjFX&p0Y!sM`mYeZ&dwI5ZRhf*#rEab~o*50Wd7jrke8grcMK5E79+AcYX-*wO!@b&v0(O}=Hz*(}rUmO`eE_`@0aIS*)fu?)eggV{&0TY=4l&k-qa zdS7=NMJf{1maR43s?^g2xW!(A?;4vqHw5gj>Lu%pR6IFp>?F?HR4p@M-gB%Gq!LOLxsUKkp~8n3v@6uo=y7 zy>DLz7%G*E#XQRfwnVkVs#{e#$JY(aOWJMEOlfxeMo*f)MY)qE(-(Nu>jyz&f(1l< z;S|k!W5@oyh}dHhn;B)+Qtf$*%(pjojcMPnRJXSt#J@8Li+(ltZa=0CCan={<{aH_ z)^?aUmzg$xHM*CP#WpFR>efBUX0dZcfZ)I0|NdhpetgM~@e-*QfhC+G#>hdm1B}w_9Ag-{J!Ql*yKDRcB*yljTb+U= zeZfb?EQ`3{OOA3Y#Aw<&_~I-Z%cjtt9@0RU$%ZU1s3@;Z3Q@97GXH2Vy2(_mg!~2o zM}b7{jF>&aGx~o_8uvgPAjuf~%* zP>)o_GJz0BBE;WO>>L)U!;Mt7T4y>UJB_GHeD;CaiEvF(xN1BlYOQbFAK;4r40}mT zlUGvxgw$jD#62O(b8v(UlcSwiww`Uj6{=z#f!E+n^Si63K4>GEpm(!po;L&&5!!YW zb_#W+_^ogT4D!=X9cNC1F10ASJZ*@0&ra-Z&U#vk@zD>Iox;|tAmjnC;WK_pG*KP_ z#$sSpDq}yDk=h2HRbxRqBE<5IgldUD8ss+dG+T%84Q(x$H`9l8xQKP`heP;xIp(Q0 zIDc$3wu4O}j}0-)20o zDX}XlJCPwSl!AhMPA3c`MM2DwQh*&(QtyR0QwpxG7yMPxp|2NXrBUfDajq7IDHC8L za)0h>_yVSSvj7k}{IkRlCHs%_eKoD&Jgm@Q*BTG}O&rAR`cz`q@$+APpyymjj8Iv;A 
zEX<32?y$T)5Gbn+7inaC0;pui0TtTv9oiYOav)R?5UtH)7AtBfLsBzpLKAYQLgg>k{X`^n(6PgGX(li(S@Z+C+QTi@_N!-A_F7|U=$vphzTaz z8AJ4#r11dYArM7^Z{YzbIj}SVtf~z@RAXf>5Vo~}TqLb8O@Rw_S{4)LRd20^93%&&=}5120V=ivLAw;$fbn0GYa8=9K`zy z+Gb#WaC`4PAp-CTrX%!7_=g^6A3@u#hf}-!k@OUR21j$W4zVIb=#h{k1PC$(1mAmP zMY+R!2*gZ*E)hHBiY@NQqw+oL9P4~2@+!^N;J!W(iVTp$a6KnFSNj4{av(`L5IqHe z|H3$k2hz4Z>5DI7_UFE-W@(6Ho*m$eZ>i(Qv_iBYP0PI9UbGk-4TeCAp@L`-pl()L z7!|;Kksra+lGG&y{q|5>l)&+R<~55d=hlE=*Uh>mZsbAcpv; zfr}P@e#q01$ZG}Z@x9nev4D8MEL=6hX1~ zj4-kFqM?}r(ftA-5X{DhO#M4d--(axi9$zHOn(=dzG5J|1oBsPW_x9%DZjAOOCd=? zM3k=5+#!fo8#KlZ79LL)a3EgrHyAMBuyDqwhc&Wo;BLTUu#e~pRNEtr(HIOlz}&FL zL1@-NE(m59YTXq>ACff<4HaaP0C7QBsFWArCka|H zOU~eV9GHO!9K^vK)g?v%;J}0;N%Dj0C@?J*RJsFx=LAlP6_=~$4%GwSA5@*-hZ?xH zMveiho!zcMGn^5$8D;=Ga+4bsa!l-u?$rx3xs~&}_4@|ze;c5vrBT-@|Jy$xsp8N= zb4c3_NRbPzvs@o!s_c4k*zYju&^8UM=(88F389zFzvYO03Lsj1q=W*9PCL7I-4HY# z*+-aAz`#6@%IVv{H2g3jT>t!;%$Xt!aMp;+lW9qgNfHk*oMQb{3HYEb#|uo{ID|_c z0xlv-mk!~?`iE#7WNQz;g13V!vMvEkbXkyJOaLQ|G+UVSE?X+z7z2U3LcUS%m?(H& zZ=!iDN!hb?xSAp?U21Ah=Y{H>kOw@T$)3pc7oF3F3lRYdh&X{80B+)7yC2r&M!yG6en%Voiqdlc$^j;{To19uo^STEw)W5F<)~3<6+JPK71=8&z!zNZZZI6Rq8QH%Kob5O9fDGLne9Rfl^$t@hHB)@TKC6~Tki2Dr)H zoDhJH@b}$;XIO=dkt|*h*-Po^2OP_n1I}3|x^q_C^q>9oVib{tfod2>%ql$x1pEdk zU(=ZlQtx*;A*AGH&p-m9C;W&-BVs+v2O^M}?15!{uBy#+v@fVa0Z1Rhp>m)#K;pI* zAT$f8+O`V3DTpz4pbXP|0{{^hkvLBppeYkogzftj2#5!q{Q??d+I3l&nehOZDJIi( zc}@qRD=eNag9;!PCBYydCjxw|2cpG+Lb^oQf<$JY<#%g?7$`tqmkt$8c11xCr#n^Xw<A11e-WUn851=RfP#Q6McIPg|bdJ9!FyHz3s^z;~>3YlNStEdJBv%K!sB51%q^ z{#XD%0g}ZF0#8BC>LKK;lC)H5Kvcli0OUo*$|I(|9o9%{fIyXBN7tB{EDk70x%ReM zU~nq%nk3kFW`-V+J+1}<;ek&H2xuT-!760kFu$4#4FhAq>EfGhe#Df>RsxexkyZPA;fJQDH1DAq?#T z0P<3|%#F-%2<8(6evSi6%3bKeirD_XdcE$DHT!UVf`;L*L z&Hm22Ish@uS)AG6btQI2A$YU<^~l}^7wN%We_A-PdESfGUJii4(}o+~o<4#^Tv>jA z`f`k?4F!}z4uNfdTz97Z3+f{NaqiW~{^kfbyuVwHehwa=3iLOxd;^m;hsVIPJ3*rCAi5R$+*V3M>|B|h)tpaC77!IRjy zUt2pYVpipjQ6^p6SizU2a6#?~P^$N<%wMf}_!K$sx3BV*qoo!zY0lBPa770_5(FjZ 
zh(h3XP)@@M?A&@h?@C!>otJxRCVWLWRj=yG)Gzp3HZraVGc-kA4JR;Um@2K2%-EK7ixeOG?B<;lZY*EImMHIdOzx)>YqOF<*D zV!`7%__n1mwg2Ys{3HJlYyWPX9c{mD^7wFm^Y5?IZ=XJV`0u};XK~;7(owH{Iq_a? z)I_&vCqK?yV?-)HK3v|U6s2Gcfa2rh5W0+rW=bMSCY`^8DZ(UGHA>Lk)f*J~$M3FTuVs%Znb*fNA#i9}M|d(T1M z7&7kUjS$)Kqm{y{=cg+7a5O!qTeS5dC@8WlfWi_vy2i16l` zRF0sFOcqChxwJ`1L9B4p9QtF%V3TMq(The-lk)SeTDG~XuN*v#5z_=jekKBBs1bVD2@z~xPM-hy z12}dddyGT8IIg{p&OXsv>mgBE?;~0+mcLzD>TbrT(a=X~0 zO-NED4Ac?uF<;D%A7Ick*j7nSiM^->$DhYITyaDJpABKUY$Nvd`hkj`de>naQ?ts$ z+}J59%3D5FA(sRkf_Y2FBn`NqM8V>Dhur1gGb)V=WTV?C&}(DF=O}Qyr}l)Wf<8=< z4}3>0eM2RR2nZ5mqm@7s8PJMOe!6uj2n2-@4Yrt-V&k5odNPGIFGY&!@s;;adlLeZ z-lB+Qs-yh0G-89{p|<6+gbnb_7Zj&10x(7dGP%lW)2#z4cM_Vgbxvj6TkBn&zBUCF zTRx&%zf8{F>jBO~z0=qESiv-3Kk_Re3_rRjOT0kW>%wPJN>SjJ?<8xo<Y2auHu?s2 zf`|+0Fi46KS=I)`w8RH^%?NXvFaRkQBSHTSB?K?j7SS#ex4OOTroP&qYw~YU=$Yds z8Jm3nzR`>jv4MPbq`8Fpu-*;N0CisShFAmnd7;uHXFQvK}3N?Qa10W=NPSD^3t9 z-%Nzy#KCC55M~VUim5Zx6#O-j3%;BJ3$f4PIBL~nzxaGc9A%+Q0!@|rfIx@C904r= zSi!j}CxDQIey{xkOx50PlZTvfv_Mg@fW2pdPq}h3=wnR%v8D5gaT9tB%(3U%9QAT3 z;vsiT9c45L$yzB)A!fvjfxs02+Ehj_?R;~43~+ot0A!43w2msQ5h$b6lY5zkFGDc6lXW@5I}$^$d5Wl4 zC9-rNAmW|GSS4X5iz%2+dFTchDwb$gO-}JTt*3X18)OMRz8{O?=Q8F=Wm*l-_MTgp zx-lU8B0J21ae@LJX&Z3gV$JTP)0>tLfjJeI>Zi%4dU34mO;{Qd5d?lLOncI=@6b?n zik}uTO5!EFTGUWbm^T<-^a)VmT_!gAypAK3=FPW0xH zq*58onIP`li&(c!*ASBtrwsU9dcaAB|c&_)D)$4;rB``*YZ|R1v zzwJMJ2Z({$K9FS}AX?Z)Gkhsy%J*OlHv zIaFk(v>wK&pfxg|<~z-*a>+tECc{RpUAkF;!a=} z7HrvWCHp$^eD|tk_>~eq$|^IK`uZrK?01d)4U-G}<^#f1_G>y^R@vP7JOi#o-cThyZXbZPafftr!u6C$f_hW5}W= z?Hqy#K27Vsy}-iNUN-%}wAf}+*Y`qqxr^brjPTP73DrZl9stc}r0d~B1~noLB98$@ zZy`=fYG+5cLCnSJ3l0m-)alH7Q;K3~v5K5`$>LB907_<%q9*E0Q^XDxg3IVRrznDA ziOc~2-ee+TF;Va{eZ$$~l2ML(JsD!Sk{AlfYgDqsi;-l8MYWN-Mla-7S>DD(AV6DX zA`4Fk^+xo(K%n#=-&?fJPK`^=B+;tHN~=$yv6LhP9^fFEI9HlXC!Wk?V=##-RhG}GM0=emd%Zc2u&%ZMY~s($4?9c^^)d-ROD)+KDH3cXFtBOs zv`5Yq#Nc#=?Fw(&3H-+cBb(XEXSnw^7p_Bk6>C_+6S0hn+Q^9;i4M5JF*~K$a$J{>2^HF3iPNv7BsMmvye4xk@a0~s!R=) zHfOB7o+a#X>~4Y%_dsf}n%LGfo0b!;$uCjjay831n{WWF{4U>t4l>RebRRa21fMr! 
z+ow(B(>%_Ka{CIq<{o07jhhd!I}ITJYcKq_bk}v1+l}<}G*3fIDC3_KJr&7ciZCw? z7*=!viBdDgrZ~hF700DG8Z{ovu}{cImxwveHcRl+O1x}keKp)8dCM9WZw(D_6nJ$A ziI<07;Z2S66!0fnsOM-|OG3GbY3iQ^!Xueh`bL!d%G3#UVF_UO$Cj##7p*BKl~%ND zN15kPxsry}#VkdR0N%h!e;X}Zz~r28!@m*ijZWcpqWKXWU4aY)$Ag@nXq~ckfVAQt*qJTJ)!Z;+{wJ)DRG@M2+03V)%x>WLL%& zTZVC;*5<((LCEEAVQodIPwc&$Ew&;yP*IK4@wyUW3249~qmdrWr!-4A-$HlW z{FT~Eulx0?j&}H`0tFd!OVsjp)Ennh0nl zi%D6Z=5)iE%yQ;;PI7mrVBvfAFaEhu?YU3!s_fHdv_9$QzK9|Q6KG%Ff@z?3z+{SV zEYF{iJfN2B$rmW$i$ge^_VvcXJ11G`nz4l%ZJ7;LzUh4CNSzh_RRSXIib)?gZ$XLT zv>|8=Emyx=Y*3+CLgBu3Ay5AWjZxe^urk9jU7Wip<*?AE-O8PkLOw(? zw!4!tQ%TYpNj+i!R?OvDhKKm2;s8(|oB62AZ1I$Wlrjg~apFjXM8#C`1AzIYzJSLe ze+L)TAH~S=d-lHu&JLoMAI9(%2H%GlSh9ub>eY%m+f`1(@q2c6JOvAWF}UtdxI3o~uj+z&EDWJHM)^{mxPAssB#rdg2@E#e-wuLQ1g%r-ZxW6 z)!#|q@?ZEj2Cwp$e^XE|=`E4%-B_pFfSmpaU%XK9A)m5IBGW)R> z?pFLo`ZDx|JqqFm>1H|#x$N{sL2E^*lGe6N*_#q!13q4PK4n!tc@Hx!I3_$C=l=Z?O*+}n$PI)xG^8@eR3)L%_I&V3>XDty5^W(hXQ7)2y2TS4X( zbp3oA>wVl^JZHIhQ|+B!1$7C-jmt0JsLWQ!VUo7|y4-_Pgx*IywCV{}4VQUDIA}OF zWqGxm;vQQWUF`qjhluQXFgJa{Il|zxyi=m0pL{%*srcX3TaDJpdA3BpX0gJ;@`Yd_ zy>xdp{r2Q|3gz=x_KftkwG`g*Eax?${_)g4gy7~tHeRDlh&`acpaiH`g z8&j{Rx$l9pvNZF-baq#ltqB8u?_@8_g+gdQ1s2BD@^OcF-@c`uDDwGh%hfTy0Hj{K zQ6N%d_fR2__5#Y9w0`OQ*ZExuwhYX*wbWGi>|&eU{v(dGBax^z@qdm>(Ujt1rw7Dc zxty=9XEq<)ck@(g7OR&ms-2`6w|mumt3(XD1|>hIhVDe;J(EMz;!=APVS7wmYKcKD zp+Q!sp^u~LJd&O(aqa~_T=#VM`^6NDvfR;lw4TReJS}%8WZ}9t^NG2a@ejgioZy0J zBRLOw7#*PuS4|V67WyjoG)UmYTGujf`PAMX!72}+Q-ulpTajYNEe|YjW ztmVUl^=lD%PQ^RJ#^(;1NY*U%{(G`U+ry|Y>hPd9Fl9;6x$0;E`MdQp9a2i7eg`U^ zDn{QYFh-st%taaYnxD4M_<~7N*Hv31eyDsSJ>%M>Bm&PObgW`ithl>#_g-d1BKfr= zie;X?+WYU}+Yt-3)b-$~d7OpE;hq1yKIOS>^!p=Z-AADA3LAV}De7?1H4dyb>K^dT zIs55SKx*h(+Sj6;)hN#Y_Ve11sI-8LgCQxllEqhFaZ=$yS9Ucku35hSdL`mV^NSCc z84pSTS%N{%x^?&Ln1Vt->|VdVoc3$>oe-mFYpD0iTz`M)V`Zbtyr5mIC%W;=@*?`a z-?uJE@UO-QjSn&ByzXx-8t@+2w~4N?!&ZOWf6f1{Z~(~?o<2m3y@>5?L`1B`j>a9D zr+p%iM83V`_FjBVL1#aY)7=Pdiqr~FUMOIZ6-|0Lxaj+jG+$s6m*zp17fQiOd_Vnf zT5o&xpQMYp@an&cPXSIZP5}xWqD@Q9rpG4H5~WNAp!h&PQ4f{L$APLp{=iFY=AxnJ 
zvsXD{JYl!PK1@IlcCDUD(pvt>>Rqo~T9HwRn8=J_lktqZ`(>x|{>hGHVD5EORyZ-> zyWu*OL<4z}G5g9wHz{mQOjGSJiyLbPdcV>XCODGIcO?-f9xgPNFJe_|-V!c6UM%A= zC^dF{vmY#b9=M};rs8*F0`zEi+R{OpA z2aa6^lbK(gvC$@r{9z?D^iQ7{lch|;ImVoOqql2)yOX8e`uu-(^xtp@b$)T}U0KDn z8RsLHXN2wWY{rYGE;&yAxjkCp;j7|^s8^>>CtH)HqFJE{TuM9cifcNgkHZ=JVvj4P zZq;H<3q-9GR(^b~&)8qHif6lW@TVtC%pxA^bRcB9zxXX1bwefOM*G*H&f7n$SFy2& zD>)fguE75NI$l|gQ1zo(8&V5<|I9P}_M0J6=%MV%T_X6r#N*a~(vf)5nSAqlA{~bi zLJ!ez?Rm~k$L@1Um%(6^KNZRwxxww>s@dF7=Eh-dS5sz ztNm}d&iXRnX&qVZDACqdr7$VS-YkAzsK7FMd&_P!z_!@+;8|3$bFlQrf^*PO1BbIo zWZAotsD>_K7vI4YhtmHqVEUG7si3`kHB+0svn?Fz1G)(gO@4h7OJ%+ZHY%c&?zV*` z_z|+Z^4hePHBaaZUe49)v%c{a;e(SuxFaWY_+1IOzfc2BCDag1aW(9dTg>NyzUuh> zI92lpZHY#EF>RoVd-6(iY$KdMavV3(X_y@ONlYrE_(Yfrw{%x14|kpp6R>nDjehCp zIa!-aH+iFnFZz!`GP2hiVyHC8{pi{E8J9=>{PTt0GEetGb2x1Yc01gEJGz8B`n(Bt zc}D;8>kF+7mwbk8QVoji*E@X|TyB)nqec>!ULL)QE>lK%#`ZUGjs!h9wS+$B`im+J>Bor}<2)sP9*WwZ1lXM|4VeeAD@SnK_C_j>iW3m4TNZ>#?7+*wOA_Vd$O z9Q~ip*)LS7_p`F>it0xWeP%pNj!yQcHynJ5Dmg|%bDfE{@77^k_k)gd_;)gKIui8=^s?L)zSoG}AT_;l>i~ZGX`auS0mFg<@)?(U z^PW4FWcdIZBJ~tjkvR&5sV3rCPFq##0*%J01aPL6(`FjaLeTvcF&k)qvjQ5{VCvRBf@;s6SKRlL`qRwlG?7n! z44Z>S!(|9G5_z|!hVO&a-{>kK1vYO5^!ga;yRL+fZf0pY959FX^NH zw0aF>hs)vj2l3|}yo0sr1s7dc6X%5dtBI;_<4uLR?ZdOMrDTQf`Y@R=(pvcxsF6y( zZ!w3>a>@P4u~P;{tz(7mN`RZo{+TG~f$ge;cF;BfS5B#Kb=+?4>CL}-{a(Y$GWgz6r@~dE9GBJf2E-q{WSBT? 
zCy{5)Ea(*OERCF!eW6U|T_C&OF%!`VP~&GBBZP%geGSq-Pj(L~KU}LEW4)&PhH^p^ z{a|(s$;Q#Pa?^mjZ*x_P5nI%*|41&((;Zbc!kJSe-Vs?p{{2nQv;N4ycJIiM?~6xF z+z5$V$$er~`mZk^$3YdF*}wig*B6^6eyfd{$^OX)8G9e8pguEjtwD3i#`MU+u6THB zCS9_^v`C?7cbAT!A;crbcHGGu&h)!f>uiR z(-5B_)9Ya)?q3{7lp3vXa2<>%tM7ZNYDLpK=8tQ9Im>mIdZl#RdD8c{o#OZz?8y%I zT$P5m^?c%qqITU~th=d66_ZgaPg0?l%LP)H+eVZ{}#>YOU7 z`M84}y75HvDts>#@e5=y95wMb-T0oo>)vC)PYSgqs*Fbd+mqMJYyT1F``sak3@I;< ze;|#i#A43Ngic1952qP1!YFmlA6f1~;267I0WI`C_I%x>uNu9%?_&DC_*TcuL2sU} zW&^D+Rs~%1Xl_B@cVR(l^8g&_zU|%ToSQQTCiOKY@E~)uV2+ZvHmFUzE=U;86B;67W~x5_TUWC)YS27^bBx#o<=2^uo37G{2i z;Cb#IE4!J;Z)2I6e3ar_Df%R$RHcL8ss%5&y(ky=+9@==AZ=)C**H$kA-5>xjEURL zsrR+VTKGT^m^aFmmTe}?GX~W^%zXOErF6?IAaov?+wQ!XQVR_0kF zQ;8p+ooNV+urBG=Fo-$426UrWj9NT#DMa^8fwr zzpSH?15UZv7wWZ%IkBcG|5dq8!ZDK;kc%nL%l{f&U!vAMQ}k3Ru4>P~SYseojdVp? zhfNH~Mp$PhP>^yYRt%7hhs0`3V&x&xngZ!U*IDF%bg39RjRDsC>kJ~KEB%-BOfE>e%!d#gX#uqf6mhV9?h^ed!uW>@215)O@$P894lL5*i?qs}<`U)F6P@uE) z4rUaEo4mFSZJmN%D|~&8SQ9av+FrB4#bFda#wx!^l%K1S+VJFCuHoC+;Qd&0>9|H} zzUC6^#{?=+p9<{2kKSxU+iB4xwxP>6&_d8W;(H?=>OilMmjK9vXOzT2)e+0A6-#9l zd95Q5UwboIKqRzQJhYal`Qybxd#&h0FP;FP$dmcIfgfa!y<8`UWyAF^(*YPKsPIK9 zjAs)zL6sHrR(zuk$85qi>l8!;6*L5uH0t0&-pXEe%JFq_7?b~^lGF)$>ZNt+gz z9vU6q*dLcJ^57cP-l}bz8r7TX59>5r>sVX|Sk5+i+elYz2Cgt{vCwR>yw~C61G3X> zY5x|~y}ZS%zomb9iv>BL^JA06XNv>1DZ{gAGzd^s*o1BAD!&29g?KC03Mt>JhYwQ~ zBsOJmb;^B)@I`IeXW0sC0JuJoO`C+JLR(K9qk6zm$OhaiL*z**cFORMc!Q0|HjdBN znol@t7;1aK>;Lk62#W;5W4EuyH?Z^o9P=Ak$i7b1!cNh?&gH`JPGL9_s5v^|I^*lM z+`w~TxmpzFo)h*s@by>|_HZ6>``ExU4A!FRd5QRGiTQae_<2i+cwgS}(%tbHO!Adz z)Rb!U6KQ;kag|N<3wUmCVF%P4*3&E%32JQ&YTOCx^9%0S2^850eCQXv+!!*`2-i=N zGSIvJyHS4MFZxh~P)i#I`V>#|DfF^lct?(W$dGMeQrH4(*ujom0*<#*H@cO?Rhh_D zs>>DmNvtwaJful{juaE>AD=H8Q{5C7EgBcy6jSXVTPqsV*mSd1hj(h{*0KNX-=A)S z0*L2Lbmx8GY4HJ)0EtfEbK-MIl0-AB)IgF5DbeyX>2g4_NI;@{Gpl(sa!yZcN-sSz zAU#toVIH`C9fIl=^y&rl>cd;>qrcRfezC<2HHWs!y0*3$2FYnBF_sRV zizVxd9qQaaCW)mF-dmQq^i1O6@|TClK@WdOJbE2;|2XJD=AhVxmdTEx$_t2G+^zg# z$g#*=FSggw%7C0|>9UlpdL#bC@?O_vl8(*2j@Ha855VvEb$W{>H3(H2&0yB%p^H;P 
zOsDs`WLh}yvnP^$#e2^8ho05yJ%7IUwD=zDu;g47_~KdNnHWG#kWfnZQEF1dD*HT| z_lGQ{h86aQgYFOIl7=$}Pz2J5jMPi#{Slje?nM&g3?{|8bT(`OD-?kCrA&wb$)S`9 z6<`@45WOlj*~jv>I1l-3i1mLIop(Ic{{zQwA9v@h!`Ww_efCUu#@VuxS!b1mkd;(t zZ=I1ybVjnXl90|!NH(c-_DUg@mhaE+&-?H0^Z4B3@&3F%uh;Ww9Ge1^sy*omqKqEP z$vsxuol@GKjPRc(Zxe9GMwK>!GhW+ey7dGG1GowweIBT+rtOpjbsi zT^0z7tvJ0+=8NWPhDkO@$MS6sa&D48M00)RQvPZHJowT;(tNU)pLROB2Pz)hQP%ge zS-zt!UbnSRH%$3O(A6K&%0Kh395u^#MqK@!rQ9`n<;Sr6&oa7j%89Jv>c5KrPeE{|h!Rt^uL@>V3G16%@B49g zmmR#HHa?TT4qm9)Z7tbqlG`bA?J1h9=8_M2*c@9}vsHM13j^OSve|FqlH)$3g25V9 zfPx5)+lxy6EE|NmP`Royxsy9~#zL`1b6ZWa?~`i$Qc|!`a-(uPRiMkizSkc=E{B6R zH^L6;#Z~LJw;KdP8xXY(4MF(!ZT#U1x2^mXg>=;uC~&@3>W00H@)cf!^`m}Rv1>C{ zdgqukiWKe>I!`2h?~v~(dD1-`s@QV%&tfxUF;o%$S?SY`?$8dVs|R3I_gXbTCGztt z>1+KFpL;DnV0$hEv8D`=eiCd2@qbdtG^H_r#FCFFB@^e2vb@#=Wz52swyd6kHv z?4fhi3a7JmKCde`hE&_^5~%*CcIu~W{7>Jg5ztdKNDiqu2F=X^P$;TFN+5??m{Di{ zDF~o2G=y$*c)w(;=!4;qZHBeQCsLZ0 z&?{&E+j^Q->*)Pds_(M2gggdMa#@vsF;=TM)y&vd{Zsi=>9L05RIS2)JDfv30EYmN z3!jH(!_}?@sGJTMH29|S>AQN7!mHVEp+h=6e*ziGM1HB$(fi?Tq&e1n%_|aD_}PS8 zC=d(pBU=+5`lR{LU#>g+|oeYO6rU6N0x@thW6n)D5%`1Ew@BiD5j4-rQPE zCT_s@+dr=U*H*Os1M{`b^xS@M-&XKX&7x@1sd+{OCWuhi%I{WR?ew!Ur=~imDcoC2 zNIo#~_D{BQxri&zCSl;(1^@N@28kO%n+-~$T|Xs~8|nt1t#^FcfL*~?0lyp@0QYX~ z93Y;^C@SwOJ)ufmxebrHZPat}U*D6~-dngMTcj#T^w5j>>IM(^(Y=%SGc+l(-8KG5 zAu_6~Qdze(-u(#)RX4_~4Wy~<*O^(`# zbQ?T^3F|P@AF^j%`DP7os9bJZUMC>sgK{s`uqE&1uIvkcRjC#TdL({$Y4%4!?k|IY zty3Jt1`i5ysa zhC_J|7DcTM-Lps3$Q(U8XxJ04f$?@3{p#NNUZUalN7c16Tq*sDV)^%9 zZD8FLFjP)~KP>iNR?k1)Z~t8XJUyx6_eBA@zdgP5=auef!91LnRnb@%vDmF7Gb}w(?cFDhVVZNW}K;6(tOU(t}kL4S-q@|_(}jA z1{YfX?f+`Td5-j;xtjc>p7-bFhX(t_LF# zL{Cjxo|?L$E2Mv=d(Ttmos(h#C>F|24&EYMcKzwYi`l zB2&jxB3~+_cROuk_Q3nft`7&EZ52j`y^8M|T#>r}MaL}tkZg2&@a68XTOj>hy=s40 z4Wi>@Hy3d=a;Bsv!l+(os*A0_u%6sJb6VBz<@QwP>8UgS8bP6r$FY*f7kYT~W1aM# zI=HF+2ky^RaAx6EkD@WT<84bCFGsDK8{v2G_QsEcgt;ro|m+~PutzrNIU!7snF45`Ae~m_h1ZEa2>fv=)Uy} zgoa`FmwLN-wZ5cdn=cFv=lx=X8J$!@i+%ug5Iz->!~ z2@@WPw3JLi_BKwW^0KEnYq(@dl$V4oY|AUv6w&OJsS{-CP17L1G7HPo?#)tqT`*Zi 
z@5RJh5}s7#tSObZR#z1||1i(c%Az@p>rY@DmFf>#uPL2)htw1*w_L>$Eorx81Q-G@ zT=fG#-pDW*)~>g*^Og2+EVOvmz}{#gbC(3TsjsIod)&TOQuL)`xBE{UpsL_Yhe9Rt zNBd4b_t-ombwv;JdHX+^;~lP%3P_%|5YReS9O+yl%@@&S?jI68MaOCLHOyC^@wlGMrnI48YF_EW zjJ5KT)_P|Gy)qnv|2a3R5%WmCOo=8lG$FI$=2Nz!`q4w@3RRda4X z^qG=Y_r&Q-hiCBQ31}SNih?RS|FZ6t{#O-c(?8o1yv+>SW*|H37JZ&(mfw9R%IcP3 zoao>2pWSJ+4DaofREL0zr;Aa0zK&dJBMrvHvJ6!#-Ij;Kw=U+l`aDt!Npn4?piQb~ zQ&iw?wJ2itCrED&U|%xCqq=hqO;c?Xq=m$y$d?Lyvy`waq+%Z>2|&yTj-q$d>zdDh z@xR|&5rHsE!<(5!LmEoR7r^DP24eBx?lIYUaQhAW8m)a9-w;AWKHy<`f-mZW`IQyi zq78X;n|k2-&~RQgf|S*W_Ktpay5sC>e1~mfjdDYuE#A0kA}V`__%+94S27HTsJD#x zd?4JlsDU5|0k1n3WdER4G*U_ZVj6Knnf|K@Tdy4}k%HMT&$mk4t1^<1?2-IC-7}q8 z5$_}3f*o3zGsqO?$=Pc3ad=FD+w|7uo_T)3Ij~KYo_8Sa&Nan1o|XqV=)V+sd4x-3 zo(Na%Q3(IYUHsYboMy-)xoeNsTw4!#yY9!wi#whni6SPZq3ca9qstYOQ+!ED!)adE zFzPBRZ9%tqTT6KYYcGZi4@oHQZ*uibQ(0#r<4_G`NUSRjF9rT)>G#x=MRB(NUqO8j;RKhp2A>u1Qm4?^aCp` zPv(rIxJddj;_^Y3b~}&I$+2VVc(yx>T(D zbvQWphjdrBxANupfyBb^8n44_RDpp`QDv`%h4pq4R_v6n8%t4RGI)pW@(=P%T8z32 zXG8jBz5KIRo-G*KppQ@G$ru!E8qbM0JXBB!4H2^(r#i{q@6m?c$*ryPLqE5Bm|nLL zx1{^7cp|dth6KKQEqSXZy>lwuHi=^TF{-hj*`Z4#K25!OHFXjNCrZLm6}#{JJOc!7lfnyp;B_4t9eh* zQg@wx?_uYE=j!x(BBX7YT#W}Zt>eBnUx*=!YxX=aCB*@Zy8}H~$UV-0xBR2n+Y2Cp zjbYs>ReIlM$g686rfV)(0v1qR_2KI6|NbQX1TYp43g5=kf9SYMt1bEIy%$BxjBW_o zEC&fbj}YJCmpWb@iyZat6?;jz&uJ~JT9O^2{YfXV`VU&oNlAC`$i?s`{vQ2l(Ih}o z*PT61J>qn9gYmVYliF0tkF4NwiK-N_QW3c-s6B^r!0*REOO`YHSn(gx5J^x2%TSI0 z*U_&O2~@-<-F_wW<|ixW-O|1?uZb|F&lNHc{`xn4o5T3rh_bpsS#$sG$fp@D700Ep zz-*}>=^lF|NA(3|{b~;K{g+tE>f5|px|Z&HbKK#a!pH40{$FO{m+VzmQ4I47(Nnd< znn&YBH(td+72DMw&UlIkM|>;lof-b|r4Bvd;a1y2vMZqKkcYBa?Zza?Lz!I7o(50f zlW(f1QhyzfRrm#`MFEFMs-|0!Y84*F<(BJ9pvAq0qQQrHc(xrmNsFr+ zgjkh`*Cklj`KRM#ZRE@Dz-Pf$44k!ak{qsQvnv;N!upqv8h|$7cj6J^Gsp) zvTPb6M{^J$79fmC_40q?b>G4#P0~xf(ks-O)jsJ<^m4A;=Q;m4^kBk8xd3X|y5&)_ z^t{ET6tDgdBm^iC8Ys(p=nvhB^Mje3ao*cogeVj+Q`Fw`cWW@!n+# zWE0G0+PN6V4Qa^{}7c7xT!zhq=^;&S(B zYKk{1knVqrG}ZxbNr-J$w{-guyfREqDBHp&#OhUCPRL9B*iiZQz$k0oZ{irj_6X5m 
z&I`%Jua?{77^(I@%3>hWY)s^=#e!66vsbTA%ZoWmnyg=$2FQw8uz4Go^(du^d%fZD z4ipxtW8hLHVU5)aAl6D8x}OT#p!21Ap`F(8+O~ahZ|XXjx1$U#>2dEG`a!n?(azP@ zTxaBGduHp7|7$UzY^mtY?&f#qzEOC z1t{sS5nBV3Oi(dKnbUA~-NBN^Q3zv7MK^7OlCc(D07rz?W3*tbxQv_A8&dQfAC>?~ z`#6Kb&>5mv%+Q#5Th5obUD*ygXn?COY8l0#$}FHSMQ2-%`cn;U`Sv%cjoAxBW&EC7 z`SYc$i=>4HyS>dC<*DVy)|IgNH^1h=AB~m7BQ41*fhyw?W7wJ@Y>le16d-3@EjiE3 z=!z5XhLhYIWLMpT)l{)`BXjGWVp@3#xne0u`RS@Gk|rfUO{Y6&1*DAgjdL7;w)yA# zt_>!sT4b#zjKal63fqjv+>_!`2qDYYW+q~!VQj#H|PvIMNU(Wi+9NVNGo+7sU+aLSRYVhcK@XrRq;8xTsH2q z<5Z6n-M&_GaK=!4E;d;maflW>0rU&P5dd>TPeBoT(I-CazU6Tj`Yr~Nf!youte%(U$D zhOF}>R`*>;^CSiIXUBK3PVaks9t<-IOD?}A{c@OAIF?4#)ekugp_-(%2W!809ry_mtG9x+ z-hU3g<9U=NJBIUqHfihjTU;%1`n=m*_ywrCWi1mCfovrr+vpdrJkcJrLbek{+8Ezk znFxdva1nALNq9i0q? ze2Cj?9+PLpzihI`AVh-BA(u;UaILezmR~dz<3hB^EL*}Sz!mAhQqHJiWRFNPSL6Z!+dn zdQt6wwC7ArSv%izD%yr5lIMK2TJn;O?4@<)r812(70mbzF_8shzqV!JGMq>oPNar{ ztf2RAgAf&r%-6?9Curz_eEJzma9m9|t0s$`+>UbPGl6m2YpbI!IbwWO&u(z?qu(~3 zcrU_Fc*O6l?;m@KT)3C3_wH=iCTsTecS9=hWLup^jCumTP;-cIr=NTT=+ap*GR197 zFqR4sV1gUy#nE5C&!?xX(u*4<1X@@4B1Y`IC=dlYNC5y#AQc#{@o_+5sti!Wb%;Ol zKHW3so(@m}2gc>R<(2Z83C4CvUAzs)kYh2%^gKZ-3mKTH?o*sf1X1`)9?4^li4XxQ z5K9NAQ2%OI=%ruU5DFm+eP=Z|%C~;bf{>-0mlSb-4!|<={=tl!=`0kT1kPSO^s?zO z8I)T#%;LlZq5xn&BI{VG>G)Dl-iZp6Sb53SafTGao&ob&V9^&Bu9Letv|%8>Y_46s zODo8;E<_Y)lb+LoEK~r3QEmN8UXSrmQ2 zVy{5W!vz2WhXKE1F`(Cr(V;`mh{J8`y=VfM-|iN$J)s(w&kua?PSB|BJ~y+d<( z^`S)OiHiVb^5>z^2~>m*UX&dXf)y0?A%sp$qlsFzVqh)`#0hr{<^aIt+z1K4qH`1o z#~)g551&5+guy*Gr9c9x4)Y;}xJPfTBIxW89)0@BpoZmqA&W&1J7l#pg$fX&vKTYK zEJVofdn^bBc>6sNfGfHArDJ(*;~0mrS$YK8B9l7-1}7*JIzU91+$MuGr1!Ydp$2po zT8e?P3)L_y z2C9YWHCIaQPs^7{l~lZ=NXy#lK|%pfbMMP+QVqZ2;9sbSF8~Mt2dQgE>M~l-2+}{< zTa69Z4^r6&l%T&*=Y`Yh!5NgOurZ+T&Vze zX2u2(gvE*E09YsE2G?ukYIu}?ET_et?AEA|njUxr14Lj*oDi0iz81c@w*X}>49wm4 zWWdL4-+q{}zKgu3BGqD9{M}7d-8grtl?g`BBixDrZA`5wm-z5vWYDvprMx0(^sg;Z zNb&jOr+(L*LzP1kpn3>y0fKxZH=|Q86v9C)N}uA(0Xnqnz#tSzmKgstJ!5jEtTtf>-jLgO?BqQPt6-Nsq`O)JnSQKAQvz1zK%+oAB4lZz&Obv1%zvC}MLd 
z8JC1#PLa&_TTaDSRxPIqIr%M}0@`Ln1)Zj=OyC@To&!8CwwzR?i*wH)kCR^y5vJoz z2GE@SN}bUh97H1g>`?z8cfe?YZ>GLiPtv{f(?;>so_0Pf4J;maY;~h|hz3A1qBcIN zy~d}Es&HiCtOx`Yu{Vm<84yohdgrnp2@@!w5R?j35qOS7@nO7(Ix!7@GiA~QXq;hh zODazxQdx?_;y5@F8vU9l1Mpi+C)w&I}$#=<0~3OH!%47r=g%Ry{}Vyu1a269}c zSD_d;0!J(oieV6u`;ANe)}jR586-3Xl6S_~<8ofR8L2eE!&+J-uaM^(r|yTQ5Co_TAgtMCbK8O zh2z=CF#oV4mN<$8_=gFfqjN+7tD1b=XqKp2;1^g2S071a*98+*0c03kYqQd;OaX!eU;zSzaXTrNTzn2jQgGpq7 zE|2^~qF88+7bDl7YmHuyxhFGwbr2W3mv}b#Mx5nTpBQ9YHaUQ{kp-L$zDC{^Z z9(^PS*#l&s(pfl;?05rITg3xBFYAYZ>=@r651nn@O@S6%TtD(6^_f6>?Q~r~3Y8Zy zs<0bniZmFHO&k?gG7;78x@A@DaG6} zqRMrZ3%d;gfCf0T2B}h3kN5AS;xA_GVV=Nv8>v)!&tq^M-Ky^hs(G+rJ`DY*cV8f6n-msK>E2TZZNLsY`8QaaX= z0-f0#P|gy=ZT*Ua!pC!|WE|jNngJ)Yzc1}h#`WDaC91WH8sRh$_5>!CgVRI?MNDuG z48xQj063#@z?`fy_FKW9@Zt=r4acfe831a6HOoE;{*b17hPHu|bwZw`#8-$OvxbC3 zZ%!Os;&T3OP@IHff1l1GXpXbMmAfqLX)QWE-64;NX7Qm4?b=%KM4O={PfmzT!pO1q z>cDLK4@(3&IGt<06fLy~Ron_q>pKih(578PUu1$bUTVV!(o{0li!!_fnXD=qb?QEJ z5RV}TP|kT&Jw)=&#UVUQN2uqDdDJVttkwr~{!)M<4Wvh73Y2TaYTFmg!53rCfrxY# zGSdlQU@Npu5Mw2R1p@LJS!-#EcH?_cylErKmtcfdX%|$#MbcDj%7KEhJu%}F0O&Nc zE2y_f38~Tkqt%|o&c#^HH~;i|;b-^Jo392XIuf5QmjXGM(we6C>SXr(koGxn!7e&< z!?9XYY}_!nmGJt#lx4~1!yH;8pey>;M&MCDl~9Hph13zTqxf;Zq z1w7m;n-&GVUsB3DjW7NYdy^jZ5JDyqe`i{V-KMZS5~8G8mc{|mxI|Ge3xq=yQ>~l@ zia%z^rCarjBOo&ki(6+AgMD$DewuNB;4wiNH736ME+5rW0iz}07$|@MrtFPn&!cyR>FzaEer_k($r>(LZHsvZ(-k5;Ldun~1H+%^xga{bd0yrr^fZ_2Tddtf6`S2h*^oe(tO`9o6BkMfj zJY`u!Hoo)fHTL%kw(&jL=N?w*CxMKuq<^K{iR$=_PDrT!l%)4}iQ`UG5@GIwm@g2- zHf~6ZcBG0s=dy(kf%q!I^%PsFz_T<{7GrI$6UqbF^>?wp*54CDl&r`E@hC2ib6A6I z1smYqb3c?d^VzRBG)0#cyX&vG1o!I2 zI6_2z^}m6oB=dD~u=}wl1M^Vm&sDhNM4!w?vd6&WJ(^cwn2Q}^R~)WAy%s0ZEkEry zk=~cF2Vij|O^WWta%SuSfuFnOqGJm@CjAFq5Ihl8W23xk7q6)1DUj=7;5XfS1sqsF z*1b0wJ~k6j*pNW$@Z)(Q)4u!5U#t)u6rz3(H6fBX0<~kZg}#6ZMmml99 z1d^v%BlH|yLhnH5>!HrEYv?u2aq|x|BFlC_4WdOa4T#_X$K_IECQrR4iuM3bro#Y! 
z4(%QQ>epfH$ETKGNVvr%Rm&``>iLedGCP^eS|76t$T$4^LaDq$R^C%6eTm484Qdnp*^ zJ`M%$9p5Y`sq~Xo$~|4JiBvNh`ZG=0%TCbQ6Ea{Y^qC?ww1)o5!QRZxZb^lFZlu|B zu;V--)(l!>sk_INdy|;vH?dP11QC@+navP@CP%V(ipNJVYM3L*XVVK{59XntIiY|Z z?kCMkY3&aL%VYBlb@Gg_i~UXX98}dMOenB5UNU8HZ$bb{^h@N)JY`borZFHh5~9H+ zDNK_1oS+v#JcAGy4egWpX~(uPbIqsS3uX^SjBCc`q6SxxN2|6P4A4(f9L!X7lxYro zg%6M7+@6B}z=N_35qoL~@fp@#Zv-<5k+H>^AB&)g%ZXDF-j7dnOcb7QW7?0)bn)dN zi?;YbCcr%a0+-d{j4iU9lOBatK zd6HzBPQDnZb}B@ohbv&vlWkA^wJXGyAt8|st**MHF~KP;_JF)}_B;djrIBzh!EcqO zEXn}!+XB=ny!;e^_dXg%1Ck|=c{nJLJ7hlFlQS+6Z2=(wBx3A+_!g)p6`n5M45+IB zNN2CkF&KMs!9Sh^SLlK6_^GVT0}x0MdM|WK+cl*1&T?|VG$|YVOI>mNkqOP!<37Z$ z(bZVp7zhAb&>>wKii%7j6qVMHZQd9Qu^flM+TC}Z^j|uH`6)>XdynI}P%+#%{d<6v zVNhTQ7&+;MAb?hSuJ!>8K4n`nz0X+6By*@Tt%daon3J@u7a@4YRW(0qTNq)F>$S~^703Ipl!NTSHyxgz zU~zdVsBQ{M;J5(DLP>JRi@NU!-+Cw9?~PLrM$qPKKN}$$5x!Nb917VEd?bhn2N22x z6qlVYn9%|5fmEgbc2LuAfI802oTza}9SvDgXFs8yHew=TAsdtE4~9G^ zr@|oL=;i3ljY9wOZ0BITB9WCCSGfS@P*Yq}mRB%bm;tbv0(^qgc_eOCda6ANcl|P6 z`UP;}?0RNGM3bf-b%gXz#9i;(N1HQy5kz+`R`rRVmPSrN$_lO6xZ2lS{{uIk^W=$- z*A|F(dH(Rd7B3+DV{F4sHPD)h0n0s0Uwho1T|UPNQRV%>`M(3W$%>xxbU&ipRa7siDuq7?+{ZlLebwa zDNj~2sF2s{oEF(9C6@tGbIN>4l{NU8NPM_Fv9hMd^|dN=_Pv*(>A}L&1yW6VDon6^631guOH(1M zfLKpg;RVQx3HRouRI;~FzgXXllVG2npdC#etyAhoe>?4IK3(b#bEaDFq3{1e{WY$j zT2)85hDb_}S>Yv}aBx?SBILT>b8z$?=G8-^Gaf}$-^=-sG91K2{}nO3HyCx+LmJhK>j_2; zQZ3_G%ylZ;U;2cNjN^Yx$RqkAY2us%JFmH#3|{!Ze%5Pu_iyd_B>76AsNSyn{+jv) zx2Rw;w^Ep+rXA%xk^7nR{cab`JJ<9z%Zq1hqpj@tHha+_8ZF@C7{A*yPhxnS9!mS~ z(jjwHr4AN8j+XSzWy1S{7%>S`o-0=<6qn!50?A_UtM=bie0Kr53J@gd|LI`sLfo60 z1fKw@g4->fV#1ID2xw|tWR~iz#Z;uI>JTV0%r%2bzJ-2DBwkpxmns#t;eO| z5?0Q(xH;r3r%z1r|omMdAl3jxsO#KO2SPF&#Ssw9zO-)Cw`e!R<8@mp{HHYTv(sG80HP zjSfeee#*bKWnvzpHaGS1+Y~+i(}l?kE;&*>TJ6Vgo*AFwN_cN`BPRH{Lo0lZAM4${fo&%)*oRB6h)??3|pZljbS` zFa_h01>L&z7xg3dl#c?VGCg-!jt!}UbGM|G0&I^*`yO|f{(Ca*&Hy@?EWbnN{yTR@ zgl|g0*&&P1o?0UItw}?|^>51id<`;>8XYRA#(KteP!oHVdFNfdxrC+J&F<3I;Tmr} zgm^_age^8+ouz`j!OQhCi}f@5X%?aL!navB`PM$lu3v?kN{6n9>=aiJQ@3PkbltLOapI 
zE{(grHCK{(>vbb2Q@1j2{74Z}@##+~R2x+0SM^?pRH|JZy}(Etz&q)~3EnF1Am#Ia z=l8tdJWxqbRZBbxN*{ex@ywAlzdT!EuAJ)i`+LrW<%bvDFCLJX6ug_H-gb;*HjFf`hg8e1Zo^ zd&<`B1fEJ(OUClw4l{c$n3<8)kr1}3Wa^V62-E!$cAn8H8s7i)iD9ijp-}MhO zGh2bLY?p3gXs4zF>C7VcW5_KZ|S}k zmrq8?3!O$;K4~Er%zPclvCqv5EDv=2=-&3S8wDmcF8>*FAyiZXjhrSN?1wAVRmJxF z*}DX6>Sm=YePhqu41K?Vd4%sAGO@oJ`yM_3>NOSfl;5=NJ>PDN9cbLUTG5MQid@5D z{VxXR?z?#_P1RWQ!_cWCZ~SN!!AY}H)^08|SL)gO(k!*99WG^oPTWG{r~33bo@W`g zg_oy>@8Ap(gG0?wC8cP13@CMIP@1=7lb#-RHNjhGP5YJB><)3>yMzZ1At2Z6NVrgh zXv^HOKFReh0($^TFHvan1^fXCErCd68`+%E~UuXJBeh>cv}emGqUk&3Ff&6X=-A(y(AVBuTv^%pB3J$ z=ZqUmNz12ptBmpRh|?~yK*WDLW}TL4dwb>IkS<3o+AwrlI3*!OEXarb^rp65n$vU{ z_g%jms%h+?4dsNAB z5Zh%OmzWFose^0=3%@Yr)xkp*9P}I0=i3Ps4tcm7)Y>_HybfcFO zbCg%?OsZozTHF_^Qg8d+T;JVl;mD=VgP&dr95cSi@=;v&QZz@<>%SKBlcfo=^GnD? z@L6Viwez54e$yS_0IwCA_N;oW@K%gVq`|s#VbKj8~_*TAf z_YQ7d!sUu(K8NpLy#7Y)rb&3G+v1Oi^H5z{$|%MxyTP$S^7p2Nn}z*jT;1hYA89;X z@5(Mu*yyY+&Hj?iQK^`XAnSOfTu%zCn4fhbTRu%8D}?4ec>USjX*fsjslS^1vBz0S z&v6&L)dZ}Xepil^N{5IJ5g&}_TxM$(aIMG5Xa?`fC;3m$|4mEqsR>N*9g)8DUU{6Ft4u6-VG^~08nKx95^6KmFLj5k_z32!E3nG!M1 zk{$A#CDJXA#rB1siuAp#svd1}^xl%2+_`td=Wd-Le{@im^II~CPN02LsCdOC)9OWdkFJBX;9idHB*>>C`RpF=C5=?%9{L0^NMis z8=)XqZlNJN8|NYa4}K>19?ThTkYg*Ii1%7J1T}PG5ZBQX<>2DIJme{m*_=UZhVQMP z?>~h>zQhZJ<;pw0H5?a=>MiGM)Zi9EUMLwqEK3*XotSlfY`^?`I_3 z7OD#Z_)N8$z@>^g z@gDx?T8lkhJU%_Vqjzq4-vO1zX^EX0_pTN&evucQJKjQ?ly8Xb;SfN*h@u0H)gCH{ zlO(=97Vdc=m2oO-UvDw~Me07UCsD^tqML>{Sep`E{bncw#Qjv2GQdI!ez|OyxvMDj z$JqwE-np$_xw;`BO-BTtnp@5k2n$YrB8x$+)N(A~JiG;4QuEp=@ZM(KF#x!CFP*dH zH0RVyuJ@i%rck=}#)XHk^<03EDG5C{$GhZ@qiDiDYHZgaN`#im@&-oqB3{j3%u%QE|$7^0JNXNlJmVM-h^Ig(`&q{u0F=9sIo+D&rZ zEl|HpJrT32$2@4gIi2%2T0N5m>=K;@b)mH^NOY8?#l@t#IG922r?q%#BstO|+i_Bq zyo~;#jM1UCO*8Vlp-jl@><1kY9Ol`pLnpv2%XfJrN0_D&RLAw#gDGN#e|0aGzxFK3 zxOjud<#nbs4diRbm~7XaNn!g)Nv7xVe6(egZ4`1h2R*j37`YCYlQZ%3Opd}n3*FZX z>5+YjlyGmw2H2q`Qd3fJRTT>by}2w{EZ~?&XpoXO^8ByXiYy57oE5Pp!QC`?(~i8} z1G@Q+FMgE+dbxUMLV~-LgSRK^GY2e)RrLdFGmiUJ_yfgYcT;u+Q-NQJkNgWW2$1}i 
z=Xvw3MbC9^v_716XU*IM=Q(qc-MCbJ;vFSwPr&Kt1m#&n5lya$#{{cS{nku%E02TJ zXxq4sf{un@WQQ%h)jWO8*rwffEP`m0)tcUht$Y!R%ncZ8ae;@JS$Mi>3QV592f2Ij z!MqEo7bJJLyGOdG3g!Iz!EAfm6>ssGlqS!nyU@JMu%&oGYXe#ZmAeBkeBlWNGbe^&n2?{(6yH|u{U5A z<}wSVd?%&u+Dm_rmk76SaJ4sTw>Mykk3-fz1f-R{W>tC3{YZPNQ9M^YF-~1-N=;Ef zIT)l74w9k9osvX`nI*T(Pql0|qLG5n-Z_w7<4*H98m=`O%}g1dowmp>($bE{aK|f0 z84uYjSUf^mJ)Iup0uwf;C9s+A_jo*9;1T0Il0%)WnEDCI%#_)TtqE_i9-9a2**UDD z0JbB*0G2~0`gYhGA zsHKsOU$n+U$n8mF|=YeuP)_51CZ1NEB>M(tUPn2Q6eiH^G!B z`qs+1tm65VQgf(KqsdnFd_W=@oU7FSrjlrPP7RTCI7>uGB2u5GYM3Dx%AxO4bzY|` z?192v;a=Nj5fnJ$n-Kz;Uhj-)+`MluoZhr)^NdjX$Q!=7ifE>|7 zTYL~yBi6m#>hM)tDEJ`#CS_Wy00iq-coUKw!D5S#6QV3Bh|3WHXY(Tq*j#{a57prU zjX=+#taFY@$KHfuR9ICCmStw5Ax?WFDAvuBP5FKF_@Ohq-}m<~?@FXj!U0vF>*F9r zg1gduT;E5KlJObLIS_VAeT^_Y|TIDPZu?4yr!iXY$A$NCdL-X%dDh){b1)B(4+BDMI7^ZbhM z;)-zM>Z$YVf#+8;7nh3{mxa$Soq`U0jEiD^82AXf4)~;q`?UY>{DH6gq2h(_Ywm;p z7Jmp|I9zj&N`Z$~f&NF)dB;Qj_;LL1oO3vw+1cl88E22s*=O%n;;ge}Wu-ctbM~I8 z?3I*AG~|pkLP$nQ_KYM&rThN;{`>rKe}6uYd%a(;=To$lLAo?GZXOhI_$8(^Rp#)^ z*@nk|AHLAUQ~wo#{)ywHG0QQHNsuKE@GsgIxl)D*!ZI@}E`B*lYZ;_<2$t8N*IG_u zlV4$%_tZMXaY%S_T6uE1dvey(am%m3Ta$UCS9p3n4~mjv8fuICXuQzmgP50N@Wym@Pvd#cs$KVONP5@?`btAkFb1%T? 
zwvTbPk4e1`w%EshX1V|XcR>GHoOt4e)WymGi>iRmk`#KnJ*x&ZG zyYK514S=8t=Tfe77PkTIm$yD`L!W2UlXV>e^??xpFyuM|Zj-?V55?jAoi_dPcpvvo zd?+4`dA5C>@Q_|_IxK)04w)H|K43C&|0q8VR54bVf6`V|1PemswEW2aiqg>`hCnr<>`zZ9{T@zj zaoYcY-*3|0oh9whJB9S#4p|5d{6yVfoZbJtxxd7-yYwrhs5NByV4n_4m|}sr7tMh~ zw3BfMW=RLzzk0WYnirM!_oxSd&;hGGyDJlc-!?;6g#d>=`-i%F&5+*6&+DJPyS!9;TbxNd1>GlDx*G9{FBvr%HlgjsKw*^Fuu~(vkE-z2t|+8{ZIW zqzUzh&fKQiZ6DqLA~9bgbsBsybCKG208B1^@%XJc_bB5BekOj$@ts>vPFt)JMd-Q_?T2~m4NF?zDCqfj}@21mMY;lySQsGufP2N+r$a^6FQ>_*??ZFbIYId0U_+- zn^HEP!*%>2_*SULMz)NJOm)8Yp6-vBlC`Y#9Vo)xt}({0_atHN#I7sm*2&48=P`tL zCw9WJW-g$v7?Aw@u7n|iBvFtxN8V236eAc+Q(xAd29EuP$3diQVn%fHXxZY`asqLQ@bZ?`_r{j*g{ zL7_=rQV>5Wh!>KyJ4Ex6O5J;Nd*>T%H-_XreEaJ+T14*cA19Eb$F%(tn%5y|`#*?i zW3rM|vMQQR6+w57>{3F~sn{ed1ISAF(Of-m2n!{Vi3R1;xQV3ks)9x`*-IfTd`f;Z znVKJv075PY_P2=7aIL9<42$5E9XnsCl3P~I$2Cvc%na06zK{H#7L&G@&lXhP+}lRi zn%ojJ-<_Fzf-|f6lh35;VpnfnqFWc=v4i&Q_n*A0jGr(_$B$(zzK@12DusP67x6AW zSnTu}n_OT%dF0XKGqbDncQc(+@}N@1^lZSiK+3ijeS3OcX>+Vp&Llf&;m781HK%zf z%9k0O3jn(uet$cs&_qcZB<){Z_}cvJ;poyeZnco6t~<;R?tQv|UPgo0KW&|99&b-K z*gp>ar}gXW$LEpX4*p#_JzO0s_;y=CES{kb(55gz(>eP^BP_nmx&EIn&FF_^w#@1y zSIu$gJWPE+qn{;dQ6mzUxCoesDGn6cTM-AiurMN{Jk~-5!RoS{$gb}%qHa8|XN*AH zOJwJ}=v7#qAv;Qj^9EF~Q`6CP?^uTT{Ofv)ka4JtweOxFrq7te?Z zXUI<57i3s0tQWYc{jMv)sl{<1?P;E5n<1`q2I)JrvV~$ltbeRLVnTZBZq#Tuz9N2= zSMq)om3K?|CQsycc?i!Hunzq_MJjJjbCqpIo>O2zZ5~HqKz2pMm7Ti#(K00n6EO%q zrxiLc(5tyKo!2MVm*-+j%Y;@?bIWM@$2=Y=E!i=6M5N$uSf@w>f0+6BFTS=ngZK|m zJ9uvvqThc0A#iyk;HBL2&+&idx>nNnKF67u*`$~5(DLM8;)JHJ{+OX0e{xTI6G`=bwv&i3MxF1YLa4s)g{EJaQ|VC5< z+}IGz!ejf{uhiE0bHRaXz_qY1|F*w`&{nQF^ZJc>?q0H$4PRidAC2erul8)!+`O-8 zdu8jc`D`<%dBtcDwq+lAHFRWrQ>*+NX%|tV`P82l?-4 zAC1^MhsM_x9v{3X3LG6DPnmpE`IXvoQRigg<=;9K5o)xO!mMSH**kc=$I{FFCurp_ z{6Zs|^7Zmp-9KdL#^=p^UWPRF>a|@@cwcQe5s@PK zs$Z}&&H^t+r=|MTRwXbP5SJ$N|*^u?UtbPzav`|5+fa36Ml$;*KMBPbmHng^A z_zT)jrY+g>86fP@Eg$o^;#Xo4%1t9{J8M{gMoGcqn%HjJkm-?DowW7uS_AEX%Rif) zFIqA}ouYTl>krvdQ5}^4Q>X!u501X0iS)D^Heln(MGGIl1r)7esAyY0wO`e$wc<&l 
zK~ObCeM@2b!*WUQDOLU>B@t6NOO!t#9<&Uwhld1c8jKWn8rCUzN?*G=JyL3;Ul-3b z&ZTSf{zD-pQw){NO9S02P~zZg)KnBju-vlOXOXdOvvH`7;9xc^v$aX=Nt4Z89bVB* ze~t|h3zbNjSpg5Ro61?)T^%Fy9d;UZuk44Xn_Bquy_bnubWqL;H}RG@v7ct=ZU{Jb zrV-SM=ry<*SJfbp$us6yfM^%D7ez%hziKkpm2Ndh=yzE7(klX{++(qEk) z*?kK+<40=zCo#^O5^21=9<0{ynoByhWD!)h!-CKp7kNRF7Fwn+aaZ5LH`_bCRvU_; zQ=HXx`cgM(FQo40>KUv^+EM)yT> zP)){oGj?ov;L9(n3i0Y3O{KQ#27`u9x_(?{R`dy(Ey_XD<#YERLr zpU8QQtxgG2VG;keoRN98vziB~KW z;gwk0`wB9GyJhOhyWVGa#McM*iV&owqSe?AvYaD|8Eaeet{ z?w$@aex35S^(t!rTK4=3)aFou%Z?>QeA%`kEAHn*FYck=olNKSkuhK0A>(&OdQHH? z8mBzq%XuTm+_7|-y{HSYMdV%eDXny+FuzA;poEkz5oCj6``#?((uyorIDi2eW6O*Tr9or2u_f7N{jV`{6Mk7L(uuYL*;{819fSTCQV;l3}kv2M?6_PrZ}1EoU} zK^I+xBs<-p0IJexUa52`R&H_pag<9N_Z##PFxo-IsGNLlcu4zpUW* zKmdZ4saL+kSe%*U4&B|s0nX32nA8#y$$=xUMFp7misJ% z4(R05@wC%Ak4jaClPQ788xO)h2q<-V)aZOXJI9CQ&eaYS#_6qMO}%Kf0u$Jx8Cu&+ zw4-4yIni?Ef>j&e=5V^eE#u8b!4@}G8QtZU<133*CP9r9!&YsA40~svMe-`! zxjDpK-!emL(5!1Q>-}pxxRROao5Xx2WA_&orPn?@<nkGea~!^N;`xPF z4_S=9eyJxqo62On`4d0;pgx~{`8+1}Idcp#PvNd8w^G#GdPU2#_JVkqe)dm**y1N1 z-k=JWu?99mg2q0#%ie9{HCYn5%lHctg68j7whBdGh0?MkTo8(+Iv0pHY!r8;%6whN z^wvC1)>P=`B!|)gE`4NL<{8oEEHxQMrg33CcRr;6ihCS}Q!I+TSSotL*S12EeR(UW zjhR$`CXGj9ssXMRmiUn{PP#OEgo`V%4tSx`KJZb&Bza`iQ@FHu#b0g2VnF0)(~8*) zuO&IjCU*QTm)D;%VWq!)JYiB@s6PTN*f>a7qfpNcD7+Irfrys!ut z1@{j@V#(iJ9OZ>vt_xfxT9(Sws>L)-T(j#Eh5S3manDDlm95xHa6-Ya^EVS+)}J|+ zeoYnWD6-L*ed7tGtY_fZli#e14`d4U6d5QIggE1CdTq&X;`|j$p@Ox*%ri^AFjr%N z$@2|sk?DXjcrsnet?ve}KJ>xp@w=jwvcNpzIa%$+GR#s#cua3Zr`wegSUXw4t5XIR zJ}s^QbN#*Xz}6<~9+p1rz{Va?^06b$_;ypdg^$S;o#C0~drH<)ZXq8T=r;ewFn8u^ zu9#q~S-t1hV#^%&0+)49x^g?z%EC0-eQ+a0o0(qeu!YyNnc;icufwDGs^ zXQp3a-^u+;B{%1!zRFQwcOgZQ31Aor$#a$q_5J;cEFjSn-mfgjo5h-z#Y)P(BH&jx zmXk{vPOvwGPJW&8IN%=0sU60gvRqiNN&P$~dn1=2oK5h6c+6tSn3bl@Qmg&ijh`t+ z@ySR@xv+{))|`L?TFll?>T8MF;O}u$#jZ3V{CJ5{p*@nWW~!e`_~g*x4?C_>>yB(MC~h0$8H=rVj~$i@Dh94G&HfFe+Ya#VA&}cSuw*1>N@231VShi8 zoV)O;Lx#+9(||I_Wo%QH8P!@GK8AM(t2R;a?gF`nHe4|a{(}#{oMBC7GE=1=n9bz> z*f%sE3q5!`V2&MG;#fB?H2l`*dYu~>?$i>l^8w9u{ch~n0UF*8kun=5TN%!!gfS2i 
zq968}tNR^LZ)ix%Q$qoC^F~66%gdH)NXglzIi9wXYi@~KvXwiC4^#Di@1A=3H8ODt z=r#AU-&~#9?!C9j$Pht+fG+ksq&K6_k`&MSK-V9M$eA|>*2yazqV(Q≫RfR{q{7 z_j=}SexafC_=kj7%u}bp0y01Hj3R6iX^J*|p(2{HH2viu0~TqZY1psEg;oBZ(Sznp z1whkfnW^nSfdQqla~}HmXeV;H1lrcEHXp9@SKXWRl1UujTHJqX=RhB^BgsDaRKz4K z80oC{;W!k0KU@Ie>}d25)sAE9?2HY29L;F{b=Pfn{}mFHGHNbOPC^ZEZ`caCl}$7M zt4ul;Pm>DOERlNky>v|`lsGwGrQ)Rc`)XFmT8>`9==nUEH#aBDm5+G#AMO2jq zehJkUP`5^v3LmN5LzqPscFZzaTdQ5@Gwmq4^eDIc1JQiu(f)3z?jSGh28&J1*WV#A zcC_{5h1e`bu2D3`K=Dk!T%yDtU?!wQ$LjmoPV%U(gC09&8B1XaZTKpXdBKrkcV^t7 zHk|KIaiOFT(2kn7Q^@YS%;%>;X>QtbwI(&Ed|qI$^J>YnQ}tDXF@NS3LMLd@F-$9n z^*8iUN$(-E?jLN|l|sT~p3WPm_@X`EX=L3Ua=mYeN=V9nsE-m$>V9U2YD~)BI~AX_ z%VyKeyCn5Usj>9cqzAWr3L;q%PH)R)5g_WS2~S}WFklhrXZ`!O@08hAz)T(D)H9ru zDdG0tn2=yapLCcWZzkwPL>Z<-^Fmkm-AIa*7+A2Q`|f+agK=Tt3VA4V1xwbai5yd| z(ErZ<+cojkaH6nJvZYV5Q9rLHeBxM78mjrC1z+4ec>t-k$qhJBOhL~O*c_74!y`dN zOY}SjazB0!Ze|BfW^;IC5?cFHzQn~}9m~2L|2^g2+AkjV0mO0lgD=0OCxKUT6Yu?} zDQGig_t2TP;lZ;|=%Y*jlMo*6?>MzegKc}HybKJ}4Lv+4wkk)Gu z`uvFcS%SSLI%I~>r-yV7yuYUH$$H{naDuap^GK-F*0gNOjg7b0~i;ZUGZW758oC2BiyJ>_xi}k+p<6#Wy3qSk834_kljs{4rCA9sg*7xmi#TH`mwYG;BA1T~wN!rDR2jzICj9(kK$bCl zKmhY5Nm>hOC$jR&n3JzB7!aU9d6j5~Jg<#%@ldC)E)Y6VCm$>z*`>p4f*gLa6Lgf$ z%_BReLCr!adS@{pO7&XY0zyk^_wwYky4`|9jlUM^CD&(`DF;~HkOJr%rUI^Q7usgy zv&t^TV6=zWd;6tT1GmvQxS%0}a^URY((8(tGUY%kRVe0AMoK>c7O=m|O%`%YJ^dr% zV8-_8#nsfVb@=78SaUKm>C~st5Gr!0qYqOgDlkfO4RT$m^dyyelH$%Amtx7VWF{N| zkMPLml_>SbBnN}RvKr?dQ|)wEB6}d#mX|ed=Freb?KtJ!9bahzQ=y((Woc>8%QLJr zV%K0wqeyRFWs%U=WxW%3uvkoX!^oo20hqrt}d zIo!Vz=mm!Gdiakn6WEQ1D?vO-di5xu`;7IXE+$x{aSHK!5&b)-ZTiyE+;iqQ z9XZK21c+uR=rb{%dQeyu2cP)EGpM}&cq}f(JG)6Q5tf;kb=SX3kqRM;S|O9Y)7!@G z@^eih6Fxq;F9|7Fb#}&|=HR7-cOe(^bh%8= z9hH|w2H6<-XCCYI{8aY&#;m#cq~9BdHARpxm$pkwkt98YXEr-2h0e*Qo-| z)#!=vOJX-R63SRl4W;zC?AV$cH%v&P5&)u4vA{TYuNIa7JRk+Gwa7vMs_k=pmA99jUolzZ0 z3XWsu&(D7_u$n|SifUUt;qSA!l3olLQpm4pRh z4X&rx-&-uJVg^?xNorH*trvsU{gHHhLJO+uKdGE7NGzbWuTv1D$DO8Oh>AUB=sBMk zHxuT>#|De-S{`vVqxso**Pq`plDZ5}$`z3>MEM*xia`CB(X`B|hrHnaFnup-x|%~W 
zFcax@EujM4H=Hzd4|_vfiL_hnTB}$|3B*^@iAXXg0izOOIIZ`*hj9{GDgVnxt)*@3GQGZi=Bed~Md z(BEogig@XDYjC4}Re2cLCULZ8{Zk)ZGl!@Tqc@nLQ4e~Po37Yp{lXi7F-x&yGQVK5 z1XCA*&C7=6-R~sS2yR3nH}8XY+rR~MVlm*LjT4bw)Z(kU=jtJ4@bGKCJE79|&QY&I zPuJeXNa+-oA7Kv+&ydi|%Vt8qib{F`n;y1cm?tw0Upgj6;>~L7RWH{oA81?V#}B}$ zUif4_ES{cQl&Yv}pZvD!tLnw8u0bwNu*Ikd^?=wbbh{YR=Jr?CFTB=>X*k3nS_FX7 zFhX>Nr2^J_>A*+~!;8ual_#7mfy4J^6O>7h*~LJOxs$?y^E`HkiSU0E1M~&kw=Z^q zYzB>NLR7D}sB(37uOIwh$fi|{b*uUlHiGGBTw)-)pLY-|0BdpW@2U<^<<&F9e6g7Rhb-#U}dt1D55agGK=168I5ptY1<$k_P~<9JvRU!UXc6 zSCyM6mA@^dDn3R*6t%f-l4KlDi^FJ*D0nZu>$U^uDuuvC#9V^;Cvpv&=xTS1)^b1E z`KzniQ=X(k08L6iRJ)nTrwvc+U?KtAwfb87DnT+MDQGT2KGQj);qom;+q^*zx@oa1 z8hU#VfP(&dYb>3uS!J}P>OJO=n$eFTuI<3M+1~KECdw{ z%JGHus`3v-LuY!g7^w9E8?YSWjDRHD^2)GBq-@tq==!^~-f1oLnaZ$Mg~ zFenRMnOHrLg*4!^qh>2Pcsf%EylO5$kb7=TAa)ZZt#yx7DpK3`9m-4fzlT^SPTpE& z%`Ftbg#74!x!t3j7wOL_sFh0>sf-zXdpBQ?Vlw)&0u+K8`O0qAGXne6LtzW-SwjAm$a#!3I zjR!yUbj5`80B6m;yn0(9qF3IdjyDt^#EtvCoh09OX6KDE6sfFMAVW{s;M8lgB2$LdO-Fj6zUL3e(176JPZC|7|vzd#&xT2Qlhft zpw1m&JOyCB3|adzDn7L>o=(-jrn@r5A@b^w4o6K=V9iiKP0){0_#Gs2T)=c*HN2Ug z5EzcWnewNj5z({K4sDwY?FHeGMsAbyPLEW|MgCsd^d(dH!)jullJ0d?0<{POqm}u_ zEwHG)OVfZ$jmS^NRo%;vcii^4&XH={m3dSH%?^pBF{IA+{^T*9o#xOcXO_suT6LLJ z$pzO7bC(V|B9X`F*>K;MW3j=t25eR(bRxk_l}yD1 zo62)GT`wRV$GljjaCeyzFLQH=Vb3fD`3lx#L53?-@=4i31n|82AW;`_UfH20D4VNV zq=;LtQ*g>d+^P%e-9st16pJ?1Yd=)~8=I8EmGo58QlY@&-c^p%K3RE6ur7s7AGm^3 zPylVR#WTtsXf}s(49>Dog&NNC`Sv z9|Vv2gX@L7dasI0_Vub!30u1aeF}5WU=`Zo($8e-snImt;nJ?W_;QoW5C@3B-J49d z+a^4FS`Q1xt1pfeLi>Q&L^ut}K6BCCZl(1$fNh}qTpB8Wn8IeIr}1M@eQl6*T*(Q} zNQYIiX-$^GDEe-=O|HJSY#`t56@(j{GlpnjM?6YM5kGh+f$L?c_5}yl{UNQC7XqC# zS1PQ07*Z+Fq(jTiW!U|CrY0h51PuplT;>X!fLCV$!SK2cs`$E)b zkk2bJm}z-u$TTReoHc(8e)%N{DH4@`uF4YYT{`tD!naNLm|tte8agJjWvi|%Cq149 z=@9@>yTnqQL(lp}U(2JOjvHfq4>RDaYZ_t*pUQBlU@2R$5s|{s2j#rhiocU@AO%|| z*RtkvSWXhQyKw5SQ~-rT)ZHc+v{Ue2hGjIv1J!Qo7nRI^lvx@hx(?rWqnaMX z8s9I66!JxPuMcR~n(H$p>3@5K8N~y$$Z`&e(=tzPn2T_8Jt7Y?ufSRLG#;V;Rv 
zCc;Id(&dcc5^__LMGTFR*A&AMN&ui5UW^q(*Q|cCPKD0+Jb6hnw2xYJ&tL{|fuAnq=NfDD_Q~YO>Y%H7OlTt});iX|1M~=_&C_K;QVYNh`RH0DeLdjqcIch;Q#(cw{3-|$ zO%cIufFx?7e8h81LAA~VFiHGE(u%SYL0QP_9OQ^PCsergaSA?;l#6>R&F{>TrGMRC zci!tKB=G_L`m)yeH_v*JhS8(FL?8joex+HN1h|e^sa=3Ev>Wy}YG* zO1H-mx}p;j#wytq-cd@X&tC|~GNfYeQfiDqR*u{2s=pIRSkznXVI0@-&qjU%Qx}e9 zIvLOcXK9Yko5itAD#ij*fB+?b7jtc-%F?8P4g>{&2g-+}8Va?P6u{{iUpi1@B$DDYGmTMi^!(;20^@<-b6POGkEivU^AnL7YrGzWvo zu!x2ixSo+OT5MBW1Dh(cuCdNb$c=0EGYd3Pc&BMWF>Gj?jQ$vIR@kqpg=>-C$RGZ; z!;?;J)-YWnQd?aVH{dl=%>d-GZ9A=j4%&OmPNtUnV)XSDrc>ggo5dxt_}ixzq3sQK z_zIBv4Nm8?H5(9}SVNQsDo1BgOa?Qc0ENh7B;^S7+6+lg1COLYu2=eqVlSU^-ETLjnRMhTWU{ha|pctWhxjaSl-pnNtNd^r?I z5Af7W?glnlQ3K*d2I705p7&SUgR^dWdgoZY-TX>0TO7Z4H#u-vfyK>y;h98`v7V*& zPG0UhvI!qIhSa{jrrx>vy1^1~!R`sG2sa7JmOzHv0S(v_XINL>hwpG;o+?1Rr@;d~ zsR48g68ajkb<8HbKC$eyEYDsi!%b8@L`vBN&r$9~jF6I)Q1R*2){t`C+=+{j;*L1p77GLA)_;XDl&S_32ac=IG1b~6GaV7iljN2pxA z$mB$n%@34AyA$v0ujB5M@!TKWo8ayaxKnY2N5B<&0|mDx_!*|y{pOOjP7Gre8@391 zTW&7HbHS$uQJMJR9eE+YTcArsz?mh3wO<8Z!~3f04}49>Swii2J7cVEzQ|m?{-*jY zQm~(3*4mHLX`q={nz0{_PF7@2f|4I7%8>CD1qKFvEqKKHxA~@Fy z@Hc^tD=}T@bkReBORp3D{`nVl;%-&JVI~MNBu|PArt+!FJ3zzN_0xcY zR&LvH-|-CKzwg@6aIdjkIY~9g+EQh|!F1Iio-BSvudxdDHPXvxyIxBbsu!K*KL#%h zlH=~~KRVr87)w)K!{6-=2{qoXl=iN-;8pdVjD6F(i7Wpe(B*N`gMV3m6g2IB=M6Q1 z@0;&pj7h=0OkQgi(0U;zT{r`Q9~L1Y^r1-%@*3sB)qA6YS1FR)Yu&g(B0CC=yPmSz*{ z{VHyw;`)_ACFHPLpOb-yp;tSWJGe7O)Isi(qJTc5azRZ;0X_tRFKO&FT>TF$F*AV>J(M zqY8a0!6kFo&->TNQRmz1@}RE1d0EC~$@%R1u3qJ;chCQ2cM(1XZ)nqfQrpmImyc-Y zw$Bdzs56MvNDhTdInX%dkjtKiiKrd?kuj!9zKf|RnhA?7j9it;(Vq=iM>@*poY+USApj$ z^wkr|QB37^uGX9^3eBMSmPLP)+qxUt5ND&0&yE`~hCc%ja-dw-?W)|`Hl=AT4_4?pU4Z+hg^rEZP7oiOwUu{h8 zeY*be^(?G+QD94;cZys6>fZs2t*enIoWVn7jPJJYDHBCt^E(qa0sgs)wN+jVO^6}T zsUT^`#OEWUK|iapQbC@Sp_pB6JKsP%ovFV8XP^ZcdrG;hdC`wiDUUfjQH-xi%Gl!f zRCYO!s!wfWfGQt{QXx>(o68{3ou41(Sc~GHI1fxJ^*+v`8|1t9L;mbcmrpvtFL4{f z=ou?MwK_o5KJUQ5P5HNn8zD++1I!n~D~yHmHHR1jvjxJ_1li~tOKrgR*=tZEFn5mS z#sH{Vw8%CISTvItm`A}Hqi@F+Vqb*bc)mC4On%y2Nn4gdyuNf_@-pM7P5M#ni0IhS 
z6LmgJ)Z44`W@CQMMLG@<63WuGFdrsjxz|8F@5a@M-il=1FT?e>4lj}v5B%QzeMG%B znKltz{eJ9kcnv~r`%CT}s=uty4(9g-AOl|6@$GG@E2iNwl!)G6eaHS*RtvV9QLWCWjoj^nz;}-xDBryNxnft#Jf36A6wYn zc+Q^zNKhT^Xuk0%IOO3~Ge$z6jd9U@&BbFO1D&_p_?r`>7AU9gM?0#UhP?rh7lCVU z10e59-UV3oRQ%@m9W}l)@#=wZ9;aQV=X*=w00KaXd=i>npu5lh-v1ltM>s8UuFfb9Ny+kJJ8kZu)VG(m7oCu_65p%)(T* z0I#R_E2f9pXN>4!FP<;mHwI-x+cJ{r=lU=i#ZlMB{J0Fvzl!%M%0$h2^%Qvz2D2%p zB8>ve+gL&yua{A6pDLGDty4SSx%YTRC_AG0@C7@}6RW^n*aPr} z{Z&svr--^2u$X5Kc^1Z<L4O=J)2xY_HIX z9GVEi9K&;;aU>wX&m-^u$fXcYJVSVoVAgT zDy!w$UZlDQ;Eir=s^Ewru_|T_oElBV%6&yJ(bpfQnIMwfk*v zJC!2iXgdjp755t^#X8 zyFpwAZJ{RYo(sjMpPfwe3b`szu!v4AS3MaiYP<)OUwL_kLLPogeUrQlH!UW))p_g9yG?!Oyp0>V5TK{femf z=q<_HYsW%7g~rX#B2=A)kp))DC4PV>ewCB@1gER#OgL1>)wC4`sdYV!NmTcZbVFqW zEY~%6Z(Gv>Lv#Hb!}N&`&SK8WuwH*Rm1>x!nD{j=kH}<+tJCh0N$L{Vq~YNtFE2?o zqnOvmNt8_*Smrwi_ms!owb&&ohtl)W!v_;3&saH%eHjYNXpFVwWT<6$dfBW`N{9orKY?}g2!56F#Qi7(3w8-HrnIAFHeFZZ!Vr!7jS8fac9!qiAW zO{km`Z4}}Vm-$W-MhE2kJQZ^2BLFp*OT;5?911aUMjlfHQQpGARK>Mg*DX(B{+in- zdFXjPB^nTLXvj2GU_HU<#(ZS0d*U}ihvFwH(+%Dce;Va|;#R2!!-{g3M%!30*p^UX zSKixbsa#WB52&t34Z+0&V&EJO@bLB|mjOjrE{)&@iK`sO!PP+${^GGEMj!oS>3?Od z(z-O!o>EHR?Wxq*+dkR4T_z@B!~zrSU2y!>#FUzokSBei#lN}RlP@`znm)|;{1%;C z7f{|{N;>u73p>5axgb{$w zO@PuZzg?qCF%*op5!$S}4O14DK8f&;Mz21XP;8P2+*Djz7xeRjKbQyBqHTU0E4P10 z*K<}DvAgp|)h4lCB&`Z+IK0)3#RJ=U76X43B!Q793L*X;=95>{fe{IJl{VF`1JbD6&4j6 zhUF7MW+HywA#%;rYEeaaFA@Jh^&WRSWWh&BeeSjauQjO>dhu9KRn)&dML%0cMDQsx zPpe6*UxWl_GTQ=K)`-#nc~I_D=_766ni_yHEOs>!H7;l#N)a2vG~AJvyj5Tqm~0&7 zCmHknA>H?D7qL~Ksbbq&VOTgl@p+cw3zN5eCjU}mGfgm$zcbAO{rbM5|aYsk+_A?)6@hkA)0au@CSzO?glGs>&} z@?Q4RF|Y@3Ao)p+76Ou%7o~6gsqy{Uo<9_mkp00hilC@tns(u#>p@$=g^O0zDS``4 zx0m$pZ^kvDIP~*J)3rF8o;lvJ<5fA*yeeS__Bc_Uptr66*!;4zeUv*&QguV>Xi-S2podm0ikn6!(kw*yY*K!(lFQo|J6EP$zaLy5FsLXv|#!+>ig}vRTEO zbx7(&ez^X(8S!-P=D*FG@9DzEmGdS!b^Sl;QWjir#ZUTymi-M+k`QgMzZOI4_?;=V z+H=KU;g%OJD*mLjMb8Ru4_vw6067yW3?KCSscEw>?SJ|C{e$0nqGC}y3ra8!8^cJ| zQtA!Ox38E}60sctgLUoaZMJg??X`kyLkNu+o@CogNoIQ;%vEsyavy5DfuA!YkmG<`Iq5j1i*GL^tLq>^7)XW_Q# zJ20g1yI%JTj 
znT`?JPrIWVlWsV4=Z9bV?{yL8Es#!9fLR_2D$`7VrP7W)(@DD6i>@-jB16dmhBRWE z!a`h=;6Yv;;nfoLDM|cSs%{!cO8KiNwo6RW36qlsH{Ya!oVOA;D!LcmMzyntfPzs2V z2+m@Wk=s2cY)jp87@I*?u@}sadkCdK3N!#r#9165>BA70!#~{qaa*>-@sq>Lgq+J6 zl4G_uoOK_C+NWVuPS|O4=LJD%_8lwd9Uv1jbWkz^pl1Q1GYAq29}+Htrp2e&p1u2u zQW-*A?4Ad5B4TNv428W;^TyV7pxMh%`zfjiJ$C!4HT?-hFTzCH<;`HkYj85iEwv^b zEf9QsMrv{pYtkK48*73yCdXRI_eD8$gelRgDC&5>Uu%OV(F9LWIDk@wktdW2rYiL4 zz)%=Ts6tjqAy(*`D@Y;KQqisCwiU==6em^|vuxE@-4_~6xK6q93~iW!K}Jg790 zD3fo@^O|*aErnJHktRNb7+JQFag+r=j`c1IJGvHE*VF!V9AlQw29d-z_nk{@olH#F zN8G1sR%UCEK?2|(?`H^r(5bZ5#Q^O*?}z?h&u z8^^0%B&m&-2Cc>{GIdMC3?i2{_#s6ql|^il5G1HxhkUes*_P%dUeVZ>R8Vpb|2{gDQ~{meSIT0}_?8R46*YHNX|XK$%3%RppIU-6j>7q!n0&RmRY^ zT?JLN^8!_^Vpwh8_Z=99i>0$CkyT22X^2W%DjEMxJ-^JTLqjh1v!={JKY3lcf@s&B z3%a6v*9x0i*#t89kp^d0yXKeJJn1p#aWO8Jr~V}#TG#Q3d!}fL5DG{6GMxRM?#Z(5 znW-*2A}FMP1ASdOa%wALU7?nNK6_E7RfisvW$tF~Dgujn7DUSDFHJvzx`4DIx-G3CA2iA_)Hc6(VRX@Z? zc`m`{z0~HK=l+-s?)@t2Dg*S5--nLq^}QBk_{$fGaHXL=w$#hRd@Yg@n^NW|^3Bto5g|-Z7t5yRXYIYT(XNtcp?@ zpEKjAK*u4$KI|WKp2c1>ve}KQm0V``V2HAbult^F;pbmt0ore zCRR>fu4YnQQc@%swQ8dfX+$@36DLk9o`5|D^VQ~F5E5-35gb7%Fa5t`9w-3ptj+dc z{GP2i@s2`46j${OZhJe61f;hj)DNFJNCBk$7?7+2q%Z&gu$<@}-|?>J2b5t5tJE!w z%1ZyrEq#?j4^l0#G#aTe8l&I>$H6|y6fY5*KOyP4++Yj{TZa!W9G6u;l~phb1G}Pi zehrhcw|>qssZOv<1~ut0XaL2z3)xT!XvVIdpN+f#6;J{d+V?zL5<((;;6gBj3`3Od z(d%4MckOHsBbU~^UkOn*Vu4rw<3zRgH|#}I7D_j*Rz`9XzA?xp#rmse0HI}4C0;To z4xtk7kgV!RC~*E?`jzjLayW#NIg>x|mlG45EC3e})S)sAw$kwWc&=Bd-m1JRKzcjh zR-`>KkME|{9&h`yJg)^JG>y@yBH|d((x^XHXoR2$|7r*@j~TjeLf-THeP9Yx;KI?+ z^SA+~D``K;K|j27OvO~!&2SvXWK6_h4966)+J74@86M=T>;H2mtUHtCp&sEelL-?Z z+mR61aS+>49^SP;r+^)F01#)?oN55TU;qiIO7S7okPn|eejxHO^spjBi+v19>_do; zpNoD#QuK(CqM?SMLOuTUD9V%z8@`6g95(Y=&6><+)}*;pr>|$eWIp?a^C!-pIBnK6 zM$;G#d#B8(p+knwoKs@Xj2Uz0%$zxNu9~5vhUy$OUdNa*vo<)^TC`|w_b|QH(Wp{|1Y{opea+4_T+`WSb!5{zu z7Ha(RW$f2$U$0@yj_oU$?AyPR(boMs_pe~FS<4m%Omh|56!1X<50p?sdg`$7!c!`|a6=32=#axYJhbo>4n+jfLVD;qg~U4S zG3_)5PQh|_7#n`M)U``B-SksXKlL{#tCW#gbXcAr+dT6AxMj8eixY@>tZMu1ohHbbZ+iWAmjz++>*G9W-BgQdeZ2-5~X6>)L 
z;b!i-Aygne48AdEaC5>bd~n0Z8N3_B7bl!?#tnDe9K{cpoN>Yp-^LuwZ3sXgT0HVsaQ_uB83SZ>*#pcPqUqX80e6nZ2AJi8RKD zdgkDYE3lj&Lk#DmA7c*cuLnze=f!}&`sl$2OAP9_55IbPW++Y=VSW)N7hr%9rkY^l zw?F=B*axN>`}Nmf{^H#4e(nok{Msjs7zpe%8L&Y&x-kW9q|Je1bD%f8!416`;TqWR zM%cPxL2rDp8{im+1wR-<3}R3T+vuPJvC$0{ZetwbC`S#4R}Nx4gBZ>zhck$QLm&3= z8QgeT$a=U#Vl?ArCgX+=OV*4emh6W=>>=aD_EW0F7dBgBXJpBr&29jU*Z)8c^hh zHx4<-Lmm=}h(x3yi-AZ(?(9l|5C8@IZWtnS67h;r4j-8lOFJ{?COyec;#rAj#B-99o>B+tLGO9{^d49G zIlbrobD#VaC_(x8ygAfi2EPEtGL``h_z6&ex(MJyA1YD#-LE(owf
  • UY0h1Y;Kp zGzbRLh7E3%1BMu!paU`J!IqALr7flD1ZV0%nBG)xIOLfjxzR~N?$8^~=wwleQOGL- zaga+506D^;OIH$;nZkT5 zF=@HXY+!?yt?Xqha|w=Iwz8O_t)*qPAr5VX!$>xP1wd(wzH+JbEvrweHg@> zj6DYOlBiB|&Xaf|TW2|o@r>aS_a<0EE^>_}q$P;Qg+6xuVd90)rP0l_+}!xC~uLR2RB%XnY$=UtrMJzR8GRF5H{mYE(b~rF_6K zLI^{Zz7VGlRB25e+>Hi5_`xcjX*qtH#AXz-kb2E4Qjf~lEAC9gQ}qUkryA6qMZ+1; z$SN#gA;zzgm8>o{Ym9At0<*#btvGfuj(r?s9Dh~Eu8_qtpvul>6a$LQ(4k&A%rhl| zII2?~Y!Z{W$Y+$}8OOlDX$F7+$G$R`u4LvfZwU@;n7Np?yoM{mG1@n4*3Fy6B{H!w zOmzGb9kFcfYis*wKnGgTuj-~jEN~3u5-HK1OmuVpkBi9VE*iMzl=P$*`4l@8l)9Vd zbf>wJT~CL1dguiPf0q$a`?2@b>AfFE<9AW`B?HwBg>|Xh3k+(IQ~(&LLOIH|U|>U7 z!oF@POeHLA1T)x$5N;%tm+HeNE;ZWE9;6SC>O;I*JKIqu*|hOW?M|}T8nK8gS6%Un zSDgFCT3su;$NJTDzjdvAOg9SDZEkj-pxv;DguYX{)xL8n~hD`)vWsELpC0N~Kk zARf&p{m$o_6H4^l`7n}9yyYGj=ru6{JwpBs>QHYQ=}9;5d2R4s{xUQd$>1nQsov_T zr#gP7zIChP#|vQ8fTRJyKySCf!DH9d*b+82OpSe04Dyt!Rju~itKCGRetX+H6eM0b z>BK1$Rf>WvStprM3$Mlku4@IWx=|2svO?a*c4U0I`S|X1xBIMpv<5VoN@b}=mEZ}d z_UBD6`lAkm%YbuiYFU|DGgG|HWzH-z(fnpyVw~c*zj&QPYvx-zv$f1=dGU>ZeAucE z0AxQ#&UJqC;YFj;H>bIyZT|eH&?BFpW;&>s-t>WbztdN-gZirOYFSr(>$A3~*4@wF zS9`tv!mz-AOZp9Bw={$b)>}&>tp4`@tEm8VAaiIi5Av{PP^zesq8)OEZv(w(gEo^v zB5R8x4=W@GY$9kV0U(R9E|NTbgEz+WKy#Bkb+bIKVmvR(JhcO%jw)_*ou<^CEt0|Ovu(f-{x3i!h3Yi=vnH)?a zqza-P>NW#n{CF zL$l1ny^bq0%^EXiQb*RhLN0tqc#KEyfVuRr02^q->$^UoBf~coI(?)xOJg!4u!HUE zK0C~=F0e!Bkq7mOKlU0#tBa^*;5tD>wTYA{_$$O$%Y|ytH4LZ$ubCkJi$woRy9pvm znhL;_!o*V~MY?-7q9Vm>+)9rE6B**-$DmWE>5IN~S~TYZF2gjQI4sCHG)V5N z!|&TCQcE?BG{nlJ$Ul5F^bl=H$bjJZRr zr@@r8=lcFf!JH@QQz!l;hGAIDfzmWji_D`_y5~vA>#+kHD2|K-hTG`O_KUytf=E`o zI*1aWS?e!|VxRhZxN6`W3&MtND2M&4pmGQY7@{edVo|rF#0iS9|GTOy!Zt>tzy+kL zDdR*QjVhi3qP}B5AAPn2YzAm>MX>UsA$tW}G(mP7L01^E5G+cdL`o;cH=*P=e=|7h zEV!O(D!#f(Bznd`O0v~Eh{jUFacHy3a-~_yEIDIKWE#tK3`f&yOU-&L&Z0sqd{6ld zR6!jydOVFo`#GB%Of?)HG~~zU6U;L-Ofp;^f*h!=pe{0CP=(yoeM+c?EVWbPuZWU0 z0sewNK-^GP!zc}{P=~y_Tsw_j^B9^su+tR4U~@2?velE+n@++u1&qlk+e9l9R@$7l z9sSYVq$&qgqG#Yha=X0A8^L({Krga4S~SWKOwKAb%3c&fKC*_+b4uwXQ*jNvXPinf 
zC8Xpk2F0>Q001-18V6ny2Vr8Sj2lOB94+(2IJ5M)v^*xX?6~0@RDvzoY@*NR0K)=1 zE;b}g=VLB5EW?{KuE4Z8E$OZ~0563^P)_aCI(X1ld#GT5pNl+14JFxH`>(7!M6DA$ zz`?(>qg7xlO_OXp)ND~^%f!R7Nm8^n30x{ha!sIhBHctH4CE<6%9(J>s-Aqs{(N&c zUi?KFTS0ZxR&JG2qU2T=w8}4CxS2#fFQu{>OgJvHMzSLZR;og6a8r^SGq=s9JI&KN zT|!_=!d%**j`LHx1XP3VTfa3OFtmU%yg9?XK1Efh9CB0u-NzsqNaq1HO>NBaI!J|_ zgYwfl{~E*$b;wffT(8?YT7#%Obk)VO0GMSdm>SKSMaln*!~nD~Bnnog3fgCb&26i| z!t2o~Dpo4vp-xf;SDZx>gw`s}H+}oSc9XFbWI-u)H*`Zzt*V7(h{>_7D;w;oqLnhQ z)xo^V&THgO(6Xg^J*~(JIgaZ%xct5F9J7F>y<2j#XyV(yonQLxLg*;|R6{$4zXV9? z%g0J9r=TmRh9yIvqtt>DKLz!)w&+-dvfLSfx?mW@tIJ%Bx+o3RTt7TDi)`7kGpUeF zyGFcOT|G7h(=a8&$!Oz5V-36&CcKucwyLVC+mtG9@Xd280iUeeX{}mbG+rA+L3%qk zYlT+E)3FnnD}>9dz`EWqmBBBK9imE8)Ce5alGnA(2633ZSwcdOtFv^JEYBjx&vM)F zoWj!rhk>nMHD2RZ*~^Es0dnfcew0+fjJ`xO-0Ew??0a14;n+H4(8$bGrt{Q2Os@|8 zT=~N&K*Uf)cE4P+zjr+bZg?SYP%xPdz!QZb56Wa2-Bpt;v7!D_vQ(7Gxf8s?lg%i) zyV9${Z}VMdXhnAGB6f>0<0MX@w7hm(D|-{h<%HH=49*~H%IoCbaSc<{132!5&T=>g z?|oNuKnEoxxiWjR_eIO{&A4;SIL&fPCWOmruDCyCV{snm?@&Gg5JMtqhJWNwdg4cl zm1F*mzH~Y+$E~i61wRI++<|i70E$pw*vN+>RnVm~C{n=PbYU3oA!XpnS0rM53rcMjN}^0bumWb|G--45qJEnN zV?;Paaz-*;xb4MCu`RfUQ&VzirPuq)*rPbReaq0=to}1Q-`z_>^sU=pn#;N*XR5Ah z>%d=`dmRF%GyrZx;3}tK$m2TBp`Sx80==FD)n`$wLr*Or^}^r?wWzGSs8Cf^uM^ey zsh_I@yI0lD-m0Jpd#Q$2UDI@E6qPo-!mDXZ)>9@t-ArMe1X^KDVVEQtWnkJR&AeQ` zJSnwB;VjDM#a5^V#u%$H9Rt@FL`DvKz$P~4b0ya!lj3&`>eEstdX+-VB1b7S$JujB zex1F~^2TGT!ZfyO<34UMEW01cAty<+#O+VR-A7SYTzRH$<2q|Kc+3OMXH4a^gaTPY zd?<#pP?lA{f_A@DeJ@}bDf|1fV}R%vN-)4i{x*kBNs??`3bWB4ia?>=QIhddYzsx7 zZ9tH<)t@VgtsU~Y0fU*uR>ztaqT2S=IINdqJ6nCZcG-@TphC2hfURql^Gs|sWCR<9ga6WDwr(c9k*q95a4cEuq5xSd*hUTt0 z00k1lg+88%!|o0*jwJ&rudYCTa!;ck^h$<|WIqcAU0CC~iF~LHF11tbT#KS8(EJ7m zli6bnn@WT*V>1U2iZGqRh6byv3H-1hswxjd>}tcIKBp?Y|lF&TgcC-E)eSwQ6e0Vr&fcD&nqCbU}#IqYfnY9}i58)F+FqX3O5TexxW1$$tp(q0+Z!m@$%CHYp zG9_~=3lrrL3&o#OvXbGc8(LP8sUi&=w^&eg8{@#0zC3JQQYQ5(6fkWdJ5F89H?u+k zYfuKF+VG<~;b0w}BxACo8Xo>E(!>rgBx7(*Xe?JW)yk5hZE=|Tv$QN1PcvN~rfV37 zYqq5<20oI5C1t{TZwyOc2X?go+vI*7Zn(#E5+rx}k)cvOUx0>ZV6u?WBqmu`BJm+6 
ziCDU7dwL+BaQ`Q}n4U9OU@>Tm#ox5U|FnL3e0<(#h0=xfn)h7TD6n(u-4N*biLa@1 zuWHDA&7|w{28IN(zW}fRBM70*!C4Etst_871LOwINuiXYQ5>=$Vx!d*k}yGvK?kfM zE5V^<53F)HEJE^z-g<_T&z(%C!Z05dF=tSQ5`e{|ys>9xt9c`%2}h&m ztTA`Pa7agiSy%?Yi~h+?8Yg%X86~Pp zr*kLIo<4s94Jvdf(V|9=B2B7vDbuD-pF&NFFCkS6$BGp*cI=oiWz4W*H8#u`v9ZBw zY#j!6*s!zJYJ4jeO>Hw{c8M7STkM%$8uZ495i^I*m@#C=n4!aku$VE7#n4gA#gnH~ zTdp)=eN z0~UZCcF18d(A46JD<_l?ODrd%kb;XSoM7S!F|K$*i!Z_m%ZxX=Xrqm;ydtBJK%NMT zE3u#m4Ke#UQ_P0NFgTt!0%hgRKu{VZ%vLsdbDlTD95Wz;HaLkFh*&KNVl%`Xg8@|l zU?9av;f$lrIM|Ux4mjFiBhpMSp<^dGMvx;;L^a)s-8kd4G|^1ih{VyOQjeYsiw+((w_U9i~ z$9%A=1veZ+%z+%1(~QZsvT6f_R{F<`4GJVMgOv%cnPxcxakF)k9abYlj7Qd(f(iZ= zRB!qkrOdvNCz`{swJ3$KWWrDL^AO$l@ z!392Pf(_v7M}CtLBmuWdS6HJK)gVSR@=_2tu7Cv?_@goQW2HK#Z+-?)zzqZfjSP%n zeIqbH5{(fJMTTx5!{`s2N_R1F1S%Wj_~QI9U;x=^Ln9G^i8yWm{;u7rgC^cwK?_#E zfCqe|cO2Oh@md0@lny9fqq`n3xf z?4qJIAVC61KpbH7A_hJf*lK=rn|;kIV!+rz#ZCtu{9wZyGXNuYY+wO~}HPL3V2JMeXFQ8{D~1eW=qV{tQw=2VFr848qX-R!;*oV)F3ArZKl|p z7~+j48sh^AFv5JUXaFurN3!X0VtT&uvBAc<3iv5mgfWI6G)8vQMsuh$%5jV!bBP}{T zC7%&w7$V|OM;kqlmC#ATMi_@WT1NmQ82%xg5Xr`aS73oE7+`nez{U+UOpb3DVXri> z2*v+|JW1h`QkF_F%Uka9mq%})RCxeeFaRB}9P|cxDVAECF%}!ZAa}va%9)^%!x|KW z8}ojsGdlzph(?13DqX2#>L7szV2})EK-V3*@R`5>kPK7^00=PH(hxMj6GrU~JR6$*I zocN8C8G#$v;06l_aD(~mY}3Zrh7G>A0w>0~1~oGR4463|P2=a(XH3s*Kh!3xfS2ZpW)-9Sh{3h|7C5g5P?e8YrMoCiL@4Hy9p zD3}rKiJ|<7L?}--Fwcm=+zirS4cZ`4$Q%F&fQ$XrRy4yg%z*tsgEBP3SLDS(_z^-i zRFm`)H~3WskO`ZlSN{xIu%%RCn3-cRfCJc68PGu&5WpQUM!N`*lLY|ViNRrXK><(! 
z7k~hjnZZ(ph8V!hY%ItAU%3XI$v|1HhB5@vaR@`R!GLK18eLt0!4RSlxtVE1-Vwox zdvw}gL84xL;CpmUb#TKscvAxK6^8^FAQ?jyWWWGqK$l3z>&@PMI3FD?NDo2?F$CcX z&>JEBOa@rY01VxJ)IfyoTh#zalh7N|AQCZTPSP<0iNMHnWgiOYhzc~9w7~)jIHTU6 zK#L&Pvz-8QA%Fl(*R%OejEq~5U>7VPNeL*)lt6>4^&nmVA3{}wLuCaY?a~1p)>UlH zFu;!i41fhZj(EXiG${_702lxSzypxOOI!mvJODh^i8$DR0knWNtVHsF1MWZop=6vl zAmj`vz_*Yhp@jaEpLj&_Jj6tR*f{9Z4XWfyvgALlSWjqARiFi{2m@OTgGB|)SAfOj zzySS>fO+kOKp2A!?9vSQ0$J!}vZw`CJOf+MKpo6MRGOKVb%0WRApw9u2mufl(!nXv zlsy;*WHdkroB|!Zp-su5wA6tOoRDqUhAmjYA*zN9un=z$VlF&PoQ;4C9Y-$sKwPc{ zZM@kK)twPp3;+xO22_JLV8b{VgXLgH6WIVRx(5?ACO4E!Pg)M8eSJwGjNFWCEa^4 zLkZ+aCjOm3+ISzhao_ljQusMnHJVLw8A)`VlG@lvIA+)TS(o5g!!k@0GcW_`@EDZr zVlE0qun_|c^yCBlBv@<=4BSAR5lx!#$CWsp(j7#j9i%ph!#6-c2I$F47}yG=ggAJ_ zIG7U+ki$5D5rdte{HzZ%eZ!vEiAKD{MVO>G(1{GTWQdCBh)z!qF2q*!;8JeZF$5b} zNXS^6*S#@Ak-ZoT9K%=`L$TQ$bvE?ovI!L2K16 z23;K{oN!4KU7iKFM-$CJzd6y(*hhDSgE_p60vyA9T*nOPs|%b~$ONjU;mmYk=GBzR zf8kz&98_;oDmVB?%Xp8AWdH_<)d~y%3tULov>JdU&On@p;%G?N#A-Ck4XwsP2{4y( z!3g*@7mR@0*{A@m&d4Kmk_osjB)Q+)uwOEkfHf!yH+;=wLdUA*+*SS<3pYU2AUR>j zTnJVq4KZX#d=<@sMALpGQo&sl3@W5S%99Ha6F8)VM7DqexPT3uASu!SHjsnD1q#Gv z9HY$BMC?hU91o>DkHX?_{_5{f#3VtwSe*Gl(L7Pr+^G2+BUo%j?)X5ezS;rY+b}$X zsP@b=L<4M{ESf}Jvp7&1l7W*E03B4RSyGv~&_Nc`K>>7t*+oVHfQz>bzyJ^cxyWG` z9L5~T!wekWzfeOC#lSDv#t0lh4D3q~X)VKaZJGWh1_*R&YmU6;^bk)s_L|cr&!gY0$ z`jr4FnctDj?j-xx2CScnl=3&uND0)!GCae62n5F@fP1}K)j)%e7N=cAgE#oVA195d zCXF#zR1z5*15!zUK#3z_06DnC`Wk={%*(V!DZmGn;O{guHi3f&BB(|5 zj!4wRhee8r-S7UQb2_sm!|sDVyomrP0GDi*gVaC|0v}Mq=#2tlpv4WW4-CN30wmLVdG4AZ(wg*gk>%DS*$8v#jfJrWMq)b7JyAb1|5h&Vi1N|f{;?BWnn07ZOBk+xJK@5013#= z3nYULB|vHnwFZy?39tZ&SAz`6K)}GW28@6VjBRQxMLeV6jRr zebE+;n*6W;`)rXph)gO9uAml@e^js=akn@1<}l$$wErVj4k2(8M@=00DeJ z3qZg(EeZxGfW2me5h%ff&I2=vL`Aws?PUZ{m^<;9F@tT7k?JuKNuMFtm8 zi?vX}DU1QP6hPK_AquPe%>srQ(6CuDz#359QVprn(y4FcLNL^eA6bCD^oFkpLoWye z0vv#~Cx8xBEx=U30UQ7VApSsIwi)Eroedr4LWGRY;74eB46?h2jVHi+B=`3W6BMRV zdITBlK%psaLs#R>R@aP|053HGC&*XMffpWyR(gfCYTE|5PMGM;g~c%(rLer{jUtOL^fOx^uxh)P`CKp+ 
zG9|b-%es`XVt5Ac{!`r?JT%UxS!6VLme%1Ss>Uwlp={s=ix-h=D1Y-m-dnjw48_a! zFJe76+I9%8<&8Y(4OZ+4)?mGd>=EjA%nXqWYM^ol(m2W1NK?_cmxw^lez+H#u!)rb z$SN*Q(gmb&0z@-s#E3cLR?t~Fg3x5Ra`@2I2^1(&Og!OgB8rPABx*d7g5#@>87oS3 zB~s+cixFR?eC5ho324O#4kK7>n6YC!j}gPUlb|!6JckwYMr_znpv7nsyy?@a&tnz< zR0V*695`*+!d(M5j@&r0;k1g?s_pDov)HtW75mLCxNK~%v6Y)Gn>K9Vu5k@VLqNWL zg9#Tld>C>5V#SLYH+K9Oa%9PqDOa|98FOaI_pDY`aEz%jrlUzSElRWW=wZX2g=X41 zwa?M5hczQc!;Y9SyLXe}{hPOM-@k_gH-5bM@iFvf0CS!U*s^87f;qPy9sBiM)3I~6 zuIsz=?bn}A&jpN{1p!qDSTN$PTsQgKx|QQbUw(Y~`Mq`HR)4?w{tJ-60{7ETzG4m{ zrWxA^I>;bm%5i2Og(y6z9NV}V$U%Z^8|Xq17j$YMVjwILni3#di6w_#ifE;lQrZX> zhfsLvqLf2O4gk@{8!^M-(<9`)!XB~XTxc1H|LIU{4hQxL~)Vi1ZAgegu@Qc^I`83>TW9C!*-I9BE* zHt9?*Y{A~ms&^Nhp=B;CgW1fq(xq{L?;hwAi(mX?cq?uaQ zaH6%+IIEVLlMWr3S(1r!!hC)W9OnE)&LhOo$Gn8Qs*Ih(kH~N=KUg8pi zDGW#4d39HFsqAVM?`@wajon{s}oaV4Rp#zjJlJO7K2BzjWMN}yn9emEO5_TV&EJ4VTpH@`xdvig7n(?`>*8U9pizrh#Nq^^JKb6ZcFk?3?ul+?;=x#Q z5m+qI6}4D~7wzi1;a$$ZViaCFn-oSYii|xESl*a=)}@(=sV;WBOU&@NvYp|KW_z)o z%dn9qviK>pAXfFNS*@thKJ6GqIk9R&h2s2SO>3@&q(_Hggxt(nIXFgXZP8YS--v-8 z2?U1QfV}Kg5;<-c{BcfWN}UEyM}zEn{+526EFW%QFa>G@~ZU*Wdp_K{sQ>FeQp)B@ru8qI(BVz8V^2ms9k_ zn_3OPRt3T0K*yI<=HP6Mqk484wO(8nvzsNy_VWL^h-1txqcbJf7B%>inZ0|Q+wBBtFT;J*)qQhLo zqJ!1Q6?A<##3OFkEHaw$P5SHp$jFhGpRG|(W^x%?`bwfJ^35&1SXVmg$9_V z`RZA}`p<&4QSGB-H1yY6tlh7Xsj)TH8f5gP(^#cwo5PNy&TQ#G5LwVU&jT$;(^y6a zJTG+)FMv@?zF7ddyDay(qkHZG!QF=O3d%vqwcWI{%b1y~?hOT^jE2COMY40<;EOn4 z`Lnmr?tLr25jF#JZ-$d@uL{==XSI4(x!(?xxzF&y}8T zJ79@dh|D{TWf`b1+c5s_8>DPMn2YShjr*9eKoG~PhNKvpAziv_?bIb+$VKf4B3&2+ za&+iOxXN6h!J1ZNo8s?buIb+pj)@A-h}h7r)pC&o+M-^ zw(gA5hyt(SOK_$uTIPB*?*dImW^jhn@aPfQWKI4kd_oWtGm)ZRuP|QiQ7lE0<_D9q zX4fPok}#zxPVmKi?Nnya6@gDUP9@oPVYf<&Jd6yuauMrjYJqa9`iu(zwxr2`sc=Rp zTgDElo@=>o2qC13%(AM`JcRDDOU;bwNr-{G@Raw?!ur5@+{E8(n!w( zHIX435`8$4FhVN;WUr$hiGL~**Hm!@S1~s_>F8du#&FA6F6ilUg*%k*fu>ESm`|00 z!IhQ_XN3Q2Pjk3#Sm8Hpg7jzza#r3>s{s7(gd=IHE@WWvu|M z-@tJtmI%EBCLHw)n_NK#BnH16udnpbClIX|Bo3aSLQ{?>uvw)E4U? 
zDe&{`#E$3$p!mW}7SbX6(l5VHO3sY#rBIgI!bWiAVBF28>40O!4dZQIp zsxpCN{`f>mrl?8lN^>}p`l4l45uT4fA|??EHZTXp#+9&c2d^WQmH{UF#vGtfJ_d(z!mb<4p+Cw2aQcHsZ+6XqNEtQ5aKXqW4@{!pkW#E4i@B$-f5AZJI4uN@-V*ui17L`FikeT2#nvkvd38 zI&z^1pQZ+kYMB!rc0#KP}d*H+?$g{%E#AP0E^vEI+&GX~RGfgVhPwG=sV^(G@ z(=f{H$rYj6h_HNnlO61Ut8w}TcZaKR1SNN5}6F8)K-X+oAM3@bMw zCS)h=BXaD+L?S103_|WGS3*u#Au^;vmT4Qn${JoF7G&gLOJ^L}P+!ATM*8(&Yvf>M z4-gL5c)2eZ$7MY2 zJt;9~K5t|tl{~uwpv1u(UUp{N*L@EIA_pV1*atMWW@}`F8M5Xys^)%~B7aBYe=CU@ zW`YLlU=AD_(9Ep3_w&35;%bs_<<{UH$c=3TG0$JxPhBamV}{#W@^an zMuLP3SEzwktz#Fe!2+@brbZSR!O#im22?Mum4ku_t(k8;(IVjDdF)q$jNCCVJwDiNPECcqaUhCW^ut zzQN*<2P+y;O~Axv-V$u$a$ULnHqM$ z2ZVtdytx|MdFvFw2fpDOj$s~-!Jh?sC;r)=``HML;2UhfpKZVh2)duYL7@@)7>u9_ zc0!>SdY}dRpE-Ibs^OJE8l*#7mOol4Mw%y3`WOI*KzP4KTBVhu2CAVNbf5%MfTn4h zB5E3^b6Tf&nx`wGrf<5Zc|fJ>silXas9idxOB$(P*#{Ev0p=kB3k#0G!F$BlpROlo z0IEE-mrc|}d(p(9{(=DT;grkzefc4ktzrNkzydJ90@~WG-#V_{8Uy0`t=k%}=i094 z8m{kJuHm|_F~9&IpaBZo0Seo&4;!%;o3R^vu@&3?u_4>B7yAJepsg>U0`B?(F2Jul z+p|9#v^|@%Lz{1cF$4OVt^-@OSDUpl-~n12wqyIXF(3m}8?aeBuLB#fb33(dd$)BP zxM4d3Dxd-`U;&8x0ydxmHUP7mySR&cvzhz3HygT(Te_PYv#oo&x0||?8@YLVuYDW0 zQJb!R8@$E4tup|IJ;Ml?$I`9=W#g93L0FQ5rnLwFJTiKmf8t!XaG4H=M&4e7!du!}(?aEZoAIrfJrr z#8dp1oCd{L+{9fR#Y_F$g^a}*#p9%oWeUp05UwtYuv~=oX4xe0IDDy%ABh2I~;z70&m7E zGAzwKFU_m^OT>YV?$UcPFRJmv5-%0O`y5mAq00kZ&<9=1OWecNTE?Ax$(0<$7u?VZ z9nvFR(gz*OC*9I79n&*i(>I;d8$|#N;2X?Altywt;tMK=pi`;2UxT*Js;N{GL=C#;2X(VO-p;d_BG$+8rI+IXuxl zz1f$2+8sW|J3iaDJ={kg+*$n53th@Ro#9#Bz%st$g*?hpJWD=4+5=z!n%%~UJi~Fk z%W*y!o88(qJm`tt=d*m#dw#+-JjWS40feD8{1X&k4A*eSk6lAGL<8ueV%Ma;kGUQO z4uH=KUhG5g)z~MrEnu`YyR*~Yui5_W+kUg*ezTh!?cW-+(SEcu8|^cjv-=*p>;CTV z-U9TVxBgk7f z@MmHoDeCW+kzgP&#$NtE5ZD!92f+FMcLAOCADrzun|A;pgzeffkXnr$1`+NO7DE^{ zf;${Ge7MWt!-v%d?%1J2M+{lDJZ{v%aYu|89#K+5X|f~AmnUDQ6scon&5<%c#wfYN zmaUB&PukiE6ev;to=8cKEcy^=(~V7c^5Ahn0D!9(cqrw;u_H;ZBfWMk`E_a6uVa;> zReKhOyi=#>aH+$E%c;6^xp<;m#}i(=c<*wNn^!Plx_|{I9t;?-mMxF9NPY~N%jC(B zF<;JnS+nQPksrg0w}nfqRj607N}YOj>(r`PtCsB=HtX1_H_2`_;i^>&)rxT&W~>;o 
zV#uQ*UtVn3Fy_RbF`vF%I`v}DoG~*ly|}R%0`%=0PrkhQ^XSv7U(de1`}gqU%b!ob zzWw|7^UKF~wZDZLzyt_PE&>La3xUAwV&H%a3Ugq9yF5ssf(s7lAcDX+xQs5l*m4Uk zw}eQ`h#>xc2qG=ElsMvtCf-uZi7U2OwfkhUkW^u)-rlyK&tEXo1YOAu+nkuWX#_DRSR}jHUCX_^?$tIc< zYwWR_2#f5p$|}2Tvd|t&Y_Y5K)*l8waKlY=#V}*ta?c?bF1O@z>m0buoqHX1FbHrD zKmPi~EAPDY)@$#*_~vWhJ^fu}K@0}wk_>?lRwyuo1SSK_zzhl~FoF;(JTSuuF0)LC z7?+4Ki#m$P;>R0@%(2EHll<|Byrg^!FDvsxvdg^unBvlPNXIQIkociB?B_3k-D}D#jONjIHR|Xe;_f z+HALtw%Bf`UG^JScwzSwc-!s86MFNFw-tNm?Zp*-^NmH{gZr&F6m0lS_}+FyvG)~g zuo6inkW2!}=9*W6`R9;e9=hnBi@rJNqlcb3>65fx$t014&Plaf{#T50&#C(@{-_W3M=$;K)R)I^zZOtfaK!{e?658iV;CU#;17r) z`QrC7ATPPx(#wY_yVO?FQHwQcO@wID+L}g4Lak{{YGL}CJPaiU zL1jv8hsxC1ZYYKv;_!z()S;qs__iS)Dp6>&0~@@6D=aJ#3r~Pm6r-4{Dnjv!t)ik9 zvzRL?9)WVH(8TLB2Z=~TqIG6WqZ-%9#x|xAcCkaFBs5nnNN9p~M#Dh<4TuqqaE%Li z)RE3Jq%%C?Vdpv4`PT1>$1NBPV0qPxq$DRv$x7y{dFSB|_JmQsWnhS4!0@DibRj;7 z>0%ib8l@;5Ccb1WgBQK<5ZAEgu@n8TfBZWU{d5UHTdJsMy0l;}7)Xm<>;i%;gWxTG z7NTK>Xn_}erZc7VOrKS7nHI#w4z$#tE|CcrVd7CO{x>39yoiM^#6^nANg{N%vxOwG zMUh~@g-A7uhd0z=KJgjEAi~Xu_}pg?@0qq$#R^qWprREQ%Fu<*ilLIb!W~$liOeO7 z6K-r{vNXEJjjmC1W`td_gjKBoSVaJOEJibmqtfR@hdIws4s!m8F$|ej$ELTNj!lDy zu3{+9l0XfrP=`v?^k~vQI{*gwc#%r*p>i3?VCwh^Bp_6#lB(XbYJfahO$MHvGT z{&ERcU7n1W9_yIK_=mHYNw8-T;@>il$v_?rbC^4$AT)i}GrUSEN82=-0J0LbHJQ^$ zSR>OqU5KJHF*Zlz1ZR#I3r;_6%2At&RHXE|StAaypGCA34F$@nBeKB?Y=D&)8dpVy zrgn;^QfO-vvcTsRy#Ez))GS2JT2j_%oTt0H{`vxy-_6l^Xv1H&(kQ zlSh4R-^PFzA}QTYg|}+uA^ zV;GBk#&)#x3}di#U5Q*Ic$-rk?0Rpfch2*k1NGAY_z$WHq}5cyy1$_;U%^Hx zz74T5U>WnZiEvy$TLKfu#ME_J{a2#?TjDxN!G`uQ<0ms1!+3!+3pNC$MB*U9`zR(is;^aqgX&rYpVHNuJJq&iv*(A7A?DSJ0^;j0}-7)ky!vf*)p}Qo9;r2g-#g z$tTKWobnckt+3xyHfvfY7J)g2-_~}WYyVY=N_n;Gk6@-VxyrSI{U+#- zR7TTPXO*A{tWa`DV*|#)QZn^j%kgr{(F{1nQZ7df#K48?0#h+%95Vm_@A7Iikw6K> z!U?L!IdD{5t+!E`gJ%~PT^a{Rc-DGRcwM=tU1OM2;!-Z#@f^j0NHP^2E)|C0QXJb6 zX_2;5mewwq_J#l%kOCQp_yr&aq6+~gU=LD#2IF5CvLOs+AR8inOjlJGLsk@4O<99= zI^rQUqH7msR=fmZwlp)v)@;ZYZES^-Xoo=A^gz-UGqlibCI)x7paXJe0X#5H?lg#g 
z$4(7cilrEVWkOHpwnK(jL}g<`hj&CbGZsU8#a?{^kW=G*OKJapEXl8`W`q277Ddgq#p@NN8~%b$i(31Bccei-c&%k&k|4 zY1{R3-Gv>$!wk-mkK&SETliA~8JUv#d_b2y*vE$jq96)VN(`fi1F}jB@{kE5fDu+p zF!Db*b~A!_BJ5{BzLYg1lS>g+COk5U*fc;a`F1E4Br|g}?bB=`mTVx_K!N2O6|jJ* zDU_qgcRr?^WMY9wrcX#ji%cnk{sd5rmp1TrM7qd}`8JGKR4ZQaDqLoHwbDca=Xqwy zX0Y{dM-W_MgoLdF2}ihGb(T>Wcb^)Um&qb%gmyF-5FG1h9Mqwh+tGVm_#BM6F5|*z z`iLC<{y3QuI-#8whe1an24gWtmmtDmF%+3P;`ZsL+(;*xJZPFw`{DW6M(>}+9F}c7@XjeftS)BgjCEhj-r$`G> z>YPm@C(o%hL`DyW@`_PbDU8QMZNn*t=UGqIZkLrqp)#H^m|K$LTX<7jT2y8Lr*KPz zm2yC4SrkQ$BU^X_Mw0+>obw2+V|#D(XLbg1niFz(x>!#Z!T_91%*PsG6!p6@8n8U|K~m$-rL}LP|tOqFY6DO9%c8 zybz)h8GiqDby9afA0t;SYF4?VGB*-`H7b(#Q$RPvGRbtClDILXNlE~ySF%)MbXA)> zLqIXJOkyLC%89x$~d}Zg9evdoTp}QdN-02IE>Stoj0)#hej3kjekatWCTYWMG2}WmvrHB)Pv`Ti3kOtUfU z^N4+gG4X?b43sj-#7yjGK`A>?gDNp4|t zZiRABWble}P*3(ouU^`UphB-0=uh-^c=(2bSHyX;@=&#vu%G){je{y#5Dx^Gp6r=g zcH@Jr@TVo)deyiI#3EfEcc{{ZyW+?RoUjSdRdGjXQNyJQxEG+qpsCOxUdC`P&aivN zK#wi8Ud<4y#c>?NpqSS?42_l?>2ij};7I@2v*J6x^nywIP?@`+Rp3J~P_;c0LRBHk zY6KG@N*AK16fqt;wOAKq8^g6XvNSE^h_oOBUYj-SXLXZBYae4I{z0=$WD7HC=e9k= zGJmyyDnqs?(}}`1Kt>X+-&z42Bn@-2PSXITj11matKjd0n}u2e(_S;%1q*H%IUUvXWZl$wjs$TV(bGl!FMG zAW|52M!b8df2v#~WrUhAa^P5(T|5cJ^$2w~33VA=|A|tQs;S5!$I~Hv*wM1R!-Y84 zh3P_=&2fg6c4_20$b?KU7Mfp3YfJ|gO5SHau^OxGn=l%xRIlo^t>#Jul%lSwh+G4s z@smJHqlk$$h=8b85#|%^9ZFQ zQm7|7W+bSOz^HgO2@L&b-}pu(D+$+yQpX^M#*mNR@VwE0sojxNe8fl5aH(QgnA~6t z+HyP5u*X{Hpq8eyg&fl|EgzLRhq)jS!jRJo;-R6*3sEJ0k?cvHBoLd)N)x$N0i)iXkbi)Skz;9+|uqaq6C>F&Ku@7;|t2n1XIQv?$m42E0|LR?x&x01sP`_)s@(>we$){hIP!1uU<1d{UF&t}j?U=C;0rHHs+bCo zFx}mz-rcO)O%AdPF)#uaPyrVJ{sRC*kpxo$8DIekPyrjz5XqoEyTAgkZUH0E5Z4z< zPX#f(`eD8_6eCapuucN&bTu(h0PZ;7K}P{#*h({ zat@si1wiEJK%mPUGGoNdv3UoCPAVV*c&H-=OoI?0sI=^GN@dC~V{qCW1654aIaiK& z!SaMnmUtfugmBS-0F`)No@jlNMN0?*A6kI;P~=FKEd(IAcw1!35iEVPvg!LIFdM;a zps=}OFpLo^7Jb;U6&Y6C7-2!d3>(mHB(PpR z`LSWerv4XeR?JxR=*@;ji#|iZy?yoV-^ZU{|9<}c{r?9rKmi9Nus{P3L@+@G7bNf= z-yXDpnq6KEUh8-$U5*J;T)F=iSc0fpmUGTKUgcD=n#g;p9;pG-8 
zb+L2IkZhS#0N(_-KnI7YLCDA$bkNWiAqk>}niqK3pn?}2GoSNf{rnsP^4xhTqjNgkmDlOKZk4kkpvUbKKVosPA=volGtXe zO*YO}nczVHEI1}QVvd0!ffal>rUnZzumL(}$}`3oVU9`0ml_7hU_Bdb5GELCe9+E@ zVwf3*m}iQCjy?05`I_hX7(_d5wby35ZMWZsJ3$9+wqTcPMyMc$U4YqT1O#IK5GI*q zX3RwwV000NhvahUMTR$p;ai3SM%ZPSWdvWQ@EddVFjgvW@f4Rp5s*R5zDcL%OE86u z!EYH1Dzg@9EZ6|c5_$#9PSnmKlb{0A{76Cc^SW0lPqlFHB1%bC6cIcqIlqTmfNy$pTxpkOeG+ zkN{hV!WFCln64B+SX*FM7S5st0)(InTxi!{2<8_a%F8fNxI!0FfgAo!XrdA(`vE3g zW3nJzY%@pH!(>+R2uPsDVy>~wCsx5SO2q6oFpI=(W>&LF1;-fCAciqUkboNGY#3{_ z!7zsLHD^TQYhW9O4-_zh@;oDH&UglO5-@{dqz7zhEQTG6@r}r%GW^h;q3grJn{T3=3R97&OWSFOreD z4{;7hzXMZ{Y;l1IeW?~Cr~nD5u{!XTsUlJW$ux9;Hy-q97iTIHnQ~_)F!6#K!ox)j zE|53y;UZIpij^&Rk*Hl@NlJ$5#S4m*frVUvOS%9PqdtdEEB=Mg0+RZT1$Mv@Tj+q8 zI+z75Fol2;$cjl*l7`=206g9`P6j0y=kxZ*k<_`5Bqrh3N*d4rkj}wZRCo$ibTx)9 z1ONounhI39Km`Uc5GppAN(NK1Q3xM89aC*heNTxK(-`OIY=Aqmu2#xtN9 z0uX*61SL{J5?_RwZEzzAR*>;G7&wM8zUGSz;H;3v=-Ca9;S6S|=Na&s+6*!#T2nJAtM6PF%&wx&O*mFl12ml|Mgl%kPJ6qbq{&sE<62|N>aJ{`zhy=j694>$H z3*ZW40||&hE?P%AU?^i5w2V;ZYy=F*d1$(wD;%waB$8TEBT={@B?&wQq>ac9nGIo5 ztCHDHhB`!dW;#>u!n8d^=^{fGxE?KH;9I>Z3Q^$O#c|U|rFI_R02d$u-WVdLTWHU} zzALBr07@wxq|O%i-2pXfam+VuSbwyTouG8!B_P?N1PeJp1xgSDFVX5FkLn06s+TLA zN{b64rGvCsI)FQ9iwr+@N>sSOmI;PS40k9%00PheXq^Nl8N_J?bJ~k3KkQ zff`Ul0T-yoG9rh75eOr5xWrL`M?fQC;B7>`=mjlpxeQ)*C>NPqhT(9;xL&C47PW{$ zEp-r*TIk?@W)euEZ1Jkl(P9=GxPadH4Nh%tbH2C81?Wzhr&`8;E5|q^BDV}(jC&DGInNlGH5TJq? 
zaC#DN>6lx`vdI;$+!r>u>jPQ8SvTY<5ogMkJ*7CKGT;I#a0!swm8qZuE&u@+8oMrhiku25 zwZk$kC>FW8qVSO5ahf-Bnswx|MfnJPADJET&ogV~{i;UOsKGa_JxNuY!< zn!GFOj3~N{NSG^0{?NQ2_!!V*B8#bnEt(7^x-?9Kgh@a>)q4;Or~z(p4`ILn#9{{7 zOEnlshMa+pXP~U|7={|yBVm{$^XRrx@&&NL#hpnH_*kUJijLOOhwBr@VI;<4)DP{0 zkX?W-%i$0fNB|jGx6IiE8PJgA>XBu@k{5uST-b#Q*ezw4zh1H~9?=D9%bZ^D5=Gev zTTqDPp@koW6M|x}LUElXfwytufs4u%je0L`;vJ9xr**mo@rjUcs)Z9%CNSw9&#}1Y zu@g)=02BBTf?$zv>K-%+KkvZ>9T=ZUsg!gaCJPubkw7LAkewJKC=*bz)TxDDXeNb{ z2FDozl;nv1S$MZv!3Ao-l^PQRGoXW!YM@@>u`FZ?Zwa6Rn4q2NLON)|9Dp*E=$0?+ z7B~0;5PAYA(1M|Y2QQclDs+Jo!h*SIs={l6C)k1(2!J^xj3?mAEwGn7lozk+A+5jy zKLa#Kcmz%87|~Fom5HmCc?3%{G|yO#PdJ%KLgNtl@1$PG?BEYdof_xdfrdJYhT}jiXlRCVuz@w|EU+;xJn~HTXudRpBx9t_+O$n#+a$UH z9F23h=UO8_;gI^%lWGh<51GayAue5z$@&8>Uf`T_JGaZ>g+Dyf0~qwpq22H25N|bXA%e;e3XjFxloDFA5kdBF@YH19qS~BAtWeV zp#wG8pgCX*PoM)bpaTh-gJ{Vh3>p?CgvzMMF&b^csY5%SiYcntDWCF-u*w3jh!;FV z%PN>EEJ(a9NV5^53pxC$sY*O}Kq@LAp|1K19{MSU=>k@eyuVs2z9bDKVj0&UJ<{kH zD-s#X!vr*ygxPS@v&sxk>qOG}nl}QqoDn1&sI}i424{c<)$)N1Pyk{0Q$dO~_Wl4Q zW^e}cP!F+LKJ}Q*+mzHvrBt}NE#IgcVPFAjlMrE00A~9I7>I#tsGN9v#!;;t>Z$?a z+yTFV0Ud$W=!8yk0|tsC2t|njH&Lcd(U4hrffDGboSddXiIa9BffDdJ0hkCp(LVwf9Fgkzx2%U@;BNKxu{1s*y7LsDhF371d zpaWmY0kVTSB+NoC@PM(P107`wZc*3@`l-J-0xh_^5Qv9(*a8E9fH#za{wl}{cwh?` zU|D$}7JWHcc;Je8sHqWJ7{A~{gMk7uvw|%pqD_FSO1v1pqy&}0gxjdRi$S8x=$O=? 
zTG4Q;MsU+igG^13Oabsju}QTRXd_s(0BA4<8@PeWVg}=YhGVdS8UT;PItFLV03WCU zV{jv9pse(Gj@FEh%X&UxtJK3q+{7IaWW>~4YMly@2n%4YTu_L;`6L0*02WXV5|DvV zQm+h1FlyVIRHClUIX4~wF5r@tAOtZTs0fJ|Cj;Dt3-}v>m<8AsfZ7o+MX7~r!jvtU z75EAh_!JXF;Rs#O9u8rY*+qcbpda}R<(+EXj zl<)GV3A+FnG+r5~2+*ZnE)k~+sD*^8g%JQasF8^DIWg=7xCD^F6C79vRH$sy$vXHS zX`ur%h=~x$0SEBdF^Gv26|xSnshyew5Fm;i009Ru042mADfrmt zgggih$*`ExaJ0(ct3iwzIMqa#Nj<|7+r5>J<1hf_uz&<`j^jXpV~AS?$N=RS0Ru>Y z8+eWxH~<6Cy%k^q7|`^uxxFr^w9rw_@6 z3OImQLx2NFh+W8qTOt9;t$-2G8*m{OUT_=( zqhtiQ00w-h^8(<4>z#kxh!1VB)$yjsO^6l4h1ty;{Gmn!NB~Nv6PLV&{i59p*nsCT z6g<%3S?H1`*(jvbDP74z zIV*q$paPSag9hNjli&iU$}uv)AUe2%4)EuEp1P=zvV{d%MyM9c-Vpu zV1NRE02ltnq+ps0E9_{q5opZ5`8epXp{^1&PSc(<0vO|^>?EmyuctCk-*$m^YmmA_`ki^oDYf>dY*)U`T7e^@a6Vk5ZeC#7Y{h zmfK(`wNf*!@<1eJ;Kjjpz8L_3Kc4Fz_wnny)SID(=RzDyUhEI4E?nZJa^w(nGfrVh zE?k0~Uf3mb^QBz+lv`b$^3Ey2yvqi0`WNUBDgRA-KZ+DW`a2R~;CLg55BfqXvKV*LEYi)X}*PEZ#6L2-bxi z_(DmY^dFx3;0=O8UI7KTQkNDWo0gj@JBW6Qwfm`o*@|=N3oi&O9y*wT zNsB6FgCiK25?U&*w8NJk+OEoiD{#D`MsSy@jYO*iNO&twX!c5AMAvwYN{9_jXw$}^ z1fqtGmywy%Q^YK`8BhF7u2B!ZEu@^m>a~`RIdY`bV#RVWBsnszW?0lbc5AWWML+)W ze)sq3yHwz?9A(6%!ReeH$+mM_w`k-X!RaO9>Lp&95n=cR8u=97X{KJr`0cVz+}S2E zFWxlw9p3q!DG_Zn(Wn040iRgGbCXMyaymhMGM@ihojdWISP7mKj4%YnRD{|nj~H`X zU_g$Lg%9(RO794yk7xJcpOKJ@n3t?bJQB)v8G>&-V>_dY%N{NaU5A2qkb^B$ls=@j<8OH1u^Rs zdX&r`c{HDz6#Dfq6Zb*bdX7S`k143s{~ZO0IbsHm8AwpfoPuNwjv2EMVZ$$C8p^T4 zh7A-pR-UlQ!tu!yFIk#=#KH#46D%q*Lc!t%rOJ;fW2!_s(T(q#?x<37Tb?e)y-zs0d`*`cx!K3G{T|IpDyyAD)_E&Ji8Uwr%NGT#ms2mqS^73@F_brI51i-gq8QeiFHWk(%_6?$k(bskD+3k>p@ zVa7xgH8jQ-2^9pjEoCpQUBVd_1=2&A!a#n2q z0ANr9G36BVS2Kh~^9*5x8CKX{f9*wAG;|TFm}1S?<>;ZtOr}q3nQFQzr=5EGDX5{U zrca&#Sg^}&!XXEpaK-tSn{&(A<|=f={nlzO<$m{4) zbsC~)oq)Llh)cBg@ls#00NU4`b@uJY?Y8#y_aA@o&8LF}u?b)S57Y2M3y8vM7@;li zi5TI$_eR*^hWL_L#~cF5xZ;Wf_sF9`Fy2^@Kq~sE;*J61IL8$jf#l?qGieM{l}ne8Qo0A71be&XUs_<_`3YaO|e*UPPTw#1MwgAUU2 zE`=TPo9~1peuyFH`d0UE!w?Z1QNujum?Do2rHCWMG1{)9>@N<95gT57lq8lQ)ubep zEj{V`N*xbByzo 
zTO?>fvXKUPD1;qbfJj886CDyk1Ul=?qKMQXk%@M6yWGX@MXPH_j>gbB9UX*`LXku2GyCL+nn6}rF*m3hT`-pgLiw0Aw~VNWbK z%Sz7jM7}^VGHF6HjQSq4KA{bRFokKR`nzUU79e%H0=d*P|R|F?FqL zU63fohIi~UBfWbPOibcN;a#jHBCF%bO12Y?VZ;i0RGId^Vm+}Ga*$^F zK^Qp3FqB4@L`^bin3N<+AGN+=#E&jB2tfY!5kOJWw5B$_sisJkrwY(!RTJDzE^wo& zSIzQOvO=IyW5ufC+@fCR`3|>^B|=}7P+lPXr7nlLA@~UEU69L<4yOe;u2M6FC?wFd z$OW#z;UYom(wvBh7@>EObDa7*B0A+k2ZU*HB45b<-Cw21kQqr)b_z4BJEd6qZ?!Oe-60AO(q0rby)}L~)8x@Z`fq zRt$|khR)vf$Q6LN^V z);4`80B=%*u6DgEIP=A?5M?K?32Bk-WJgcLMhs!^9GE->QeiD(5n^+&LKbp@g_i77 zc`C_lM>a;#nP4QcA#-t(pm2nfr6MevXzl))CW;iB&FrGt`>|^^F^NZ*rK4-HKn?VQ zmn94LE+S=A(EfC&LHz}j=u=E^HOa~4a=FW1?&JWqMw?e9^*OgR4lPkhW?9baRzw9( zGJMmwVD>GU$qJ^ut}4tCY7o5gs?Jy`$5j?~=WqDU?|`nu3juXUK#Mjjxv1mT%hcA6mV+J;2Snf2OFf1}m)fJW}>|8BaD;8D^Z)mJ!FOpCb zFK>Cs3mzL2YCJYL;RtKk4unEqpfWK{vxRb(5RQL%gPdP^|hyE&Y**n2m={VJ*_ zWRKl^*{txSM?SxDp$(&1oaP9!obL|iq#}aBZg9lBP+avdhCn=9jF~+65=cP1Sq0it&V#v z^{{krdq3Xj1OlKIJ%Kb(BOTf!>!+98M&{fi4Q^rnV#&K$YRm6_JKWB|05D`^JQ3ws}v;i0__PzD(PJ;eNA!@ z$1*$%!u?X+h?ld7klsL!FijrADcqo`)eJoo;q1p)l}oNXQ-0(}p&<@_?ZW5HM+{Ve zx?F$`?8_6C+<}Q&g*1^k{mWi;k%sY=@AVb%8CK14*pK`OtL4Z-=+m&Rk?~v>N&ruk zR7poL4?#`e@KhoGOhCc&utI3{(V47PMI8kvJi=;;;mbe;S!e<)7@3|V*=Gzz%a}YGo)iQSkg0C+1J$}{gm649m>(HTOj76J|5Q~&W3c^ z1}r%cEUErk*r?f={Ua(FULx)gvw#Ox4O}KxBHd6GFjW=9A)MbxoVAe4#uW<;F`9hA z5XQ~W=(!?nT)+-&PN&fVHOS)ZaR|!Y;JuU%IsuFn1=e8o;t)PeLvTUODU6F8%&Vmc zWA&34oCIejRP;q*8}-^Vx{=SpMDMf#^hl4G$O+9%ogayb_bi3?XjCL{)LN_s22=wv zB+5GCA=1Djr7Vg)+F@i2!~5l7{_Ue(-lhN0T|Yk8zU3X7@kT8@$D1(+D}4)Bw1xL&`|;DO;nUagbs5KJ+mTJI?YLHr=B(bE^Xjy{RRMLYpN3Dm|=mdF^C zRE9*xJR`>d6jMHd6Z8bKO#&E#qiT&|R+^!j+>D)ITL4%9NGXjRvJYHd*(5z#9zx19 z5JTG;7hcw&oV5HK7LzFejyI^HgAP8Jla1(TQ2ye~sah5tR;&%DP(p;Ni2-AU*c@?$ zL9v+hiI{ZK#INE+95K()K><~!%=d+1kF8dmfM0516jTgFnp~S3uAh7cjabZE5u5E-XWQDF? 
z@d-C9%}BjvNxfrWoGG}@VPU{6U9?@@UIx43tpejMKMZ08)WAUQD|D?5zUo`vDWW6R zMlf8^GVnq#l!tUA(~W|fbl3+GNscY-0uga&Q&|TQweSf~QwoEces%75<;UXo2Q?rb ze}&>QbxX4l&I-v;xp)=kqNZ(LKn$iNyi^*55D^sbF#au6+UgOJED+H&)B=R;@ax(r zHK_13v}AbP=niC>?cQiDlt_+}!5N4_8R+a7tO4^bZx_7r8WhADOavK_K^^n4951i( zHiR5I?;exEAUiL_EMYRnLl!i07HmNyvq3yyK_h$bPF%7hSF$EkawcOkCwHS!(1drJ37WOo6pchLo-x!T_}yxJj47LgM2b`U0BKi9Pk25Dj4Zy$*z<@sI{xd%Jvp)ATKlAfH1N1)&bUp|4J{Pn? z!+;IIfLFEZ;<6;eYA!QXv_)5RtJ-EnzY8_&fD+3a4!l4PumBF=fD7QjNSE|T!vGGj zvYLubyF*K z8a%aBBQ;ag!4|+nJj4T5XEj%Abyj2bR(~~Dhjmzs^;Va4S9^6?%R?9ZLo2MpDzw5| zx56jPHC)^ETi>-^f5IokwO{A;U&l3H+x1A5Kz1}x zHZe?gWmk4&V>V@HHZ{M6XNUG?SGE!UNH#avz&2P4-C9Nhle25rX?{AFJO2V;9>*{2 z!rubNJpV>7G|+MUEpO1aJI@rIWmHpt1IO1J494gbM>mXa+-NCBNDHI8LqyOG7~LQu zD30z11u;g7gc6E?h?EEjhy_^tKRoB`)qS~h?!CYJjn7vZyB{LYvB5N+92rE`!K&-% zs7K43L{1JzDL;%H9**%+kFh<8a(@_YaP{hS_1K_?k!dTkuH&+=_2p`=?$kWIu5KOQ zd=fvYp7`n{VG@-1nV2wnl0pEpQ2m2V0n*eYDJBgmrZ{pnNJ>;AiKUOM-j{5mLDoJc zo5xX<<0wV?(j5BIXnko(kg!0KkfgqJcAro}pD>U9S*;)=FD|3HPY9EgRo*8o0Lr=@ zm-S8~qy9AQ-Kh`;lvWNx(9Y0yfCBbw1tQl9_^%ayR;Cvz{3<%HSuFCa!2DXV`Y+@W zfFT@k`>G}!K)#*UUz*m>nB9LT@7kTHU!ajO0FkCsYM@aHzFR)5kz#N+3w$>%S!40n z-RJ%H-)UC-xmIzadH?w@>bq-|<-_KeVL{IxRKMN5cXq%1RSW;`IP^cY+DYZ`|Auss z55wRlVG4X<`}&bVc!9n_et)vS89lTI*z8W`?}%@Hju&Wo)bbGM)gLb~{=4~ke6u^S zwflEIZIE+Zi~s16z<4rK%ZbWeb-BA&6AT7a#viIoCCOSOtE`>$Ub@a@KCBWw5Px$( z)^R}A?$3=6Cn`HM=qVat3Xm$+&pEFz)YA8udno-aJx*xtdQSc6<9ZDtj-h9t1|H{~ zKH~=o{r&TlAC&cO;OY4xjO36oL?44l!k8wEB8M=8Al+GAj zfBb&JSoPnLw1g2jNEo3nf&odd;h08}WVasoa$kGt{zul);MF8(2K+>21UGY{GXqbQ z9ml<7c{1}U;lcSQDmx_9UDBKT09n^33t_r%>v3J!-n?FZmzM=TeD(9HoU;hwwS2q}; zAq3EW3I?71DUaQ6K7IA)*!IG)--VO3o6k-LgcB~Dl;1pddG-@IBxsuO$K~&ctwfrz)988@b$qYFuzqxX{X0X@>OUBkj09BGJI`>JUpi4H+lPlL+IwO zs9l9xe46olW0jz5SYOL>uIu*uOTDdcnshBsWSx5B-mV_-$9;J>lqPap^?I4?Ic~_0 zTR5}XD*ZG7(8f+u8*{kSpQi95Q9JJ7!@%|P9}*L;|5$=EuyOcbqT;?De0uxr{TTMufPle6H6wxhs1T(LGXyADTw+pCpr8=~@#4d~9f$$uyx{$BhiBHVwJNJ_r>=LuD~dxy~3C zbttjAe4iQ>;Gcp`Sh%z@t24?m=W09g_w?w>BrhrH`OulRC)UEfta%OpwW<*=ezTje 
zy~QQpcTusnR+QPNd|H#&)@WqF?VWAPlQv{!ukIJvl9j{Bm@)t3E3@*3OI`&OZ5#-= z;+6Mb=|@~is(c}{LAYhvv1_tmwoujZ@U7bA^hZ$cufeP*2nx4*@a&biE9BZy4mIjk zQbix5$=zA#s``tzZ)XFmbKR`E9#y7rM%hH!w)e;HB3?Z?mg_$YAr~*XPHkcFsHa$B zwtDc#X=9mIt4i|W9pyRtUNpU1JLK@!M_H8F`XPd>F}2(=yZQb zcjQrKfSstwJm9u!G)-ev%qk@wN>D&Nc z0uxW*gwY+nSmQB(h?20jphujLM9D8Em5PArb5|ae?SJf4uHv;5=$m>=u^TlphpMCWPkg0scuJlT2L0k&!9o}R=aTNw%?xGBXD&JV%eW;o0)hgZ!wwT#0V+GX= z&@?AcEjR!GFC6FHt!^@wvJ$H263y|Bl=UgUSQZFxcvREWGA{$%%d`J&Ufv4Wxj?1o zH9)<^pzt0#?UZTf0-49&=D*?B4O*9*EICtz5j6~u{0^B!Myl6Z(!Fpf+(mW-Z=!!4Y zDyXShWTn6W)c1OdSv9cpZsAy6txPM&#q&n4%Q_*p6ZeXPNw!U9W)el-;8*Q0mDTPC zM&3O6>?}1euBML$-5hdWldTQRXi&QR#Ny{A+k3y=7{_imeW83L^HhN&&$a2JhsJLj z)l|}%M?FQuI6DkpmsRH3jjMN&j2Mo74pbJ)m=ny z);%b_#vEy`#s8(C#%8;?f{A>k=fd*{(1c->3;t!)!{w&AE{44Kk z#92BUhZ%;whi<;lAdW_i#2T_zv}RS&a>qF_Aw020neT@n)D7-eWedHzbKKKxUI4ly z%}dbW4$j1vFqib=Z75%?d?9P;l|>vf*3jFzBPEr@SpOO1}TVTtU0=bbA~8jyBG z$k%y40n_2;>)J;K0{?c5>TiiA1m%ygPH9l=io_CIe=uKl3_#vTaFr?ds|7?BF?z>! zSf*rph-q8Zr|?zOZ8GX=|9upk4VLYC@beW}F#6FCTZzXD6uxUZrPUH+IfF1cFP)No zO~oZ8_fy-+y_AMYm20YZow_DXhjpikjt#dBAy?uL9^W$CQ6uEkTegWOL)DDAfAROi1- z@)(2D=DojE?wvIJ?0NpgG@{AwPWz~-Wz#`-)~i{Wn6lSH(?X>p?*XL4g){9pq~6iz zs0XRXMW}yVtrOdH#FN(~jb39qY}gqCeUJg4csVk8uUA32u%bUW^o1DdIW2lD!de>} zJaUQBTNIU7gJ!vrc3&cEb~kNu#fapef15x%Vt_O5;>@n}MB@qHXNjsE;_uOsDo?2d z@0f;-^f-ChvM-FC=$+^guR^)sYm&T$mX@)DiwSZ|v9e3~Y1op`o<}ptH(2fz4~sha zywe(`5d~|VO~MZ1QV+m4F7>b3@c1GI#q;sH`0k2trCr0idf}MuT)my?#DB|a^tM#< zID$@vT;uYEqJL3k^2>;(8x3%+7^hj?|d4& zi!;?Xuo7;?Z3)SHCv#W=L|z(9_!;m_&$12uAjl8dhh1rV0jIkU{nW7hNDB}gBTGi- zF}VA6#kapM%+D-N>M*WnOq55?DxOo!+JOOnr@|FuTd>)3xgZ0}v6^g)&)B9sMe$*! 
z)ir{rv9;{*_*1|?!{I%(J>@M7%76QFHj}DSvb9PLm%j)1o0pR{Vu?Cp0n{LRMNodq z@O8Ef!0zFVzEt3R@~UV!&^&;HAMG$!r*&lTlxdu8)mX2jX-#jYiNl|hCGXabL31Q& z9aS&-$frddG%KxNlUiWlOAh0Kz$9{^62dlSFU9oRHjS;|a&Q?RPq*J7a5mAzE=Stb zA~PSs0IZWPX&Ctdl`Iy%XErVHrc?fa?P_?dYy}HFXKuzT9?zqbnv*b%ZEHskVl6hl zj-`ZPS8BMo(aOPf4>9b@D`)U0HjIxi;W6rAP3vFD2_;dbK9xD!q{-Uiu|VS{4gIPE zHfv3JA8t#d!v-YlV#qXY?51lW`@4fx(-%6&%xH+mn!i%$?`pk0uP4&jS1RF8_GHma zDd0yFd;}_{VNO%p)0w9RdQ)*$>(X~FOMSI1*jFG~CMBd6&}QX}L~o^lFbzFhzodt!alrPqE7dtr6O|G-3eIeM5ISroeza>u-> znB+f%RPSwXIN)rG^6-+X znBu!wRvYhX;fq}yDB!KNO|^Lob?t}{G!O`8c{x5DbPO)>o+TYkHS)dUERTA+Gxnq< zQ?Jx#U1=zMkx@8vmou7_Oe@~t$!%59hd-ixZ?c(PJ>30uR&`cI%}YhF&zQxDufVWpCnZ=>uiAMX3K-AZwbkw)787~Cd$J!Xy`2so%uQTo%k?A zM`7CN>0Di4iolf8Yny5QJ7|IbR6I83x2L2|}E^~e5F~R=N81_=ZYr--~=^=Mh>$<9gvVVSEU;FkuG7E{ov{i zaO=%GOUkZK}`}?UT39)`+if+-iO7 zQ0OhKafStM06v;fFKEHr)Zv|;OeY2=i#B?FiLQNbZ**&>Bna(s8MST--udz<-~(>5p8^i^;VO$0$Af}1p5u> z#`@3dz88SGHUvZ4YVcyj(jjRRncl8c2Ysj-;BGc9+AvLe%0plZD9|w|c{K02dr_(| z=@L2aqyx`?L!)yfBRZPJGCUA^WH`{ZwD$O=i7KWFM*KBe~|9ILfpP zxlcolDJ(-M)}F*fvm`!m9Gv8Az;(x|-lO0@3%ED6pedD)^c$MQSJ=D%u43B>i7nzH zqxJjIW@HWmnL{ZYGx=q3dUmOI!-grvKn15a<}1_Fu8U8S-@;C~*4Y z7?c|QdYx~q@ZobI{@eNt(d7c;9s;o=a(B{+9B=M7YR6>AQ(uj98B;-QYaqw9`x7Hx z-b9Ej0LTV_rH}azTd80k+~5@)GNonwNBZ@W>I83agC!M&rc*(tB%bo*8nd&u%>O~FT6Z>~ zrYj>C0R^ea>Pg7L9xeyeV4@KT$VdZzqdpn|vzn0+I)LY5B*dz^HcE&!=`K)!296WB z`OQzqMiagL=!0w}hau)nRt3l2#YNsJfoikpU_d+=P`XY#T!^(T0U$~Pvyolbv({=< zl3(pfS=K_-s3S5|h+16W4b2_}>aYS8qQApw3V6jz9oBU%^$e$|OAi0s870 zL})f?0WT}Z@J4#4>k;LOH?Zt2og@TP!T^m)5G)nsM)m3%n31DC_94RKE4jzdhLY|< zWT_xys?9ZWj@~WB!gV{R%}T3knl=@nPlNK201ZSGp-i<013^&1t^kM@0D?6@hU=Vl z>_G?&T#EyJIT`Ii%xAsGXPN*uCXKkbz9T&vu_QsXbQ(+qfYMJNU&JLRc@=(;SNnwG z?Ag{b8w4Y8kaQdfL1kz1(2f51oPjuQL<56J9%HsvnH$VQ*a3RJNOUZ zMNXe$Io{v0y|KrQ0hs|=n^cnMBHWs578G;18gA2a?bStM zxOyC4?j8DwC2{!sO7}(T;b;1cEvd)hR7C4#872{Ym;kgO^~w;z8i{9RHZe~KP>nW^ zPXxk9ZbOYR#48J705B5~*2Nla5F!vtJ0r`2ALFB^gccmAP%{4rCs{6?qE_s|EhULo{ANT#}p#(2v zL;xUL9>_l2U^`B^359AP0$fRTA8{yWRTK^fXrR{iQ-ORqq`28<5Jd&8=f>Lz(Yf1o 
z{Xfv-R^j?V2mpj<2+(rFU@HyyG8jdpch;;clb8bLy~qIdB(`ct%1A(YHD8;ruOv&26R~U#eQ7w z&0yrWS*$y!9hI$9DL=sBw8eBpn>8-7TxJCUUBhR`I5B7{g+GD@9OI0Qrjc}9 z{S|%q!^6)UYgbxC9TG~2pxg=RSQou#rNxk@qcpW)eP1b|Klil9JMGHEs!$!8+%XG9k*n zoH&+-eP=Ua%YH_LorS9Et!A<#=Ym+>2HBL}eFjuAbt9{MCPNw!o($6LafH|Q*N#%m z1c6%xF9bcW)TZ}%9B}f$3r9(y0Ma+okjyQHtr?kl$LeG+$&o(+P=No{`-lIAY{MNH}>^q9zbk&&FMvDI?MeO-9pGR+L@z))B}Oc80!>YESU)pqQ3QOrO{smEKl zo6NC`o_UtBOPs62LIB2nTn?0rFkB@kD@qtDkm-`F!K#hrR~PV#qQO{W`!cfpB<972 zf(e(@sl1v$Fzudi;MvLDN$nzi6rlNd(6`8$V+N06Ivc-4IQUmWOYPhSM{i*WFShP= zy$TT5x&Csf-ej(Cxm^OUg5DS2~39~Y#;{lPO z^l4p0N6M5iS0@RfUq?=wup_~n@%Wl(0(PwxrrK?q6X?m>DS9Iqj2vHNxG~S~fSFZ; zYb-GUYLnP8^sin$UmZtmxcCd@(nP^D66OtKd0(2!={gOTZ)TBmoF5 zhg!Ka<_ZizMrz?mZv%uN&>ek&4H|_7f`PaKlDE9MCA6>LFTLKheJ0-l&fB3vn52M* zzO3NbRWujY5a1;{%q!?h&D81`|EUsLpr>kf9e{o#jIw5gha*|n`OmYmk&E>)NpucD z%oz)YcFPW92LWJ6@N@v4aRwqW3^M#LG|5JP&5$*7ElbRd6&T97arR(h6#kaP9mq_q z-;|!f$QQsFAT)rW97S5gpTYZ{3|i=~Ink9}ocFbBZKK18i%eL?mpDbk?-KVM|DB zkUR37c~z?>xPg|nQc~@+qYvZc8I<>BU?Oxi*hqR7V7*BId2@)0bnb*2 zX|2c?bHF9Yc$6y<2TxjpX-8K^2W_wkJirg~fNi3&2b!P#1JN!G8^h3sVrN!3* z@JaEsBSUj05BB=t*WhcIBnU@7yfh92(3Uci*)T~tTd;7D6@%DOyAS{?nYt!PZsDV& z5}R;reG_`n7*xN7laua?ZDc$?c${=glKS~1pd4CZ;4-lXflFBUOcR$N9}_94v$eN1Ea$T; zzyE{jYc9+y;w!G{U9j#E7lr7~hXM@pFoh1>89fLMbR}}X(6EtXy`43@>j`rn4=q|G zS;BBcly8!OVBR&TI;oP`b!Vq+1Cu8iQh8CH2mmt9kOa^e$X3OZjnDWG*vH znh7T;u^To_h$Y8ew2p{Ox3A==wbG;qb5s5(|8K5+t6`bgT~DEl(|$RzRfTly2>(d1Ok?DRmN&(7{1 zIh&I88J*-R_WO&Yi=y%EuKPUe)-cD9DiVN{99~VvU(CThUb;EVCfZ#>=R}BW;1BtC z1S^Yr8+h^la#$ME6!7!WQQC$7Dtp`9S^g|e3I^W^&JEFaBA){UaKoJNA4(#!Tvgg^ zly=@JoIa@L#%crkCFgpJ29blkJ0{BcCi{1bwI-)CkauK~_*Z5`<;O(;hZF;W9zc@1 zVt|<)HA&~ky=3B8K&(~SO?m~9D9J2gMttOgU>M5Oo+lcl494o!giNNFJ;D~_=ifJj>-Xb457;TYwt8M8Xz4Mgdhd`5J!7bzc6ORyWN ziqbLR9GSCmtVEQM5P}_C7BP_!w3n$52Xu|?dI!8VQo4m^-O%BKQ~?MRxz$H~oJ}ZK z(!d--%pN|zoCGXo{bsrcvyG7sKNc&A<<}wd{ILQ36=hza`UM&Bl_&Wvkod+6K>S3b zlqwK!lz;ZNVtOjz#!tl&yn{z^94AeP<%oG*5w&#$cm+i5U{Qo9@D>2IOhs+ul$D7P 
z6@n8lkq+L>S5~b?Ew3>fqX9BR)M}+^G>2f*Rn?7r)n?V}z5ZDb)3cyMLFe?DlSF5qYYf$!cPU#kp(VnxmJpiO zV9%BPT5;DSs4E%-?0`l`ibs+xt>>UoB#XF6iSO_*EQa~ZYu1E6Xk`uOjlCJ-G%eYuZO95v0N47wF%Ug-MLb0E8q0pn!*58YgGq z821d=ttulWFU$UpVmC?SRFwch2^=LBKbB44owsH70xNz#rp=8HLD z47uhXZ=#qPYs6C|Ahf9{O_jWQrc6@R0E?73;@Re4?$fk;{lv~ z3NcavKn#Epqk5j898!GlpfbK=EO0$v6;PN}pHbEr5cHcS)cey#Asiq=yLhkX;s6G$ z)Ny_2NN9v2JZB<2fMZr7xx6mXm>w5?Se(;OHGi9CIr>AZWf;IrgF4JW?ZPd0{484L zuY7b6izR7Y?f{&_-n;ib?COyCw=n$1^|4v2isi$JZA*K zXn^nAVKJT+K#79+O;(}mM$Va7p(1VDK#(X}>FWy#Zgd-WK0uoz4Jdh;uP%aJij9+} zP@PQR^zmWE0q&6qC{ocyKWR1$NO%pf1-8z>L11eDpbJ3h4cQjYX@j@X7Dt$UOxB-e zlP_tX76wAX?MkT90CNZ{0ANT$+{s6I_?{T_{_DKF)tl0faErx z6j-rVx|qV+3G4=Xco^{o(EW6x`||L1e5qvnbYbM#08L(OS`RrbbDdA2Qi`V*#R^i0G2^>u+R?Zc^UwUb)VOSFI^22 znt^`j9{AxP5srcH{IcRT;T<1tvah0jOLl#5v63#eQ>>tZ!*yc{%XD1FWpvGw3h} zpag+WjP`_^0MbG$%%Q>Ecn~8vE#_fflLt>aNbzQ`qJ<$aHlz2^pd$IIBC-PjN3+Ie z#7QTod;b6=Wj%;iX<+O)r?SR;fJKENm5(g`bwH=uES(LP^C35Y>f!53f|0Yvkd9G3{>kED^M!@+uD=M_-d{={N=HzqD*7^RvEC3Ox*dZ)R2btl76O+Kr z@@%e692$3?eEO7IS8RSCx-MSv+Gu(?ezoEgz8=bZj+YX_Pf1!~!6OjP;#K4Pg zuZa%}up>3re^LbNpKZNF-3U3|gFxiJXkE}KD=kVl+VbpmMFvegtuBm$FeSYe;7lgS zVi{zU!x!J|chJ6dh*E5prnrqIxojl6-C_ERO8UAJKC?-3SboIr+{-=vWxe#Id?8+Y zEo{0q)+oh(sT}Lqug@`962Xw_6nHi4j$a%%(rnp}mpOnOurmJD-<}CDWo;bu=bT>A z`GOeJrmM{U-hCUasMsu(U%@p@x)~vERCsP!_(m6g^X?0nY zxvYhd_o3{N%k8DBik%hrmT_sz%lp-GZxG+I!72EegpCr27psLfa><47viwL`qkW&( z)$V)tErWip@hfaOv%DeOo_hWcD=RBs1C@2(uira=toZ&Es1iLA_?Oiw{Aqkg>hs-$ zPd9G_-EehL?a*l2T0Kn+nZ1P^B4nTDgrrx0$YB?yhtTt)=EOe6t^xE~t&|p~KJUU; zMZZxKfIP(Qa;Yvfamav=-vjvo$9jJCen0s0^(SxpqWI2LYTq9c5+pG~Ty030!qhd6 zR{c#@nMe(XY1)nHwD76tQydZ>y`*aq0u$ACl0%knHl08DTP^G!4XGd25^`hFiQI6;% zz=w2$*TtxCiw#Sw4VXK2A)4*Ghn&Rg--(l%6F=w<70%Q=5Jj8qMc$yulTSC+lg@nx ziR;wq{rIE73{)eFxfN)oE{;|qQjff3-duPZN`p;Bx&J34^M->&S~^&%Hj&s`kzal% z$dy4mIGlPJ{NI7er|2_2-DSblih`Q30Om^-e3vsNe@F6!I%5C**b2_5H545|i6DQT zvG%Tvo}i35nP;Ove#xra29SUsuzjU(Gv8U7oZwC7)-%KOoblgF!|0{!Umar%otH45 z)I!Sm!v36TKf3p>AKd%!=N|H5$90)YqK^r042CooPIt1tBhPeK;a4G@B3AcM@JX}p 
zAplsNMaAM5O-7WJ=7rW@ixbh)GynSYI9^K78*Z$d=Xrp+KfjIo{Fa>!<4~D>Wm@d4 z&*nfoP2T+V<$`SJbO|&1?7SlkF`X60Cxgz=s-^^kROC71U=8aHgNzcae$9Ai^|0=3 z!ukxWc}a^?yYIzvHOsP|qV5(amtQu^GUXCpj+4LkwwfL16NVC`CVgkCE_n4v#tiM+ z)>~F8QJmlGG1S>r2lg(j7(sz&y?h!@{#}3YPLGdQxxK4K#75U>EmFPpL3rS&xm(w# z+xvR2&!;%j1 zPrPxy-R9jN4-A?%f7|BX;`01utxN1@x?At{PoTQypAlPa0k24Jx6S$0oqKvJQE*iB zUdW>3vl~W#qCSTVbvIV%YG2#idgAA<;8v0CG#_YLuG(y$__t^8;g&+k)5O1^nlP@w zX!ML89ep?NX+U%zMz&KF2hBHvfebN5vI3?BNceIGI$4Fonu>&iI`z>PRd)1^5I}3z zO!4Jpp)_QDF3D!5cG9l-;von-CAOYkE;vX;ZaZLrwbjqT#lk z7UN`Dht5Sl$G%6>P0b^+woUa@clT1-31F&hL|cKmK=hVbmR!tsV6&XornGGN$T5>e zhxRDYM1JU4z^`-6Eb51~sR;d6w2hfeuZl|I3WS=(liu8V+uh$hRBg-O%hGPcb5l&s zBXV7=e<2oFcY87W9Yyo|TKI6k!c5d==I3bW!s8zklZTHKmPHnHe&kZXf;`TiQnghr?mC)4gPd+TZ>W8MS``@8s16_|H~?)WtBh1}wUe8q76Ii%vuI$LSL{)Yp$Q zLtM(TThv^hZ?i%v3TAQRJnWu*RXj-JI2J==PpJ|%nl5AA?>H^9G}+dG-I|h?Y~W;0 zw@RR-ocnqJpUQKKsHT?J#Y@{wPm{t)T}MBZb1vD{ zw1@g+91JXdPp62Wt`YNnxt#hwK5z*?`2J-%^xHUT!~=Koydws?eLICgq9#6`;X+;rpL%`H zeZ#*EPM!?x({&>~nldf^R`b92+ml8cpIK|o@i)u1n&0^-4|6_L5*i>%$keAI z+-vyXKQ;3k5pF^e!J7_vL;z5hB9UDDh7E%b$J5E( z5P`KdLSMicB37p0)y<9SR<77)F3A_`iQav3^eKsmUg{`nFo}~$jzJy z?~9i4HpOOpf@NJeW}9nfr9%_yBTnU@An6Ak2SYG!rjn@a%MYDkp(VJGWZ8`PmJ_D$ zx8F>x+#po-@%)ZVWn5lK;wbJbXl+irU}#7|TURj^jO)J>wRUXZebk-V>n3iKA`I&+ zkSv@RL3`A5Olmr05JuR-m>Hxa32dUh*qxOYIi!3tq0z!+}2`G%AShWXCdL z6Mm6-CLvG~1MAJl1Du6OeOayi@y%lYtqZA;9V-p?WuBV!)a_yI((w%r?&m!>b}Gi( z`wkyw#_7tyCuGGliSHpe&_TzHT z8_rwi6@ODq%U-c47T%3$Pozgt!+dK)y8xl=i!^xdUNGXX_u>t5Gpk#9%2?BJ=iPU% zk9&c)$65k!ElpDuy-=kEVC~WzUbhxCTlhVh_OkVlzRdj&(;$P5JsXt|%s}C+>s%@~ zclZ1<#2&W`NM^_)T#K-EOkxhg)wK@3#fkcj=40hBSuKTkyTUAO#?rx- z`o$j2Ox_nwjD_r)*6&)>1N!^=Qf-Sg-_A?^#Fs4$Xl7=rKES3m4(f6z;IAOn0H6QN0by|`-M6fVRbA8g*6WXF~l`HKQN>5z8{o?B*UbB4nM_sZa+J_VJ5kdKW znfb~t!fZ=*vx9X%wc5KU2I6+PnVlg2Db>@=`<_n9BqLLd>5J#irT2~6W|&R(jV0-9 zUc+S>Fzf(}*r=I|7MdFRnvEpc>GC`6YqZ(#MI46b=O_DFy?YnfTkC*u-ekPRl%d6M z)$tkKPRU377Uh?;Q192|2zzqxu_*sdso?LJDlO?onW6JcO^+j3cyN{Ok4nBs4@64U 
zO%+J*WoiWe>iUl`AoD}$J6jd%Yuy`b;qc%2>RHMsf9g(`FXYZ#$ZKd$9%xoe;SuJ| z7%8Ce5PNb57{C^HZjm}_FSaj^7`0z6HQ8uD+O^sZ^2d(Z&Z%yS_bFev8P!fU zFsSKy_OGf+b|R$!kST7>@Eey2ovP0q9?qE`j!DR9!u8i;*_pF=iw8thg&JnOG>!S- z&pNUegL-G!1{>}eraZo{Rrw_KvH9Z_KHe692i0_=FK258dj30y%debugqScG=H_^7 zy)3ulk(OgHmt(P!!(AhB?TdkE^u!>Gi=%izr_!%92XgOKme2`t*FtS7l|9 zKxN^oI^m{)rIvw{Ldz$c1C)2uIChn+HSPEN^mGdX1V1AzkPa-u>Xemg6q{BIS7wBf zt;ByZe6)Cei^@=e)rsG$&t10)pz*X}8Z5O9nR-kTg4s*J+2NA9w_VC|3e9IhjmaaD z_lECtO_~3ldS&*a^U@00HIDgh+=FK$jn?1HFIDE${CUzBkc;XncSvZfE}Rl)XOSlX z&-n`+@!vk|ZUVlT5-5bZ_9&rZG7g>Vo_A8>42(6Jr+*CGCi6hZgX$X9W1K!R{JIKP zyPnZ)`j$F{qXSF-fEenxNUL`!Y;jgwP7JbxFUvJ_YVtEa23w65G7M`o>?Sbm7ia7M zd&f7hU81innmZcSY}7Qz;78TLfV4UK0L;1C-0bcv`$_g6Qi?g!op_sU;Ev|M6p*2*DLV2t>mI0R+E!sM}Eu{_<;=a88peQ?BUOs|1Tc|JRNlsPnER&OKU$ zpRZp)-e3~AcoxZCRjI?fgvfx5-D;td4vIL$+F?}s7d7>ujPmj|FmNZ2<5qrq=72_b zokvWwJ}!m#0Db-Y3!hf;pI{!2U)+L&h#XU%?8Wx;A@%2{Qyw-|o%bot=&%>ImdHoo z(5F`MWelJwUh!M0cV_1H?$*mbv6B0lVOTC9dC(_&AQ5zGK#=LzS24%F*J5~;ZUi4>aF?%{S2A38@ou5>INN5wTbYmO&1QsA01pw@fK@iR&7AD3IFojcxuG)tGe{yF#F%5B$m>AB+4 zOMzb_0tFge2A;S)o_2Zi-sS0mivUkA0uQy5gbto_9TcMs8M%%aQAR^t$8=XlC@as! zT!%SbCt_SjEGc$GR~u94kQ`+idc_WLWkinh(r3jE1GT}vpZ>WrDEEHW=*rN6>rlx1 zm(#98|KMEtK<5TJ#SMGgIUg9sy1I7GZH?1yLvD4;$Zb>3ZBxu`CuH@89B@w#$lKty zKh3)r!?^$6t=^Q(-mrRfu(}8R@OgBVd*(U7pCm1rBqf<7tN}Vr{2)b5I-$7#iE;n? 
zZw?4zN3d*kUQRG@_jb)UIz0 zxlOb=A~L@uUfLnDJ|MEJt|1=~Q42)YF(P*dkhj8vug!!1sfWNzk8>3s-2Tw>Zj{TX z9+*%HM$i-E3l&lG6xQ>^pf<$ZJjJX$B||9;Jfvx7(xX}zff-MMIN*R7Sz!#wx#+1V z?!`$z@*MCXD;{phx_POyK6b_* zZR~yQ1wY#8`9u*@BTYZrR=}As7pC91+8?dB9{RY`Kf1%-yUJ6pzwu4rE!w!AJj<;=&z`@792IJcy$6&ohUjomWze9E> zr261s-iTE1BdGf+#m^MxP5a~>2XjZH-l+RTI`ne~z}(mT+_C=T<4-ALpXe!qJCs!a z1g=y8f2i3a#B9;uEFY3f-^n}P$%h3LtnL(U?-c0;_DPqybHMZGQgp<4|@&DQQC2@LStVs(2+!`e7Sq~aQ8mfo{wXyHz2h^Z>J%5k9Y)i zbA-Y4{oM$u;BS-$R8V7EU{l*(v)*SP^k?E4wDZOT;tcc*G3nr#NdKRw07A7V~~82dxk7D7I( z9<2F7HjYC!VTUHC2WxJjA7G(d;-Mz~p_>m<%xPeN$ z?WNemIs`vI6#wRffO!#Nld-`xuKj`SNKeVMr}V?mLxTIh=>7jube@51y?-0e5P~3f z5PK79?`rH7idt1P_ExjiQjM(|#4L)&tX);JTEE2JMNz97d$y%RRW<*|^CEBa;+&kE z`~Ke7=W~&*e)RVqDgQ6f7aj5T^n1TVkjG*QJA3LACOJ4VI7$N_=tl?7V@EsEp}B8X zKE7(O>jV!HgHO;#0mvpMjNv2za~^KB>?;NuVlf@LBOkJt5@NjglOpiTIQr`bHF(SF zm+gBR9c!PjaZ$SezS?Nqxl5aTjTm*u>e~;iJCUb8QM8ah%_)gieo1P6qR#`;xxxj% zg*_Vj7I%6QLmTS%XfH-S6#A5gp2Z*5d7SrkkHvAH@wZ>dm3^3G7`sUZBO;(k7g$sf zcJI+W#GNpnZ~N8F`;KWqsq3@?!*{$#0z@RgyZ`UrBNo3Tw{Ynz(c*N`Sab?imgRe+ zj}VwH{5Cd4>FJTOdc+VlT;unl0^O13wIdbvA3at<%2)oVeLD<>(q&pQnqGgdp%`hx z(qv$g+OL5R@}sL)gT7fw+^bWlHTd%vXp4w?;#UsiO#Y}9|%{QhZs z@4?>J;2XYC01}nwBPw_|%_^mlnjH8oG?yBB=l5PH-N_x6-(<(IXDht8ef(2 z``(?f`@^w3Pg^*D(>G83755eDk^FA0d7Mc5{emlcmL)nV?u;7u@Qvj6xZl)oZ*+fT z{dY$AcBpbELiKm}>a~cq-wz3$f#V?&pMHN2R!G&>OO4P-EfowlczHk4n$Cdr=6?!5 zi&)P0|Mw@$o-TMK&aCsxt3rT1mc|4cRVDfB1$WfxqepeSU#>qor`)AE{F>Cb`{3Wl z0hipUP-b78#+{K(2&&j+hO@IeYE(a@}}lBWI5N>*eOplq`88=V<^A zKsz*$aY2^jWp4tL>?KUr>O&6s@&<)yxvdm`zpCg}uvw1uX2rmtL-ENjDmibybo_3J zTaHTNzwh>qJn9|;#YTY(c&F~+On&8^p+d!-@tiB3I}2w&wnj6@GZq>j-0BM2n{(B^ z$IO438=ZR$PW`(4b#2e@zpr0-UDeilqWE?nTJoQK?kP$XO?4;P(>EbB0|shsL@ieN z?`I7>)cNzJpdi@t9A`Z;vM2x2sDI_L< z*CO)mnF-&A^@$hd1Or+oyY*yx|3ZR698M&gsn08t+ae{{>w}3>{@&~@>FPx`OPRjC zS$C=Kd;?hnPHzsI>nG588xy@>p0+lB+RUu;tfTS)+*QX6j*=zz)zm!C`w^+k*LK<=?RKAHe%8`%WE87kNF@7lJ9B~`z^I4TtsHbf87O4kv-xc`R=s8b0`TM2|Hg_eWB*NAM}v`j!#^; z30jL@^B*pa>6jXGYig@WEo)xAnLr2ljdE&yH7xS1v2|Q&1(m?mYo2bvB|it(BRV}< 
zPonkKCI~+~c2ARf(M7OT8gZ(!Es=XwWNRj~)9zs+US?Ng;VDsxanmxd5X-WXQ{A#P zRa339Gv>u`lR;bEBXu7dxmNSOV1 z>ry+JQ&&^#JJ2#zqVeCu2UDuW6=VA~Z?YLyB1!|y zOTw$`(=#<@-_@HpegbMA-tX-g7FgV%+`$YoiMvf;~y1mB5#yVlD}HWkw z%9~BU&bKWm8rA&p(oFPO@+^+0?JcLU1pE0Fyt=y|{(h8N)AoYo`fop^U+>G$`8T6A z&(wM*xjun`b|##Qz!lNy(}q@ILXfXEwk0L1lTKeDfvtoB{&6bg50MvU02+PnG9r*6 zUxnf{Rt;&vuwFlytH@)dsfdZh-R#EXU5~O1Xd=RWZ^Q(6W090F*DpYAz&7%ki*xM| z8T}yloJ5|;PR$Oi{tH1Zz}JSY3Uh8ZhFTuwu>Nf9dL`dHDu>b^{fXXT^a(Xq$%aC&e&0Upfp*5b0`|RI8u4&0SgoxtlQXnf5Wyxux{?&wW?pVn(-GIb56-XI!iQe zsXYH)h-w3*-X!>NX=-WC=P?$7bK*dXOI!GlQE%)FCjbaI$qRTNjA;wb-)K$83hH9X zZlO(INpn&8sG(Nf_*6NEUa4aw$D=+8ucG(V81?-rQsA*^m2#oVqqCO*To_YB{~Y^( z)CBg>L0R)ve$7cXqnnKo#UZgJh16~d-eQ76cBO@qt&b6w_q}&%(b5@1veX=hx$?c$ z8x;(^*4Oy!l~+bG=B0Pt|>y`(@x( zliXqzN;Ijz!@5GLMyXWVlKp4L-?x_e%u>m+?c0QqsFs4c4J?XvBQ*_Uh=`woJ}F$P z$f%MHCsVdvtTyzs2S)2hCPuDgmWx_aO}(*JdO518wU)uBe(Woa=IZ5J49%wCdz-Ny znxjPpY{&YZl1Gc$yF6FADOIAX@$RF#J}n9;JVa z0baHA=?iP4C=9FaYVZa{zphI+&rln?`S~2$-BvGcN&j-)8Ng#h`jM8ZorO&j4}YTe z6U&w>Yw5od5U{!_qTXE>2?q$}k$3e!8hjKnUB8?hu+;||y+g6zZo73os%Tt7X%=Bl zkgmz6NFg+xjIQIA7R1F-GJgD}EV&|DEl2stfH$6nfvuM}jn9icZ6YU9 z%6a7EM3mn+Q#6>=oWO*ih;d5F^{DUUECw{#nsbY_rvHjW7bOu2 zR5;qLlA9d50RwR_INM^kNx&43iqzDd-AR9ivA5ZXmk-wEQyR&*;Bg*d<)xCD2^;!f zYo%vqy6@Y-wHH<2C|mg6-I^C7DE5;s@a7YJex&e*57Ca$bto#Dy?cV1 z)mLxYlR#~GWQ4S4-=@GLK_Awc@73-K$)Fmq-Q2#e2swe%a~4Wo)R&9>%6QNTk|Fv1_xze{ z+UJv%(6uQl_?mg#)#(->y3QiZ=t%{mOLg+CqkFGNi2Duqlzcq0U&gamp3Lo6qrTo#A?vZ5wd=e9vb5kX5vcy_kvhw(c<#D@DAa{S)!(L#fnjfQ= z1LALPw|U)5j;_(R{e#>SvdPDuZ~wl1<`A4no)&ztoZ2clpPvM96x5$N)vbEI5=)vE z7P0O6$rqC3eLYP&mN@tG;EDNBvN6DTx1C4HWk9(RGAc~mZ@PJ}lv{x^Z3Qnm5E8JK zdVIM0RBW2(ki;!UdJ3UM+5w7LqjJ9lC983@t<(wP)^Pv)+I9_P1 zx%Xjw0AFfokOs^k3e9=9q^Jo2wfqdUu?+PkiCR(_e}yvs97U zrTUeRE$EQ!X-G6}`i-$UqR3komb3R&JhHFq-#vB^%}&-xHZ2QgMuWTpBtg&}irYt-I3luAEf zgsF%(DhL|nsLsVm8N9oIxDZTPl@{$DuDa0}82P!FMNe6_zz*ndcp$(kU3gD3W5r{$ zbj`#vl_A8N8Lo47ZomrZFoH%pt|&PhDjuvMxKi~od5 z0ok;FfCLAo_kC1_7(vW$u*!`-bgiTmS;f&W=T?!*qeQ<^-}b4fxHuBg 
z1-003C1_O8V}^4pj_hProDu3GQ*|p}t9_6~snAuZ-%Vj%w_xDd+Wjg|`IWtKj%A6G zPutb)16PQw{sR+U2m=*VN|{s@x7ET|Rs$7710qMi$1(ovLxj=yQMI6cjoS4KOuE+5 zbpa}^kdkGP6qAO~Fn@EYglsM_taZZ zYQNu7)f>l9D{aFgjiixU6p>9Z?Mmx|cF#&@RwpAs!V@A+eb4{=0VWrS|F4?_s%Heb zAvgjs2lh?}OGY}nQ^T0dw{wK(edzUL!tJjsInbRAh|!*P0(B3qpN+uKtI>ZETD|p( z;(*W9OfmW<#j34~%x@tMu2LLvW|oAR?k=O0(s7PHGesVE9j*^oFjD{tE)Tz?r#U4o zr#ZW435q)XrI&)hG~+3vv0`JL-=dlFT%GbFCN0dsn?o{S8hPgBpsz=(o|b4TZ(_(Z ztmy|fzC0u>8Pja+H{_wi$BSu9LfHWlD3uuv|C+qL!`@|~Z}5H1I3pCs?dE_!tST2l-w$vzR8r{o za7}u;aO1!=X}b~Qu{glq3hUZPVrhN`moP5vy#x&mG~1*-Cs-?0yflLniNv9B4=>F~ z@Dm}QLBAAUhMr{*&;R2pX5MvwI;xFsxRT~zlPmG>*uJ&X4 z0K6$;#U{%8>?5eUsU-EDLWO8;z>-x3@PzBNk)TxA^QOMw)gdOSj?$zgZznCO*H+b1 z35>G5`ZCn`E*EcU89%8s5X!AqHetk&8hX?%MB#W)hTs&%v|w)WW+9{<6w7Gr_W3u1 zntP6)>``h}sovLr4m{+MkR&~IU#q=omf)c^N?QfUmYDg5@o~oGs_U0z*Nb}B6D!_< z`~(_hO$~<7A;b1{N~r#0RKI!S&gNRR+rZ|qoj;RKV|U}u6(8g?DFfjELE5w-58pf^5mvW;A>_>sh`ig#TN1z5`Qry z+Dgy6!97R5>a%@BGYO>(=B#lzE+b~9lcW-6@cYTF@KK8bRPEY+)AU#)2U0J zUV}GyIr$dmg!v5jYc{#?wC~Csi5c3@j8!ao1sNAR>F+QajH4+m+i@$Q@mDeTME0XT zw5oITBd;2ip%a;6`xTGF!={u}Cc>G{a7>6q_1cn(+LVAr>6s)jp8ks}K9Q9G)JV;r|93O3SWbzVZ zYC#S4!^>_7y0`llzb{-njF5nPi&4#Oi;wk{BOd|$9&XK@@kr4 zDJeXNH5vfsz|p|ThEEdcRM5c7_SH0!)N!8dRjCVUsg-x7<~kv4SvZIt3DN@K+_g|Z zQ5d!&AxY(PI};uaLE<290VOC!E^#o-3kMOyf$(S;mc|Ecz%+n!A18HpZ<~o3 zd+ZH*C@%al9zyL#g1ksfh2QkJ`k8|6`eu=s0OpEFub=4M2nY&4AF;ZhR2}OBEP;&5Dx& z80LJqc`TlWrn!V4x8n82Uo1KIiEoi<;OI-rTZCXIu@z5ec=;uyhY55`Im6dG@=k%I z6}6wqo@a4;9G?jZbZ{Ir4M&GS8|Qu!CV*HuaCC6Jv_~4!h|J5|KuwS7qQfU7h7luLYa-KMPM( zY(&#GqFW<66k`lRZ;_Z9NtcFRrV`=u659F`vv2>XDIZ$PSz~daMl>T92b{(QZI^$@ z>}NoryE@T5|NCaxj*jXi(ZNUzgnoXX#2~{T-~8pS`}Bz|{RRza^i#;f*8;#sa)|AQG|cIa>Jf}4(1+6ii#(%No}Nh5o**jU zL9K3(S5NxWKF|};kV3Sl!~+Kfe@E&bj$Q~re-`%n8G4Wa00rRynDc@QS=`k+ZQl1| zh=0(ua9EWVXGIwd6ULE?4%>X@@d^!z)q)*)c#!EGy`4uM>=zFORYoV;z64y6)ucrj z1gID*E|L_XuUVjf6r&Rr?d~y6D=DLBpVt=p@Q*#qEN>t$zL>@V+ipq|DV8Hf#f+@0H|*`j6zD)-sE>h%c0~SR z^T8A;`!BTJXWzLB(5BwI6z7;5;%q9>*=+7a_F7#Fqm0X;z5NY4RwUv3=CHN2O$`QO 
zX>K}aYfyo9-QG0Wx3#VbfHI!SB3iJ0#z;TDdxjvyon&zXuAmSXtTr1Su=Cqdl~L4m z&yovPbfuddYp;tbQc`f&bhi0^!SjJ2RH$IXaP>`_#QO(vdt|%Ev-Ls-3YNtK^y(5< z^;}{KwrP%%}Gf27lU$-F- z(9(Pyf0_kpco=i`s_I?qkZiNp?f>>f;2kI)tT9@M)+_Elrto$gkhah}p>7?=8bGi> z3@Z8IA>9aS(qRi2cq{b3i`cN5BEPQ3HAS8WS~Vpe_?yB&KD;pPV7PVA5W+9$ST7!i zKORCr6pqVNJX54!AVZ3UQ%1$}gJ{PjAKwgr7S^VxPaoWD;{gHn(wjUEd!r)47u=>t zM<418C8f&Xg;J5?&3e1EAfJ(NBrnVlSQ0TeYFrvY(%YTTof%Cjp#xFOK=!=%tFkQ& z5tJMYrEC?PA;Kex)#4Z-lw~h3BVK_)xo_n-h#aFiEs%AlqQ+`{PK9&U3gQ^qj&oyZBHH$uV z=#4pt|38Xn-QADjdh*PV>VROzB;XJlA%k6ayG_uOAYt?Lj1dSp;g~zYq?Sm_1z%)D zJQ8J8PXaL#;mOa}W>f>Uyixy*W1&Ij~q2HScNH7K8+ zmf|13U1+hc`Q1Y^J_&4!b}#;2rTL!bxs3u)>w6@QcaOmwUEce>CjLtAsfYCS6mO9f zSsto<3z65W&}InBm2h&{1b!ILmqKG&3v59cw+PX4u@T5}w*ib8a<@bbruDX|ozkeg zQVRufr$d-F1&lqrG@@26(VBP$jgWAh>FYHJMmFoqQve4IpCR#af8$ZA^y78oQ;Ek* z5+%y-yK73rgmBmr)XVa699P0My3&Y|n`8i(Et&!rZJzPD55|wMmEM7!;YtkVyJ`Pu zb{e{s8a~O#<(u%?3?|Y$*a)(KzB`S~W41rEpMZch*cqs6$(T z;ftVTS@)~(1lBwXjkFUv7yd+&`PfjJm%?t1=!2I+aZvLx&TBt`9F@l;0D}S!00(k< zpK!6#3gLhnMD|L;g6*4pdG?Ay*P2kh1lnOiM&cJaaxEWyQaOHj03sh+XH3fs(C|4E zq2+YaWBLwCj1jj{8ZCSlC&d2BJ+#J$%)>0&dPT3syv|G-Iu&=@h&3=993e4;w18kzSjZZJ{|lD8S~^Vh zE@op$Z6cmICgpPXCv?dHD?)hTM962jphojd0w>`Im?0X+mWRz`ak|d84>!@xz+R6@ zFhFvZekdP_kN035urIJqrg5sic0GQ~ww3y34s;2DIz)Q_z}AD}i1jp!+Jx$Q0I`v+ zHrw(3fhZTpf%Ri&yGjRw;9nB9pI#Y}F!q^{Hy_@!QSBr%T}mmU#AJrG zGZevy766#1C0?EA1@lC1B6OqC_;fv(@1lH}wa09nx@4xZB@5Cx8cjPSoEe1ghn9;D z7fmZ(jeKiH{~4#Zw|tVpqC<>XF5EjEDreRYp6e4A;HXe&sV%F$3|(cAXq?<18+%o7 z07jl?Ylc6jZ&6{rPauH+#JYdp`kZGUJ{uLTe!$Pk0GM?S3P)XhlrOg=7OGs*B_MnM zOIDl)4wOC*?W>3;0c+A0?2u>7{4y}w!C9*Ts(aN_*g-o z;4cfL&uYT65u~Ojb=Z~OBf@(?O_e2MPRO$fadIiW0ijdxELtVkrlWR4Rouc18lKUtF$jf>nt z;Ad;t9OaTQ7)*!VY_L!dTuha+nTk4T(ufL^7PAGjexd1TEg+`@GW4 zO4S*Y68bNoiv3K``p0w~L}|lT7QauEV4IG1xyi{2C(otVW?k!iwaneH1-Xz*wMzKW!~KB;{8c)kH(?A_FICBV>}p2g3B z=*-N42^xzXAox;bI#LdS5owo!{6p(}wAg)G$PRRHm+*pLYiyW?XcLgmy7XIkf~uvHW!}SK{ZWnG#vyb{Kt?Fv%JSL}^tMxza^`zws;FgBTRF<&pr-Aw$p>Bw-6vSj==4!vis^5E8(J!h7i^s~L?B2!XH+mIX|Su@4Rd(1$MKwuVH2IoVFE 
z8laLqdOFO8GOB6~8Wk|A;v#_!lgE|%zW5AMZ-f{z_uHa|TWgm?1&_;}@ZKEo>|!x!U${iKQIT4vjkh#wKl&BBcIK4$&B5jXh7#rKwQXx z%BdIjr<9RnEHH^^YwHebyIiQC*n698L(Hd}38PBooE#*uGnq}M9M&`YrJ^-{k$@VX z!@U3LI5z<_FK00Yo*1GVXoaNB$v`NMV@FA9&Wc8alAN9Z!gd`0XIC42% zWCg0>avObC2f#=hB8?0syOl}H&|kYFF(8sc)7pP-O0Vm(p10+J->IOjO;fy740=%% zm589b@pf_)fC^g03=1)2l3&(;%DV{xoCaITDM!`?=?-w?^8*Z>J4H=ZQ4*g!PON0pEfPO+4 z^oxoi;YenNNCm8kWh*2t6k?1Xz$sY=p@z-FD>%F*K}4IMbVu@Juv0oz3`YeVQ*0%0 z0F*fUn}EhL)FV3?7K#dB#EPbQdOUWFO^GbJ!ECu(5OTO6emo^Z!O$RZfQj@e!*Vbb zhq6tBmQ?^I>N#rCp*ln$>{5;R%ZmPn%}O<^=nRCJ##T1bPVS`nY+Dh9`)9=>$c~FV znE{og0sd4zv1?L-OL8f1UWs*Az2@mDMb0#}_^sv+4f&Pv6AeA{bNsZY9%m2ziDggc zY?PC6 zaDqdyq;w#d4ZZG`beSCn`v~dX!HIlh0Hdu-?c}%338j9%5eH_&cTJFZEW~>E@<#38 z0g+mGq}v35BLGrGQd~j+k91J;v53z$eq~B@VLD`GG2^pS-E6DOLV#3pK*Xu`8|*K9=df-D+XWzQvsve{DI-PW`tc zSTdcOd`Urt%2!5x-U$}yM@;W2yS1xDu>Pj%fHQ-F+ns*o>kav&G_|`hE)iyYw-RHb zNVhr-c$@7XL{V;SCu&;b3WwYCT4}rMXLJXAuE~KB%N{u=gXnV+RrQ;;>OAsd`D|uU z@0iaF?94)KBd+1&d5B5t6~fzUrwCe1xR?rvuMq)1BK4M&tuu>=>xkZX1iqZ?m&zxA z=_~LG`^j^!?5nl$PfTbYlC&N}iWe6wzBlBK99g$)ac9UY5YG^bhMHRpq^5o`vnHet zr?lF@Mn)=fIAsRyii3m|Hf-g~tJTFtTQHnX?lm&vn0NU2K3Ha=bM0^|)5%>|h9VaI zbrykFD0IAQi^oi?RHI=$3zb4mD&w6w$JH-YPs!YqKEkA}?cBcDxFU1&vfqWLOfM8R zewhNhvZ0UNNU;M-0Ki^&%m*c*`|~(pe(p!*kbV`1@w<>1ZaLZf`&m8(j`@KSPE7tc zpHAM)>JB6nEw}a;SxB!|xp(7u9*RH9t6AstSihiX;YYL#RlKNUvhSoS^KBmoZRk_D z26CeOLn&Gx5L++f;@#($cqnG;UGD!8fo7NiKBW+lX6S!-caY_j@W*|!oxIsvU1@O@ zxkImGfmc$`aBx`k_e18D7K>dzwidD zk@}@UxW2HfzFxN8YG1oEs<3Znr!z>*#{;G54%=$t9LNDmxU)&PgTdG_&AXyJIlqeW z3BZh?{+EF@PKm!-p2+2A-$P_&aa|erVc6FYh|xsz1&&$f49sx4#TQnCxq4{>aervq z-s<$)BZu#3dkb?raNd^r^LFi{T(4<9!*K58;gF5}P!@gjZ<}^GgLkKuW(elH9b`k~ z1W!7dk5Y%=J?b7`=Mh7hSyhow$IWDpyKgU;spY*9H$l35#SloCSgU*Hu!AEr|M>R|Pa;Ezt^ zEdHfQjOD#r7XA1pv5VBV+iuda0ITJ2K*d9d|UHo2R);o?xk z>w|%H9m1PKipJNFhl)E7PtV@8I(<;aCaQBk)1c8VP%ZZ|-ro}^JTlXUe!DyZM*zpW#$p7arTg3Ide8m0%*;a#Zg+H=XjCr$HJ3-l=Ilz! 
zF;K1BlB(`aR1iWf#IzopVPa#a22%2DW_KU6_s6kU}L(qbXibQaZ~G>?@Pc3*$Cmo$5lmzv=UA0 zu0KO`@ME<~GBx%$-8E7?5p;0~u-i{Pi&5?sb?z07Oq-<=iOuo;r-E@4GwN-fw5t=2 zvHi&IR^x6fX@I0>2ngKqzM+GOQISo}ZDi=S-LpcW|GnL~{vndj;7*dzy2ak>Ivzi2 zl+5w9e=g>EUb7a4eTK)P2H#6}=;5k-e_PS>B>2JW^oOC?LbKR74DF4AQ%d~E4ih!!!T$ZqIc_!Bv&$jpP zYwTVnFvyJMF?y46^o#`)jyG?+CrW!3Tuwu=BLs0qt`5{EY;WwSh=1q9{(~=n?Hm2yUY7dkuI8(_l5qbRQecSn+aKOLKcxy>J%70u z-NdM^^Uv|-k9!bFgRFOvPegi=X=@6A%Be_FJm_I&r+M zvZ~R~qoMph3e&1888UvnVI}vt=cYdxx2sp z6q_O~VX66r1_!=`n(ge3SV0U@`v1ht?a->*bb=LWY`&MjC{6}J%n7}@qx`D)r9!>D z`f22tl;^~iJ2T!-m4YEP1u@y_yN>SRz?21-1xRk92k|ObkynZ4T`G@%<-?Svh7!FF zgkQ!xXL|AaqMLdQ1LAZRsPQkfL7}f;FP;StR}gN6jmg|Xy?!$YYE(@-|L$5vCjQ=c z{czcdbT`>jvf%b%@&m`4zEOM&x5m%`FqI$MAMua=UQH8J-X35SR=?1%{hYXBH)5&y zKF^cV`-8dbIT=K?m{8kE`(TwSoKQK1=TGq5P(5hT5FOr2lVaA8PS+6C(4Lj@RJ-yu z+Dh#+R)J*2)}rran1M3eo-)~(P|mxdHoB#ccSOhvM_t!0w z)ZOpo9lKEHd6Tk-T!VCFHtwqS&r{NrPM7pqwF;Cx!?Y2up5@ePjQgWO?=PR`*7Mi8 z?{lU&x+Sp6%g8>fI+tm9a-S=(sipWp#w$lm_m5a@wHnz9(quE`=seaT zGwGXp!oqmA{@0K(X?eS`XN4P+wj~wT1%1zivZyB9rumSxk&yyjVSCf=Gr21vWX9W| z^a31hxLvKe3kNs1ro63$O1|i1lc7Peln=Axvz5_ibf?zm+NsKjpOdd=YmGGvR5D%? 
zjE-=QrXlpq_bkN3z8#Hn_a!sj(Hoe06DlLW^{#;jR=o8VkTeRgnXP>~;i#Ox&Sd^| zn1`A(odhxzv8;fWZvJJpD+FHqEe@0J8M~kM zd37|N9OgEkXBm_`Z6_x#J+2JLM0G(X*I?U^Hh!O8Mb7h=!W^8-TH1#1rjEJ_hrPJr zPEZg1l+1X4mkocCw2O)w9q%EIwL}k&V}VKp+aD3c3xy-yi}dL}ou!s8t?f- zLQlJb-N+V|V%tyOl9RIrA}nE|+a`-^P@A zUdfItP?7~NMS;0_us3yyU$0cs^6#NPy^L3>V&)Up>pvYAPj@o5csXSI)ZbEeRM$oH z*6l!iXtABs!49NFFD@k(e`&+30F?RD$mVLt}E{$pSNty=xIOGq+#a z7v98#E7!B@+;&lG4uu?si2lXSmuX6JGMhO1ntZZmw3+q4el@DIpG@>UyF<@eHPPPO zW4KF5L#|#aJb_&(P@(^oc{wHP>uoTRALD@lN86TUUIgul`h5b4pa% zQ-Ab>5i)F@V`Bs7v*J`*Hf3+q|OK2Vr@ArQ7YoQaK`BKG&3H)BdI^E>APxX?G!e(Pt4b zvGW+y?{F@6MFiQ_E21^Q%74P9l;>FSvv?3IA)^g1vMNg95_SIcpA zhwfyETPBr%p_Tu!n6^x(!3S1pmBN(XT!Et~o#%96MrVn6xkhy&ytN5QOHN5y-i z3<9CPI-z0hHpcgT_&QnnzCjr!t@+5ntVfrXCpgd#4Of9*zOzD1<8W=f!*$~U{?B2A z!4H0GuM(k;{U|d6EADcOK%`s+U%A7Y~JqdnHHwy zQ0AZ157(|#U{BR`4=5tPeIHS6L-M(DCy59V^mxMZAV zf-)=Fk)PG+l}5f@T`;`XN%vJqV&}ch8dZL&keF7 zpi7pUn=itP@3nANe;BAU48-*jIh<2e^e40QPtl1+aXS!s8$rl)j9tiv&HR>|bs|KS zrv&VoerC<~5A$CHGM(we)~2sqh$x6^mT-Cq{bMVk^DG%3Eg5-SvW6>Jon~4mGEr5e zElxJ=FZ+wH^ra!cm$p}71rkKh5_QEr6qf+Zyo6l25+Up zgUTj?jk2)799;8LdG##t5C774hEbBY!mqd{$hpF2xm*HOmEgBQ`oFCM&k5h3%!7)@o~nij=r)h5={ z#LLvi7uQ}Dsm0gVCXUr6$UsulYmKcT2qA!qvRL3#T!i;?ZLnVbBE90m^D5_hz9c8O z#uKy%kQokSKCZvqJ2mn)v^h6)JZR`Tuje2FxTYI=&l^a*jeS~;1I~@TZvgCsMs^Bt zY`Jl&zH#Ebagw)bR;y{gxRDX($!PumR6JuSkdxH3e$YgbXlgK zhYx^!@aD}o%|j1fo)y3Roetz8y`(NTP!C?ROuxKXen}J9^1lZy&|57CcniZ}3lrZf zroR9d9FX(YD=xlQ9NMptvab+NUh!+c66$#+#s?UcAU>q#D@Ta4($9#MIeRGdV2mSz zYn)%JHR#o+dusK()*9#4`TH6jC;=1#=&taBaCH-duT94_^;USbdfKk_xPTSfeR6`d zgxeif+O*tT?Vq$;Y6Fh$15A3@-D28~%sL!<+HNqm1JXLoV%o7Q9lj0ik$+zoI=9{Z z+hH=!3&eG5KWQ^P?7VaMI{t543M0TsyY1F^=dG2lw8Jj^->$5~*LhDoA2N#NYqx_b zoiuPqBU=o&JL{EataG$_g9Bo)#dP(aH;oM{OLm3Q{} zE@-@m#P_z35!Xa)_1T5~=!QpCx0-uAA3sD#dt;~;;o>SiEbzrQi_hMvX20&2?ZN%+ z`NY?|@wdZDyLTt1ckfy60b>tPLCjYjSAc`Z;CY- zdB0n}MWThVztrx2aH~CwvCC(Jgt6#l#C!H0w!x9)t?>?v1~OuTyuH#Rs6%E!^|9)7 zWLUI6YIuFKp^fW^%+c5$V1N-wU=$L_ilz=n#tz>P8fL@|N8*R$bVhKf5ssE&+*<(tEr7n| 
z|DDQs)F|O-Bp)@JJu#Bi2zZ7WEkKPva~Y*ak3L%+&CnT(#{hY;!;Npp8YaeCkmId7 z<83bEjX~`gE3nA}*MEGe1H*8wadM?aQ~end?jPE36*nL0qd-_&`3=<(W@ws*eVS(ZG|hH(CVHCw{xmM6WQLcC z?UdLbHQwJ6GY};^D?okNWy3fdX)&v!)8ElBYk4>;&fhEbus4BG?8bQK!zX?DeBJo* z_OzJpYpWlEpL7}pvhY2e3tDA4#j*Uxv78ZDe#7Ul^UvG4&fAx=V1nnJg6Ds?%sZFP z+cdG*ewzC&w16pPv9?_B%UE!Iw_yEc;m((Z;CBlVlMA7)AH(<;EHhZnr$2fGFWi}& z^WmSrb#>9xbs^>7$3(dW%P${en-(og7cfipeB}^pFU?+vgK->!)=}G@^Gnaows)#xwbAqT^ADA5YgQbbK8)J+mI^Tkm=n( zt!>CtHxvXmm2@{%+&0zXHZ{sNwR$&o);4vin`nV8ecdfXw=LtiEz`0s^WH7Y+Lje{ z%liMt5ZiWc+YWKtPG#H9z1uEp+iuittiYwtbjQnW$0u&buWTovcPD6VCxp5aDzF=_ zyBq1Ydp9n2H@a;1LGNzt+HM?m7bmcnpu3mkwwDsOmsYly-n)li+q+ab5CryfbocYz z_6y?n3(NM4d-qG$_RFaIM1g|}-GeH(gPOR5+OmWC-h;-qgJ$Z%r7oaV_pr_Fup{oU ztL*Si@8R3E!(QqkN#LkY_h`WFXejOoBLD{o|DgdC0002gJK7ztJXE9+BB)!GOX1)rr9~E**K)sJG0a^ zw9`7c)jF=vHkRWpnd38!;w_}%Gp6D(pWPkn-5T=V9rxKA@5U9{?GNeY4(I0!^X3ia zEs#g=^E$i74qg0@#Y)!=N#ej6UD_l%fvjw$~(r&IMTvB-rYL?%`N`K zDE-wX_1q%-+$8?gEdJXn_SPf*#5MoSGylaq|H?Z5(=z_oHt5_l=j0&mpL;0vl=e9=uxJmM?N&A&XtJ6=a&s(U>SF6!gtI%7d z(Nnz6L95MVtkPnx$9BrdLe#@T*2F^I+)@6;Lge2=;oeQ`-cR7%RO;VR<=Y~0#rfq#O7gM)~Ni|Mm9v@A2;6;orZ%zzF^T_X!+Gu%N+%2oow?$grWq zhY%x5oJg^v#fum-YTU@NqsNaRLy8oJq5$&6_xL>fFh* zr_Y~2g9;r=w5ZXeNRujE%CxD|r%fOt?uiw9b0}CEZxUk{Fh!ZPb%($`R$B-jSo=my2<;$2e zYu?Pcv**vCLyI0wy0q!js8g$6&APSg*RW&Do=v;9?c2C>>)y?~x9{J;g9{%{ytwh> z$dfBy&b+zv=g^}|pH98H_3PNPYv0bjyZ7(l!~TmO-*L?gzpeiM00wvxdjCnJUx5N9s9;VAF6iKc4&ehp3m>fj0E8H3XwZKWQrMtF z=;^bM3Kwc<;(H@yDlG0d~V0Rylg4}JR#Sr0+&0MKJSOD?H`jbws%p&I? 
z%8!=kJsAVBvC25>v(Lf0B|#JbU>=(Vt-24GFLDV$w*wu?j(Kw4!>X49!TKMh(6-B4 zwe`VTB(DT%3#_-x62uO=Sps10sfY?>;bi)V=nO!3JoI*W3O5idNk%g(O(FM1vT(9f^>s*JbL_Hu zA(GwFx>a+%ampq)^m4*&E1a_a+Ep9;LI92$45qFJP=NKK=IZk^xB~r?E7!0Js?U*v zrhH^N6_beZfsd*T;?pl#>^4Xf%MDcNWt%iQPMHo!w?m_^uC+p|qfC3;3kiE^!F{SD zBB1dMq>hqBW{NG{hn7mQ(j^%D&V&rpLuEP!%Q@}GDZ_2G#vs?!Jw*uCSG!E(r%rn7 zHlZGO=No>=BKsbD?)J`fPi+zR9V2w40coxYq(FxnzzblIBN7#AAFyfxp6s!YJ@ttN ze;U95DyO1FnIHx&7=Q}|;HrJ>>jW`Shdb_fHLs!1bQ&oi)?QZ;*;yn==0l6x zH9qc9m>4M#`(}4ZMLP4AmxLkRcA3a$B4w4V6eL1K$1=U)&YBE?q%lK+y=yX(V*2@7 z9_O|&W46F zEb4O^$zWEnj(YT?m;&iXcR13LhIDJ{OyPX!6G)gmN}7Qr5HG0~Qx?{zO91T{_6oYf zoo*A3o#W}%Jjzmyjx?kt&D0!;n$)HuwWB9pq%Td_OQt$(kzz9`4Au9;i=qUo^^;pf z%}LgAHVmvgoF$gfhdukXwV(#mW?SK@qJp;5qNIyoO;HL*iaKP_qt}qu6DP}-R^q#yWkD4c*jfLs5G~{=uNMB@pInwx>qdieXo4yOW*q1_rCbe z?^WWf-~RgdzpmjgfCo(A0&_;d22QYo7tG)WJNUs6j@uw4+W;SPKF z!$;vTh(}D~5}WwMTM;peSIpuTyZFU0jQbBf)TmCis#neGR=fJuu#UB?XHDx` z+xphH&b6+0&FfzK`q#h?wy=jy>|z`H*vL+{vX{;5W;^@Y(2lmWr%ml@Tl?DB&bGF< z&FyZ(TE*TDcbLH~?&OfW+{QUKy4QW>*RuQFUZyO(=gnqy)BD~>wl}_alf!-c``-Yc zO1}e6@PZq>DEdA)Y!a^UhCBS>>4f-n7|u$00Du4iAb`dr!10cI{No@Gxyb%U-f;vt zpyLvNfCxko0uY4Y z+~v-8x2t{Z4#>L$CZK=>_&ouA@B7~ePx!(c{_uz|yaE&ufv>k+?_ZAq-W}k0%3J>H zd9S?YFVFeBqh0S2F#FUsUH}nPSU(mAKmb-B0M)l%^{$6~>|6i()x)0lvbX*1aqs%q z_Z|Rl488!|xFq5mUrEA8zVd;;eBv|z`NwDe@WUi~>RZ41*T25+m;SWJsCD1yTmfj!}Fs3jspXo1~FfNc1N z-Y0_HH-{KVhc0-BVc3S$r-yNPh5jdpdx(A3*MEUnh;G=1{)Fg;189eah=_eSh=wSL z1sI7j$cT$bi35myX4qk=M|=Zt0ibvR3?Pa-cL6&$ilT^$KPQT&SOE;+ilwNEqxgyy z0E(^%inMr&p;(Kqh>E0Gi?bMuq-ct>Xo|QPjJ+6&#Yl^*2#l@x~SOFM-0j}5pA0R4)`Z$XN36SpCiv*dF{K${^$dCe=kOrxc|2UBY>5vG?kUN)=uDAf> zSbUqf6F^utYPft0U=7d!4bhO2Dw&cfsgf<}lF{J)4AIb%GzpU{Ig>XjlP8IjEXk8T zd6PdGlRo*9&QOy*nUXpQlsoy7FWHkQ*_1d5l`AQgH_4Pwsgl)T4HXb36<`h3APv<} zlqg9J&LER2S(atFl4%)}X(^Lyxt46nmT!rcNGX>>`IdDlmu5+qbBUB_>6Ub6efhmwOqPcgdK5>6ki+lFks8(LfE>fC0s4dO8u3hO~wN zfC1282#TPZiJ%CHpqi`cnz3mJhR~Xca0rTU2(8JQxw)FPDVwkPnu;KsxOoV)8Joyy zo3k04wyB%4Nt>;io3}Zg%h{Z_$(y(-ov!}roV$6PyXl(8iJPx!2#C;|y~&%@*_y>U 
zoVwYbi4dH$c?gJb2$Zl0)ldPoR{_tk2$YZrdvFMa@SXxHpoCDHiLeOoX`lm22m*?r z2s)te`JfOwp$!V5i}0WlDxnq1pc+b{9SWfa%AgU7pdvb;8JY-$prHZDN0 zq)-~AR=T8A>ZDJ)rCdr0e?Xs7+NE5orBiyPReGjc8l{FnrfZ6(R$8TR`lM~TrCAE6 zb{eO2`Ui1Z35Ng;pt&R!;0%CJ3I2az2!Zeig}MiZ3J8e$2Yb+{h{^|u3J8*lsE_KX zkP4}n3aOMjsg?Q%pBkx|TBxJisGDl4qB^RV`l) zTcEF5uqa9h7MlnKo3RDj{s$hLu_(&1F`KapOPd9|vJES;Dyy;>`S zTu2J2(jcJU>Z0H}pw!x~Z3~~lnV@kCopsB$26~)$OSf~Yn(E57dHc8EnwxF=w|X16 z-Fdigd$?|Ew~jlv(V3vix|M)x4Jdn{&x}O`cplhLd zE4T|vpo{Ritt+D`51OK>3!%0PIyP%&-o4YH!zB`~4+YAU= zn?U+}&YEG++6#-F~!Wk>W0GqKsT*LrN#N?~OGmOF}jKcye!yjCr#`&)`?7;$C z#3IbC1**X(Y{i67!AtzaEgZfqdclbR4f_gw)0-1U3M~dxy$6iGbG*OYD!=+$$9kN{ z`^&vds>l9-9LRPYzz58|hFrdUoX7gR$k^MY@Vmi=@W5Q@4FCF`hY$#qtHMRR!Iyl= zCH$qN`pH)O!HD{>B^(H0%*g<|%BgI`VQj^r9LpJO#XeldpnS<2oXWBMujGr$tDME3 z49TR-$wF+X!HmjBd#jOot)y$S2-*kH zP|mM+>V^X{#g^&xzxTFs3qGBl}xb|`_n9q(;MB_=jyq4yRL$Lw|_m@^orBf`qJ;}u7!=Q z8ynPv9k7V)t&i=*^*XaRsOQ{odsL(-TYIg*~tb&fgI(wFO(C zd*HInOq;C=4bZJ5rOgv)m|CdK++1zI_59HToYe^J+F0D;UQN&iz0R=B&k23pvt7{m zdkC`K&SdSuFfPx*edGQM&uMB1%R0#!{GN!~-4JWDFHRW7j<9<^F~ z-dc{+L@V9`d)@>7#AbfrH>22Uz&gY8#nt^br6b<5J`(YKBq-zSl+N;{D|#|Iq0 zznG>+v@jwy-kYhs9wPOe8}{<&hD!S(Hp^p00=y*!R~F;AAHz> z-Pa-f(tmC2E)Cc%joIM(vdj+HZXLxsJ?%#Av4=41){5;aeApfP*v#J9CN1vP8p0l3 z!Y=9z%u0Mt{nWrUaB}SGbIiw>9?15->f_7EcKp5fF36v*?}9w=b&TtC%(uqLo`^sV zmMhiNP|1nF=pik%BF*MT4c?1wvx#l3ot^0C-q{G-@zD9RgsuM56>qN{-?7)8)6uT+ zByY6yTGSi8@|JD#;7ap}Znw9|>+KGFBmS(^hXKvN)gD~*IsVUQjpO+o!(@HjO8@lh zoYh^u)o;z>R=>~zz1CSj(4=m_3(WPZsm9M&0o9=H%FLkJ-Qa(H*^5ot&#u_(D)K|! 
z)AgOxY~Qx-&Fv+9*+*Nl-U_uXAKyex+0!o4<__L$U(|hnuH=fd6iv>>7Xlwvy-6R| zSpULH&-E$J^!Y5~M4#iDkH1@g$65dRR=@QzjK!=i`U7qBm2a)^jGKm<2umK_&M?dQ zjj-b`@;)o~FQ3^Q8~Eya;MorM7w_$IU)l7!?ALntF8=NOQyce}{ns5Y*dSln`rYjw z%d-|tu8_YZlJ8-Z|N1tq;_Q6A28{mu?D|g6)|DU6Dqi(qALBeO|G6Ie1HJm}ztB8x z^o=~N0Fmg>Aw`n_DS}o3fB*mh7)0yy$50}PhAj4(c#)7rjvV_W!kDq+MMEJiChCaM z&73(?{=E!2@nXcED~Ilg+B9a-s}H9(q{z_0 zXczths1VK29z%pSK9y*dP{pK8nL>5S5&2c)6JIXxeLQGWsVRZJEN&6IQ;^0#5|!zF zyHK1+Pu7&qbdO8IDE|<0#^5$szV*ejWouvme*XRa{|7L@xcs^;FvK<+DYJnTR17l) zLmP+^sw~s2!3PhKP_)M&ysX0sC4;cF$rwaXG7~dIO+*k6nM5FhW=kZYg#ZYkf@&hF zYB)m{;pnBQnnF*fseaThC8&hFD5mZ_!VW5%s7;}*yCRhn>7yo@+7ii-tO8P` z9+ficD2+Ih$Rw0_<7!3#LJ%-7e&VG5vraqj#4}I22n4Jd$|^HaO5-X-FtHB@-7vEZ z0i6uAL?^7!P!Ci4tg{PO#L&dfs5%J7XcPj>8AF2W@sB;0G_y*P#1NG;b;9!1OJmt>kOyD?O*Bwpwei#kM~_8>#>r zN-YCd#nC4H@Xv9>Wi8MYH9eGEM$-kefU?8P(mqWk;9x4swyYu*t}W^PKjZ&oijP_hV_JzB-gCx<#*jwtCA zkQfcet(sAvdWpv(r<;RWEGNwhD-kio%A$ohnB`}D=fhzRby%PDxu$c5N}3N7M41I?Xn^YKT%(j| zuAY?$TpSwA|L(=PdjSGNlTd`!5$pJ*|kC+nMK_hPpzHvN5Vdozc)aw4Cv*a-}h1W+uUyg1GLL48aXG2oW&C{DxG# z`4z*a(z}hBL@IdnPBnoPM!1n=J3mTWva;mIGo|rICc((7nqxOX@v(Sf($YoVlQr-u zZcgBYX-sh_$@NjOUa;Zc<}|m{`IXWzn=GBtB6{*Gu z!lG>OByZalMwEpWIa-Wl6~kRfxEj%}>O@t_;^>uNWwI7pOe$v;A@f)wxT&h_djQy` zO!d0{SH5aXoRMQ<{f@{QC#VRWx|A@=<8>xD$j4&nZL!Q+kBNs80-HzDTu?bLl+P>E`LE!+zI@*wq|iJWR@YZVH{0kd4(0Vu=2K z(x?K|NCY1&Gul~NkEFO1X$WUR%;KofwsKADGkb^N++75NHo*xROExC@`cf%G3Yqkp zWmh+WU~$O3aG1rcm*xrzUPBb|h~=di4_j7fB%UmYK^d9ST-HI8bTQ?`J1&&8@|B^Q zjZ(C?*7SrGh4-y5ZJUEtF%da=Moue!k@r>ZDDz^099&N7R?-A$3NYb`qOYNovAJhh~5?OjZ!xYY%p&94!%S+04lH^X5LR-%oZXg4d{ z`GqD&;Ye|;E~QGgidCjF2$YHf?@7|b9Y|=k9nu0Tz`5gNZfjW^ZNA&)H!okQQ;hF& z^_0bI9;(jG&L{$5!>lD}!2E_7f1EZkBG>v2;9c+-2KHCQ8 zcqTkbu#dW0-lBttEFv4P+0*8>N%Nc9!H8|REK?l2alUsk)0C(MG$kdUj@X~CnKAkf zJY)G?w8Rr}!-{?Lw<04oPSp|`u**$E9vz&I^D5lTGbf4dr2+AJFZBhJe@ii0vJ>5k`NCTm+EY}2o4!;*}Hp6OAO^P55TyAIr%yF&|> zu>%$z455dr3Rz+)-%G$GR6+rXx52wWpnpcByE1~bXp@fkL%-wrDCi-k&zrAT86>GNv>wC~W=j&@ z89n-YL1SZcP+B5(xS{xN^JY=sH1ht*`4Cw-LhMiM=1ZKe#g? 
z3@Q&{EE0)`FuaPyXp}}W`zyf+7kqNDD(trnY^N+ttfeERbfKpYL%I-qzAt1trE8)N zX{yU}lM48vuhEggi7l{WLHioHZrdBWldG*mvRwYc6`!cK;s8Q7EViKVy5H$ELR_%> zGA1km6E5=#b%Pn;o5qXGp?LGgev>s*f+qM4QCLc$t4B+9b`+N6F` zgg-k&&sq$DSdYQ+8~+-fkP0h+td(Ib$Rwi@{fiaU8Zuuz#Ng4zVVN)VL#=z{h&^}; zjA$>-^S%kg$gI>#u~;Tk}%8Eq#3yO3ZeRxFe1W`Td zO7H^53}lT|TqTg4Kn>Is2}zphqn~k{94w?a?>e|5Iw14Hn#+VLFxs1M^9VuI2otJ7 zU9l=6m8L>F58eT!3?fSEP)sx8{;jlYzZH_hqVTp_ls&R4qa0D0V8hN371O;-pF+?u z#@m#)R18lXF$6g}=u%Gs+OUJjqN&*@Oj3|6lFL9VKIfwfE=(c^yfapcje*!FVEc;z zSOD44irkzS9ciPfR%K64B>5#cUHPy{QIjY|3??mMSM7}fh!Vi3o&> z@C{PA3R88Fhcg+YYeAe;!2 z(c?#_1&S|qlQ6xO4{g}7)sG%R)qBH2E$r2OTOYzuggR4>sTl}Fz>-5?gibYte~V8t zbk;k?q7pUPO>A4o7*E1*Ng1OJ%)1eJe2;x>KM94!Vk0^vhD1<;L z6F)2riHpm5RXP4WT8bj7qu)fxV!8u8#ywrD+ejY^oXq0ItC% zfe{Is?bnkk7#^grB`dAuoULSHm{I-I{VasdHJIr52tQCcig+kf(UOq8l27f+wh2VB zgWUkzjv(usjnSSgQ`jy-Lf(~N3nM=5`oeQDODDt>Qj8Zs_y}R0&)4wI!#G^RxKnTu zh^eunJqSxzY#myi1V1zs#i21ZzMsS2iK!igGWD_pbW;H4fe7dx(r^`^`yhU86 z`iqB%S()32Q|XPd$3gV6SAOu7}1VD}n1qp;hs9#70 z6Eiio0yWqb>K!RNo+|ULA1tE*Tp54@TWitXGdAYB@VlF;lt48x8SNP;Obop>WYDlv zyFDkxCRPWx!q=Q47lvQ(J1QE?8;V}(;9fxSki?FG%~rr95NGn%*yp6v;iqt z{w7c~F@*X>gi7{Yi#S?=A>~1%Ux85@DwW#9L<-?}5>Pg<-CV1cxKN-FE=IJ~W5#Hh z;!bXKrzoN`$g;OLr3|Z>1cUg4=Z%CuSmD1lj7WflNRWg`_ya2ngh}uNmwsMI=mQKM zB`Z*;&zv0gwq2zLaG`wT7jI_M^|MMc{vU!gs&g0!tlmEA~_K5%JB;Eh7~h&@n* zFp2~}fP|Nh1VBNX^=Oe%1?ZyQZTI!m-`?5FZcC=)NA1YD7ta z%SLr-y66fWpekvB0ff7~j8~S+j|Bt?VFVW8-03}pMks_Vs^35mm}8X?$Huq*Q1Vm3 znl9Es3Gek3%yWj@EWcxmS|~0urnKUOj@)t`(7Pc7MKG0p%I!qxXMFxygQf^}{py_AY3IY2VNN|aWPnllfO$1bMZd;w99L1QLg1``~@Us!D28XL}>`9yEU9CaHNAhshxlyBn1L`Cgc}DA{6=$6U-L01zeAXYH=uJmK!iwu<&QXoDK4AVT^l8l zyIV0UWiy$WOB-Om-C>sUVUL;4RMAvIs?Q|L5fN5LxCSiH0Vr?-C!hf_&s)V%1vj7p zCvbx@XaXAu14lrFWv~G!fP*tYcQZIzN@yS~Vu|1lx^dp#)BeaQEt3bojAN8c zqBR~2Rd@p%5Cc}o24whwB_R1o@U?|!0%S;rIamfGkODwP1{>f4WXOh85ClPB1Y_%; zvIP2&G_eW1tdU?HN&rEFaH?1A14wv^iEp0fZR!BrFGF12Sha2Hv5G?&grgS&V>kw6 zV1g!?gR$4&MMwoI5CR!bhGi%MCP0NzR)sPkf-$&({$+TCo#yK7)}6Lvaxo!NCe!sp 
zT)mMPwDa&NQTOv21P)I8?XXqp!#c=1~T6RtGQ;21^Taq&u3mAkWCD!t4&yZI8U>` zwt`NWlYd=uww8E1Izp56Ceq@;JVgQIOH1Y`QW9Sf}YfOqHONA|25hq%%abWz(lnWpV$G9;`oyd$GG|JIEC0Yu zL^0%_Mb0V!XbRxLzNk{CQmtzBD%Px8w{q?N>h&wwuwuuOEo=5H+O%lpIc-{@naM+k z;?6x-ckW%farfTMd+;Qsyp#SCj!UUep*c>NB-VnYtd zWIA@};L+sFHAjF90g~%lkzB%g{nmY1`}W`1du7)=1W1vgKZ&4K2q3_1wLV0RF2~8d zs3Dv%ch)qTvgb?b)^l>t{t~;+nJ+E-L1e7Sl4X$~p@PH1O+k~91aYweg-&P5f(#W> z6=exBT-1JlscnqSHQ%T^tblpyKu_T-`3^)}432K>m zB8n-hxFU-!y7=N*Y_;{)NqyC*S8o1z@l}{^xZ&udjx9BkOcPEdLkT31?cl~LfDA^- zAjvpkLk>3hAVxBCW%EQCaJaDq4Pgl4TWb>XW>R3e*=E;nJo1=Vnt%Z(mm-s_77bI! z1@HkiiR>{QOwoz7$V=i;D4m5r(WGd1GgaslqAdk=QX+Ew5lJG&B;mv|%|Mb095ZZj zO(b$b!iX_c*l@!QKFE;_BZde<#|&^JQw9yoED}i}2PU{+qcj;6Xra~BL|vpZ9Tlvy z78|pdusJfv7FLKMhW->i(9y^tf?R_NHrOmvL=UVm0_g zZpOa#ND^y^;5nl+!NC+;B19eA5_gQ+1RkZ-X}A+hHh*{1qdH9}Y#-(onam%OphCwI zVm33<9*!6j@W4#)00$$5%-h5=$v{y9GK=`pYe+1G#1qWfiPy7qkGiyBcN-?FVM!Oh zL=@RJfhb~ABzn6y-+lZ2H{dIR>sB-EevB7wJ03(yVQaQoW8x+O(#MdS5)H{JdC;(h zAe2mk$RVg;(L^z;pppy|OrUZI=&r32h7NS7a>yiy#Cph!J}+$!uW*Gt=zx(S-=+EPc!@)RT}T3wk2K9;K@Hpy>2OQ9~gFwVYzDz)`S* z7Q7$^vo$yX(1>@zGn^sf)H}U!j9rMsm|srvheBinJmXU}^K!%26B7`CyaSa`W!3@B; zh81owhhy~qzzTUVh8(Pr17?_E5rvow&aOA91Tu?40c=~(9EGR|8LL7O`p~rS)|3fm zu$8WSB`jk(l?}c{N9KB*EoajvT`taXb*ZA87{Ln9AcGNqu-^;_V!J~$f(|~gg2V)o z4J+(m4vuKVB@HwOG1%Y@jsOlKKt~r{g0Nk{b5mUcV@ty1&JeYuA|QstlyEttWh8OV z%vAO~H^u~w)I&-4AT$%VIZq{NJk}q4vxh^Z0tz=k21ts*1sWtn5RWJXA|6499ate1 ztYd{0d|(W0=->uAsKE_}0E8SoVHzM=TcHYQla+81Z9XYhOo`=B=$T1og!ltbD6*0< zI^+HV05}c+SSib;GPS8reGx6;f-ZL;pWmh5|Rr?Yr^82M3_~)P@K2iwJ78EFDT$J2k`bJXhySS)M^(Tmt0(Il5Y@?Bg|NQqTAVFz3Y!xVI22HiG63I5hp z1r&O~gfN7G2|94YAOul{CTt-LT#%X@9Kn*+&`A-MD(rg8g<-8u)q{2!*@P9blo?Uu zr#fZHp9snwp1 zH%x;N34Vki?56`}2qF@JIK&=!A~praOu9eWX-;G+GX&0AJ44p~XxH@;(io4tksBc&@<9W9N&iE-pU=~m}8iZ_U{yXSFZ;D+r zZ}M11Nkk)x0fi+9x`r%#0u74Mt2w~I2|OtFHJZ_fN$|lBdl*C+{7VQ(7zja|mDzrM|h|_P%%D;*3JRELP7`O(AY*1MnI-Vil=i z1uIlx5=UHP6^^(hBOHNnR;;4XwNu!^S&<4=Fv2$>RObzWLs;D7H`m$2kl4Tt^fFWGfNd}SU};SP)I}WSr-_h; 
zDwB%uz4N{AFfwnRPrkT-*EyZ0KIg~WgeKMmqT_Oz1f~=r&OiKt5{$U~Auh!d$w&P1 z<3t?ef@f8Jr^|$gS3K{yD9@w1B^o9+adrtosDf5#Sl6*`O>t6t+=sUJEB?8L6OI1Fp2OqhF#MaGPQTBCm$ifH|xCJ#R$8-pX zK82aC0biKigmm@O%j_KXOp2mF3xZIeg4|fIs2@of#2@}lLZz&Y#f;rZh>c4iU-vu; zgf!0@Es9KGkIp#@xXncJ3=1lWUsJ5v`30d64&f}d1qVr$;GrHlnbU^7pPhY>URc@+6S|w9UBT z6z?6&Ow^$r3JP>d7j{{UAE?B;MMojLTS|}}MeG42WSPx{jm=S^mK9=jEeo+Yigs1c zc3_Al&LAI_(mv75P8i zJ*r=CIOKUfjxXAU>aiLCd_c-92PRoZHs+)CRpPnvBZEXp9o`_be22@}+)c;@NbCVn z9mGl`0<;7|?zqGr{0a2k6ZHY5*_{n@@!`4|lpg8?Kw^iC-JaY{hld0QcZ~?W9i&;F zWh}*6NdnKgpvlM_<9m_Z6?qzKw1)nX{!5v(Oc&C{EwYYWl*!7}pPDdZ7C{cb{YdEb z$Z5sNJ6%j2Mb?ZUk7a!bPhFw}R@d)wS-8;}C3;7+_{2{V(4UBMDln@ zNeBfC-eFD^idQz!Rn8Rk+=M=6B}#mm-BAltl_gpRr*Klx23-JLR8*nF|?a3^2(CC8Wt+?Az{;q-@CO z0P@nxjoNPr&xq+s1@uWmVdKmQ-6xifA4Z@)_RNv$ly0_>+R5{H(lnSP6hHUmf~(vpFo!w1j09%it})P~Xp-;!p4s(}KKkBv(4;3d3Jf|= z2101fa2AD-*&I%4eCA=ATBv3FB_|eeDp}BHLBv9#ZM_R3%LU6!^tvW^$h=^;)&gr*n;CYij8` z_T(8|VD-q9gK8zDrs@y6s=L1HTBPZ1I2MVb+&cLZO8(BB?y3@!oMDaH|0O1#jpx1k zg*x%-Uw|6Ry__4a=VjqjARvgkMko!g7*8l^CjQ=*Q3tNc(X|#(Oc33rLMzE?qS1+j zDV?1`MeDew5tyk2P{9-*OpCmI7rfpq&UVEHVL;(D&ifsNjmjmkYSqBqlHw?&{J9^| zvL67VOR5d5NjfC%gb@94(IK#;tN96_JWFRbXxX^kr=nYHrmLZh*4uPP%kHBZ(TpWd ztF$)HI94dRY5tGf)#Q@SW@cIDN~}gH5@gQ)E#OiGta8|l{wr76X%gxP$oXp%62`$s z`mtHSJq7riU?9=sPQJSyAJMQwj#=DFLEmG73FHl`KbdRX3`#Q z{85n^{t{rD?ndsM=Wx`l%wePN!e(|^SEm-#lG5IiZX*u*MBO5(PUfUnhE}&uYG=Lf zvv%U^k{Jpk3rvurD%q?8A2F)}-ts=@@8GFnAgAb+oLf$C!4~XZU@l2^aKDA2NrkC65R!;!bX^T5fyo-z(x$TlQZPLho9hS3|<76mjq38ODUkQvwFS zkG6~)2k+WNpdVf<9@6cj>So*lGWVGc4+o0Q&~jy_ksk64XVn^P_Fkk0a84o=bQmK3 zyBaYfCv$KzFrY#%6r1QdNn{pHu5#`Oh#pQ`_NiVxt<%CH0IF9mmC3OJhj557&8-YM zQYMv}ZV%ToPDMzJ0pD}kF&gDuK3Pws$S1nRC)>?3+)BtiPpquXtr|(lPyDXMfXG0u zsxmk9DJt>xHck`?VE2kBjs{-hgyGLZczUd9Rwvf#8nL{`AL8jlH z_FF_safs3cBM)XP>MHx~DV|C)_I7iePW6s7R*l3{BK!u{Rs-xUM{szXDs%psK8I5J z#&vcLwa`ThDdk|a(sY%29|4620&VK)ZkhV7uCeGOiW!0r8nRP2wh+Q<20Nk4Ev@v1 z46WKL<9_f*AFf9~a*bT%ExD-^JFa+gGaHIAA^gc8+A>d5D`sX-_kfoEBIyF@p4gJC 
z1qRC4z^Cg*pj@{vbm=DUwz3YQBfJf-syeoEKX#h_>k>cVFG(}vE$?BusaHR+2X_Yw zLHAZub78zI-<9Ii|}0?K8H`!frp-S6r!Y>KS=rkjfETI^mW%2uVK9H~CH^Enz6 zQ4|&HF*br9dA;qdRADrm3ZPVf(1d5W6m{?eOY*)FRv7lFH&62QQdsh?WQb?&Ldo6v zf@@C}bS2hgW@4$#+;{cFGBE3Qi@|45mszC(@h^XE4mVdQt}6uP9YZ6zpo16S*(zg2 zasyNF{;l7ZW3_kQSrwH~CHt)Wp=9MMErn-wNlR{BbWx##E?XA`&V;Nk%XkbM^!myn z$jYE?+i)w7?AoR9#HyoFmyK}SxOe0s{<1T%j9bo-AlcBA{zA8ffe*T|vl&Dq`gv(I z;z8uSQgZ)g*hw&ZsfqS1t|-&~sdpy21!J{ci~y5BY`=HIzGQTSrOM>ouA5e<9<&0b zYmuIMIS#acJJ)7{YHqtRZIfdtj`zqRO-qnrt@962XKG`JxHgi&*lyVx*lgGi{X&)BjEcAV4kt?#aV zR!4h^k}%)6oeTA4Mpu{Oxr-armR%_E61>nimBCxpvyTg?X*$O=q?11{!6NNsOK~=T zwKXUGj#xcm65wihxT>{=7?Ux~(v)igIoZ2z*i`J910R;Q{`0NRHz>s}q(BcVD+@ZN zvZ>;nRLoia&d0D18!~2VDZ)ZauH;O~QQMo2zX5%uJ&%-sken zu-K5-ECK^Q@LNzqirVBiK2$uHX^$L!d;P{)kEME= z_anjtf%@@vr7*k8&1E%kFGiUnb*>ciPnK*SCy@@lY&!aSJ!W;<*5K5DP5<$E0 zF+c@piT)T$bm&haLxe~s>O%-E$v!93wl(zDY#+67PbMPt7B64Ca!>j-Yj!W*vwY2p zbvxKCT*7`0+x^Rzu3^W9(I&1N`6OVsej(pg3>Wff%cl>Y+%q?=p*@MBG5838#75h< zap%^(n|E*DzcF^49g?L@t4LY$ES}tD)aA`{_5|AUWzgt7OHyi%w0d=#Qj20A8gr#` zr%}CIKcvVI)@T)mfBkbvFUdu7>k~@+*K+=0&aLP2470{gTdOYt4};7=(@xuOLDV38 za6q>_E3L8p6vI$I2uTYJum>>&(7(gNaz?g~Y6DKi6o-zu@ zpx|0cz0-VLN+$2{GEchYhQy1+tg5%Jp3LDtR!P_M#5Y!JW#9cxQM3?;KnO})~TQ?NEIt540q{)CgkMNNZj z#Ih2*uf!8QdLYJ2G0jxdO*!3dMvz1@jwDKu%+W^Ynp(0c!+zv$%I3yH4ZE9A4Kh_) zMO_uuqI8n7B&;SW>%I8;DASO#j{a@UFVE`aQ%wmutW(hr--M9Q4nZUIw9M#K6Tw4M zgSJ0L$-Ruj%BDT`uFT3Z5dbb}>r~!(>8;mZ8D|3^Bwo=Y(n;%*{3+H{i-VF@@|2RQ zyC;*Ba#dHqBNe&xe#*7rQd?3a5iH^Rh=FLpic>NFLMAZ8KJyey+PvUI*H3E$E0@5| z@&c?*#HO`y<(Ul=jb+rb^|P)0Mr5;RzywSrvAzCS#9fa9Nb%mOsjk}Us|)&6-)5d# zn8_n?)N$7DYMM2=q;w^3I;OtUk!9~ipFZl6zj~f-Wpm@ zpgS~|XLJ#i*~7lHg>&aa{vB+*v_v`lkEF&6EpS6})7)9CWfwH7T}tD{TJ_agZ+%6r zF%U^9A)g!=Vt|=Eiq`Ok$?a$?y8v_^g$TLqDE;gGz*Zj^08}0T{rC zXpJ~e;n&2DNq89^m*U6=E|*DbY0B!*;qcVH@|9*Yo_pW3GS|W#j)^mJS((z(azbZx%p+10 z;1i)3#npYPE3&)(kxRw`7xu8HC+tCqBB(?p%P2xfUTlgPOR_~DdC?GLBncZ+!bQBW zF*$TR$rgozM#~7vBxvm8SgryaCUzwnizw5X(6SbsC2=tIVT=)3rXRZyafWoN&-F5t 
zAC&Fok&B7bm^6bwB<62qBFm6s){>c0Ds4=Ca-_-m8@f!77N)*4-Qu9rV!Sv5s>N-a`zj&0=C3QgmtQ;IV! z?^9Vguc=Mb;1QhUR2mtd$DTw~gNgbA+-nxW#YEgkWJo;AV2J3;S&}j{`C+Bf5ULl2 z9yFn#oRj|2RCrFd5OkscM4>{tl~9DD@S+UesLCE$(TT=sl=<6`KQEfejV^Sdhq!3| z4$2mm!gQrF-Dp7@=~Dfag&L@HA~S&+RH5?5nV>q}nTD6F2(k*PD`{%^pbDg$%*RJ0 z2}|-uV!Gog)v7VMD(#YbtgI#rE>vj=B@*$BL0;e)>Pkc)Viq`n9!;dF^d%;VxVb}G z4=sN%qF?jcnf&bGb3fzUXvRdQ{PgoIh%C!qMv6UC1}LL}r4tcBIW)k=$*-HM;pRTs zn!q(SWO-ez;X?NZxF!Ov&sYFg`ZrYB&X%?m(b@ux)-aqmk;&T?|=o|-U8c~!31V-gCA_* z2haDw|0QsC1HoVT+SkGU-9v{XTwnJ-c)|7E1AYw=UqkTW5Lx011gfTw+Rm89zbPsJ zFaQljY;nhA-0@g;+{PX^Ph&yuv5$AGHz6B25kYS9k$Jpic|sYFN0#!C)2QSl6IsYo z)^d;AxE{Sd`NwH|@|BGYW*=u&qlYL&AzHGC=z+w9lUT$e2%}Lw)0CwiWhG7T{)0rN z;aQ_J1?fHyt!Ixef~NW*G(s7@(mulyp@z6KL-`!(l|EY07{xQ6dkN}EH#DJ?M&hOW z>}OL$`mJKQkJA{irFZ>wUi_%>t#Q2(8SjID)QAQ&q7iIi|C-pr2KKOvZER&P8`;Ko z2C$DUY-mf{8O&C8w4qV$V{cp9$&PlgpIz*2Py5>4wzjpo-Rxn18{EIl(|+}{Upc)Jx|aDhAA;r+Jw!v7uc zYF}LA9nW~hGtO~{Q~cl;H#x~a9&w9vyyOjU`N=0<4SW~88rF~vmjVF(<6Q5Y=gOo< zz6MYMpRI19*DK)luty#0 zLmzwCnT~a`8~yBUFZ=>(X~02;7>y*GgCd*6En^p1GN z3q9$LSG?mHU-qdh-SL%Yd*da)_{VR)@rO_RF3U z{`Ik+ed|+S``p)lzPK-I@PD8DU22GF!QBVbQ&;d1Y z`s!~8iBMDSZy)l#jpm!Pyhx14TI1Psn7~VkPg%E3#(8BUyuy_umGpf4@b}i_wWV@ z(Gb7T4kfS<|IiM>@CNlT1k>;i9gz(KkrDrJ4NI^9ivF+@Juy@C&;7y+00toZx+D&{ zLI4856j$-@PH_}bF}z9<7OBtsWYHF9k^5p17vH57!w(gyZx>~8`oilMq2?7oLKrc-9LF&n$*~)WaU8o59J$dM z)o~ngE*;mA9Bt7YMbRFsF&Sg=BRKJ1=I;{$lD3jC88@IH4e}rnasxCVAs1314YDB{ z@*p)JA``MA5t1S;@*y*_A}MkrIT9l!QX@T5BRO&-P0}F`G7eF305~86IA8->k^?kA zCTH>`IY1_9aw2K+CKvK1a}pGAw~oEWeT`xzZ=S(kkH+Eq`(; zThbvXasW_K0QxZ?0W&al<5LFU1J1!4%z+%p!5b8_F&EPtAd?)x0URC^GA+|GCleej zQ!y#CG9A+~FVitKGcp@9G#hg?H&Zbg^D{%!8z_@BVG}Y(vouw6H9d1OD>F48b2cCI zGjr25aZ@tMVH~{S95#Rg2fzZ(!5msa8_Yo)x}h7&K{}(e9Hz4zzQH=Fb33OqI;Ar^ zz0*6d6FaN3Jg>7Gz>_<(^E|h+J*kuaJ;O6RyE8t$lN`j8KI79qx$`~o^FH5GJMj}c zwevjT^BdgrK+Cf~qf8@!<#yum=Z;TzE5IN=2ZVoWeOG?}zU1IVEifB_hM z!54&KL`#%JeL+Q4G(}OAL|b%4QM5#96h>F{MsYMoYjj3gG(~CjMs3tbfmBF=;TL!m zM{{&Yle9;XbVO~mMOjotiPT4d!4!ZY7Dn_$O(7P{K?9c1BQ)R}dLa}*p%;FkOhLgH 
z%Cr~CG!%MaOhF+{+muby^iAp17w(iz)znViG){YAPv;a*{nSn2R8HwsPwkXV{}dDm zbxi+MPvdk?2^CNwRZR!=P1pX^O&e8F)wEIFR8aA>Px;hR=M+vIbx{vB6a*DbR~1he z)ly3|PD?dXSyfgg6;sg^Qpt2p?X(nn;Ttkw25D_x^x+6Q^jNXx2tSku~l2Y^;yrATjP}! z(iL9I)mhz@Udgo+Oo1FWUf6B>3CMqv{q z7GWcHVkI_XIhJBA7Gf=SVj~u0H`Ze>R%20iWE0jCBz9vNc3}~=VK0_t8}?;iHe@@t zWMLL(O?F~swq`AMXZ{mbWEECqL$+ma)?qQ$WrLPsWtL`{wq#LuXjxWbL7^KoUsD^(mTu1$ZP_+% z%a(8RHf~E{ZU44y(-v^aHgD^;Y}eLs_ZD#vmvH+wZk<#VfI%3@6&S!FSU(~Iw80lT zflNbTbV(N!M1gcsR}>hPbWfLcU3YX_7j;t?bw43?LqQa5*LH7Lc3&5EN7oWQfpWVH12o{u{D3LpRiG;g?>FRUnEg05%|OM^r>D7Z@y8M4dE$M|6LI!GHZ1fUDFO z_O}##L4PZEfd6-ZOVkt=IDj4afeDy`|95};_kS-Kfl0K0{a1h?n13C3f@L&>F}Q+B z*n(j+fBjcRZ+ApV!57RS19UM1!odn zRg2h%NB4+*7>SM85`NcGnRtqmSc|0?eUCU&t(Z|QRaK){h{?2ymsm}uIE;_D7ls&# z#Tbs2*onnhh@bd*)tHIT*oTwYjiH!PftXRnxQLyYjEfkE^Z1BG7m@e4j1?IaE+JIU zSd2^lbadHthk@1mG_+p+F@8PSYQWZk1$Tf&G!#gAl+kv9MO180d6fm1mB*HpPq~#3 zHz^%Hm) z6gFX_v-banHKfs6`HoP=NLr`e`mNzQu5DVb>6)c`S(tqq9FFrN!n7AQp-dI_jA@sv z3A?bjH++-YunX3(8GCdWyRjjgs}vcNM(6 z(R;x~x2P+db3KB%<@>^AYkog_Htc)9OZuigJjCzYzpb0EtNE`7{1ax|U>}^RTO7iN zm$7Rb#=*P6c{{d0K~3A+hE4FSG5p78tF1-6u0edrjr^7G8m~2Xuc!IMW7`v!c(9e) z#+jPRX&k{J+{zvMz=QY2wcKE1`;6PWtSfwKIvdE*d?H4=zKP+;TiVTu{G^M#u1(>t zRXVSM8cdyhOuKt@1KX*o{KXx-yche+2VJqBTF`0xVO3nLdqEp0{v5vT<;>H((jQ{Y zIh?uSeA79duIZJ;$sD_d`mcR~WvTklWjD~9TG0)h!6Tb{4Smb6Jl13V%DY)%k9fz2 z+tPXciS(h9l{?creb|YerTg2Ne>xV!6t;bVzzNy0xtG-oTiV07)(^a36Pwznys2$m z+pApG!ToqAJAKJo{CvFE(cN|Ey8}DC*yGw1NMX+1y|gE{r-LDylaZRmI!+%pqOYCC zYrWfV8{4HEx3%5dn|i%A>L|?b=~<|)93x-iM`Hwy1Jzq0G1ua7dBG8 z+qNIu(7T*;AN#1S-PV_Syh}dSTiv#Ux4~C_+t1nA+q#NIqqWxO#hz-`dc*m; z&5u6qr`zaD;S&1x&i^~81)RGZ8tRcc>bZLAQCIJ^zN-`c-><&q5gV+(UIQ+h=f^(r zGiBm89A4U8?f0AQ=l#<;p2Nj-tb2iFR~*$JobOrQE$b10woz$IudQsiTlbaybN`yVxjvlD8~F(v z`KdhMr+O3wpZV`U|5yFnWxVTEpTYqGK!|+=3mQC#FrmVQ3>!Lp2r;6>i4-eZyofQQ z#*G{$>Qg8WfJl)V#+5pWGNsCuEL*yK2{Warm^5qJyor;guTrFdfzvP&K!$I=Hi7E( z$x|p$ph7uK>NKjIPJR$blXE`uz(yu;9Uj3mb;`(O}|`K}+U*TyrMm$t_9Mv@DtOW6QvR_52xNqz0l! 
zk$PfURqWf4bu5DJW>i*iFEcFUyw_An)dIj)3Y>^-G<; z_L&Cckwh9OzR1N?Q%}A4RgA5P*_mnXgecb5jC0U!VgM()XH zpML&%kc5p910`ja6^dwf9-ah@{wY;9W}Zry$<~^?S^8#g zbOo2!UVyI3YOAgahNMACR>&kx8nSd~qPn7V>#i#GWX=X74KP|!zJ&symtU&+DX7e1 z^{i8)m~HHC)ZM+^oS>(K)#A@y6UbQVXOtsY9V*M`bux2itasBXhvKMD!d_`&Y0$3sHGS8Py|vd`jeTjgH*OlEeQGh~p1`OAO?T&> zf1VM@<%+x#-+h~m64NU=`lKnQUw3bakB&4{Q2-~aY`COawK$v1HtrVi)G7|v)?*L< zmX31<*Y<>OgART4ox8ga-XsxvdZDkYwEf6Y`_!^XwU@?a*1Eq&HmBOoC%ohHadmv9 zaQzoxFWR*IqxAakuRpxl``W$i32;i_`3b+om!&w#Oe-&nocj=l8wZZ1Tk?sY!#4H6 zrNl*GYwHFFE(ZR9j1|dl{41de8KMt&S&wvyQXNYIXp?&lkTN>~kpat7lwSNsEV%<) z1}B(4-_@vDKit*@El3u*oQhl{dEN=12*p+%Z9_1;(7dWRLzI{zWg+SW_#pMK6sae7 znnGf0+P4-n0ue^LYtgja(v;rxqJya_P!#WoMblWh(6mK4~=mg7mx6ACp+pfkDSaSWBP@(KjO+IE)?ZWJY<=u#ezg4A=WqQ zBCgXQZdqv@3nY_R#38OlEN#Kr8_$HW^?hl1bsC)rJBiF>Mn`m_`JXADghkDWqLeY& z;z@krnf@yxka=#ar$;|} zqjc!KzM^JIQkhL~2+L)Llcf-OS5uv$kv|d)9xkzZRZvN+e!}VK2#JbTb_oiU zOx4>`IrKfMwbEbYlhmA?rBhJJ=RIB3SysrD*R}Y^69S{t{WzD_#44701ktBWnD*AT z{<_3RwzqiT+z&U`Ouu9)1jj^XBoW;C*^gpc^FfyYhNp?k%sb_kv-WJ zWB98t64IqHlb^zPW7C>qP)31eXE5Ohmq}gHPWd}4Y@Z7^#qe>qk&&!soO&JXrX*#h z09=*swAq&-#hp``XhV&w*KIY>YrhO;S4vC7a^ehJ6I*S$Ps^1Wokg@*T=jm#D zGAfNId_ws~7_ri&bv_ckzYXq9h3Q-MAuogGeGSi+=)s^IQ^Z8Z<2@Z~-OD8Yxh5(F zO}{u>zVd!Gh%yZuFmEi}3+Gr|6I*U3L0sf8PY)y$O6ro&tQjeBCU6FAvF)~`o-Gfz zi1F*ImT4McJO{V0yR_E0=tv=fhc6MEDeIr2!InP^=t`Z0lg)RV=ly9dYgeKHc>ok^?Vz(r9$6c@JJhV z1hsQTJ?wO&5Fu+dGh~)MnP}D)-t}rSO9zkvZMan03p`&ULuEPU~qsXAIqpO92oC#}x;v=>_q<*STc zj$`-2_uCi5oF(RTnr{qeBtxFK&owq(4!thG8qZ0xVRA23-VjT+n^bC4Q?M+5O*a%4 zxQ!4l&a?FNRJ~3W03fiZi2r=-3_pd%f*37eR^{z#- z>QPayQ9LaYut)bGVkf)A4Q*_cnz(87KJbT}4XL_x6Q+9SyM}LFX-jh(DvTs)PS$G9}=5%$QvR)E}cjI(-!DnT@r9^9^4IgxT3&?^BLOnqjfiZ|H zz#vyx@-kbJa8f3KGL>O9GJ@GgYcNN1>a!-G$7>dYel4hkLehR{_6ah065KTtQo}L} zCVP?+GqQFz*M@o3MrAwYUi3j<6eA#`LwZYyhRmTt{{1x)0YFkv5tn)W8W$9aQ@fgOfPBO!Q>2#bSJanjX%?$$R)2Zfu_T~YOH zks>fyql1-KVa4->ITe0@7(q7DKB|E^?qnA!C}goHjm&X0a`;#>k&6IEfBS+%khlhd;|6Y}>M^eA-na#@YGS+;{L`}kQsn2ITPkU&U?Xi;~2CrqjlmK(TXS_5PV 
z$dX9OmKPC|Z3R#=*_67)bi8I7bkUK2n0XB|X9PK8G=)whnNeuzGj?K>ZHbtKM`n#d zU8V$&P#6m+;axO%Z=)f3roj|icuN5{T!J~0QFo4;hZ_pHT*j1`r&(0R=3gf^N^(gP zG)a}?^NYz6j3g;vp=pX+`8a-AM4JAhTH}IA4T+k=xqPvOXjbH!CsR=4(T%sWeIluh z#)v!xH;Dgeot@V>Jfk_F;6Z`blEcZC{8fug*(5|)FHlK+T*Qk2KseuU2@R$}87PDv z$&->}AM?4BxLIXArHFL`Uf&6x!zq!-d1hz#gp-jg%UL4%r$e5Qg9zz0)Tw8_XG`Ts zPTR(pgXt8k$XX!;aRHiG4LG3g_e|U+FDK!YuyYbqNSOD8CscV%YbclLun38;UGZZKcQI?(r){Z}gTi_xB?_j139xVr zxG)Q_;5y%N5}zOn!XOK~fD5-U3&BtdzbXs2kPEr64cU4Nv>*(J>LH6Y8=(CTP01MZ83$Wl6n4=WE5DT#23b$|zu7C*0JA;t13sV!vH%RCfUJ?~M+&MJt`tTp(ueD6T($9Fry&fkpan~?1WwQf zuy6^3wG@}23v1v6N~;C0Fbknz36_AbSfI2`fCaIzqjV7!rHZfyU+zg;00+g1wX(ByATY3;~}$<1~^a!UjPO|AO*J&43_W;YoG>?`vpfZ z1a=S%!Z0VLz+Gk9B=cf#RtROdlY!SspVj%K%+eI@(F?9n1wxRwQ{V$^01N3s6`}wO zX&?lJ3kE;_AO*Hy38Fv?cEAKufCg)T22nr@pw}Cr@Eds-QM;5ZcHs>w$fs}nkZU-x zjTx1Y(I~V~2SNY_xo`{0U<6c<3(|^gm~ab1a0SY+4ayJ)LXZZuU<$OL3;LQ1%76q{ zkPD{p3%{^Bn6N9nJHtPx=x+qk1>LwXd6eNz`a871-4)d%1{JUa0`|& z3cXPZx9|f~pbWTx2k)B(u>cCUKm|pB47gyyw{SHYBo<`*7MbEh7Bz~+XBXeVVp&SP zTY9~UCJJ?{1vsD#rEm(jkOn(|2ecpxpI{2Izy|Q^#HFAMLofxqa0!wT#cKe+!7we- zI<5ZGI)#sMpd9iYjpjPDrGcUX!ktfV4P(2S}U?Uckh%;Gn*M32cA^Ul0qN zUNeBqBpWKeSnm4{G3aBgvSAYwpF#Za& zPzN`V2C&dimtYF7Fa-MRtjK@_Kwt~O&>5xx3wZDYS%3?U0<5$wt;ou8G|^<7$XS-q z#+DqX;&+S>EEViA3b9}dQcwlkfEBv%(BVuepYRImYYVPm3wHnnKwt;E00uxn1Xqv- zU@!zmU<LW3&9}Rv+&MpP}jd2BEL$}k~u)Kvr2}EKx;EzmCSuYi>FxO z3%0-nRe%dA0@ObM2D(E@H`7T{*;+pEv0ih zy3ry_?&Pp-=*ecifCN3HCyEni+!CjZ3p{NLqi_n``vYG<3yCcXvS15Y-~%_%16?o% zLtqQ0aI>W_3sX=8ic7{j5euDxqIz>EsI-gRh(Ob9z%Z5DV(Ha==Lw^LzC4`^pr8q{ zPzOE`(aO6NnN0;h@B=!)-9carw{Qh`I}4-W3ESWUS&$2sa0)kvmw73=1$ibX2`=?R zZa*sAu~?k%BDYW35}JET)tUuUuno@r%0CbXv|y=Y8Vk382X%l9%5VhbeY1d5)K@SI zGttaLjBJbY#2LaNcSx?|GeT4c;6FKVaDY^)*MbFbjB~26o`% zMlcJ64bQaD%DFJoxBv>AptT&t;Wg^vm6wcYv337U;w27?;yI!MRNb_o1ww!fz90&= z00usQ2a2l9P<+>zfWCO3zO+yZp8yQEum(eL2f;8CrjWB!;>Kv>Ek1X_>_ zp+LyMfOC^Ps}3`YrYIK+$(_R?oPO?-fnFJ-juOHE46x9>U$6_apbS%>!LyJFnV_h` z@Th}|3tjL7bzte$y$w>J<+5-J8GGrN;3!Yn8RCAC(ER@7oaG*PIpxjLdq^}#S_=zP 
z`~|n53(6n`RA39VkO^%d3+YkpwJ;00panM&2c}>NxKIQ|pbW0y3SWQ&YfuZ&Qnc1w zr(KiV#CJ6wHrC#*gd&>aqVyfZoCV(d1ydjdZ9ogTfCWh~u(E&#T95{M>jP^r3jlo3 zcW?tPsWqVC~7Bvv7;}7v!E~L zVB#pNXv@$3!5;QX@}sW?qUs7!zyr9z9&B*3K2QZ}z^_;l3$CCBd;0@ka0{y{7yHN_ z_3co-?M^((wgHX#O30{k`YLs>RH;wE9!{iK(c(pn88Kd>ND3IZ4Fdp-)bP#MCQ!XTeF6n4)XPvSVa}vk z^HWWnFg@ku*)yh3oGd~4nx#t@EKH>=U7`gGmP<^tToJ5g$(OHCOP#v9HLI2{OP?UR z-070$O`vIC?o>OIZO*qm(Y{>i3)H4i{@pYL7zu#HKEQzm4<=mL@L|M)`6XuD*zse? zktI*2T-owv%$YS)me-dgF^!=`hZZ%u;=zNWM8ygg%oHh65>ZbSCd`*av0_b`a{bWK z?n}TVJ}hn8`0 z%G3K#m%@Xx^wLuMmezZ62Rh0pf1m*T$)``O0>zVF^z!Vl%^Gy@K?oz1a6$?zw9qkq z^chUU&Y&9yxrszk4n*Tfv8cF-MzmSgJ1c7vFhI4sL^3B(s@tiX3;+;d zBnB^Z^G!J8lygox(aK$`~)lnyWv1hHtSS%RaRSd^;OIk+i;&dNzxNgTyrH6MqNeY zb)$zE@(3i)GWe#K>8gv;Q<%gAbyFp?>y+95s14IgYqdMDuDcTaF;!T@6?a^6%e8Z) zH37)8S9V9kvE4o2&Cx_2o#X3JWLx^AEbC;7bV=@>?JnD#rX85zBn8IF$!hhz@2)iSorqn3JVswd2F)>_rQ^<=KQ_PQdDO5sr$ zaK454q+V3=B)_A9PHU&03vQd)oU@Ia$(ui6Zxe4(rMPOq0~dU7$vFIY-LL(WF>%IM zoCuia%7*TnyN24gQAg9VQfMxH?)1^N+2WjYxyRJB%=@Hk+Ha@{cl~wPF}|9x4%0=v zaolsSXmJvKiQMJBj2*JfvX;G@)46d1uu`E(Pd+<=o&8eZQB~J#;?`r=etYg+HQevp zbtkPNdATk>asG#db#C%}d-+K}f1j7X^k}gidT#!lKYIF~?BW~E1$9q=10O*n4E_I;}XN#0IGsy5=9Njc$>qH=;jxcqJ^$4XgOZ;z9cZVxP(zm17Nzc z2P=yeP=+&nRo24yKpf_fBFa%;*Q59s8nb8c31z9a4;o#7N5qU^@(2Wr$m9e)qa{%8t-3jujbRKiY>fw5u8NW?n_-iVPHQDkx^SVn}b4@f{%)DX|Y!EC|Jl4^9L zC-<|-Cn>3J$h66M@fg&fVHCienHFDHfX2gsW6CO&4M?@)w@O}~g z#ndr}`6P&*lcMRYDNW+Z!kz-)rx%N%P~ED%kQQ=$Bqb4zw&{?u`N)FCx{EK=){+fg zE39P&6{H4h#7ma*h;iK46#2NO09fFylcj8C^!Yv8f$=m=H5pVd!r2^pR#=2OBL*#( z&?PRGV27pYzoezwu2vMMu2n5}9JWUVrE9H~V`bu`?0Cdy0aH`aOJ+0eS5E#=FKM8tVzGRCUi4zfV?|{ba!cmk9PTKW z#M&jI$n#hDwX>`?rS5)D*ul=7M@nFcCUYa3v9_jH!5SNB*Whd1l96$$;2i$aqCx?` zxy;ss6?LOxxr)cF?w5WDo9#>{i4uB_M6wHJF>gby#t`)af%IKAAjjfC;m9fk@FEdvA`q!^1X&8Dk zO>z>AeB^{JXx3VSKc&~NWbTvO7?@xNsaWtqJ7 zGD%xn)~8G`W^MIg)4&Efxc*a8b&WFQ@^HSuYp!WU72V7odT!G_wEjawOypCidAr|* z_OpXFlWkdIZwX4%n`ixPR9+Uw{d_8(Gs0_=-Pnu(FjPy$iB%7qT7)Hzw!Cqs=4@9E zfPIOzxF`*7fKMoE9B`$%4GyC8`DlA6@O; 
z(?=KWeCw1SJaH}8NGyiU+_Q;Oz(u6Eq7lnzc}vv094__W{;8cU73!_8knYQF*Z%kc zR$1=H)d@Vm>czkrfPC!* zDEY~YY>|T6vO$OBd7npM_PJ_2yLdG8$YjL*biZEhqfNJNIUhcMGb|UqvCwQEpML$L_V(c#H~S~+{%FFc+}^7? zAzCBjAv^<2KU71e(^DcWc_~ofgkCs1R5>uVvOf!?Gr>^0{L4MMa<7-uF#vEkZTmbn zIzMS^KNPH~Y{QDR**6ulKp31F{JS9?+Q0T{96$U1p>TRGMzNWlYrU9~HUObGLgTNS z@~YAUF+&52R{Ng}fVCKm!g2Yx2IHoV;6UTTtJff{zPO-#0lo{vI_85d#Ci)8R5mjF z!O=s)A45aGQ?(U(zM`8nDXhP^N+27=!Y~?<%_}ySQkFK96uCg5!s8^Lc_(REK=eaB zX3;5(;x^B4ygHo3p}Ir%%AL4tBt1M1Uzi}6!@H_V8uAk~{c686B)vNxYFL|Ggv!63Oye5(EvjnWtyD>S4VgReXs4Z91L`HDH1`NGS=u;UXmK|DZ4#Gm%l zIqX<70^_z)0ssoEz*(F=+{?X8G&gUGg|hykD$}BlvcL+hD!va|Kr>vdQxvgdRURNQ&^Y z@N1u&u@A#kJ+OKs23$%v)TC5IO`o7f)seu)i$2V(w6>bTz5J)XG!5R9ocaPZnn_5; zq#I^h%tT|yO-j6$D6rtEP3N2pOT4q)%*q_3FoYDoXgtdke7~1e$VDWRyjdD2oWQAk zPV;PxD%8I{!MvzCH+bPQg;1Y`6F};)sz4;Hw{%9xM9b7POTPoc!=p#>?3S#4WU|%OCww616pdJUGvMpdCq3@CheLqPe0(Gx_N`;)K%GYsMf1 zx^01`)hW;{ty2Ut(Gw|Bl9Dimdk#ZEBa{dWelanHklwA0=iIh@Q*J*_cc~y}J~2H=gd)mHQa~~zRsMWQW_-Q6h{{-%)#tp;Ow}O<^2_XNR>%3iKGi{FEQ|Zp z&IDw{HC$8FJU*8Gq$2_07Ale^N<~%!jn1XhR6P6A8QUE$G^l(bo|H^B8SPKkG&M(U z8x_(vvztgPr9*SAEKE&LK4Dkyo2xJaHIVRB@f!;0a@Y_eME@L4#I!fK#H2IJq@k2l zL&?T~t<4wHKpNas<0?o#3zp{a24e)&w3#F@(@%=cSYO?$B7|9co!HaKih7ip6axSs z{aDPT2jS8HZXnr%1BTcTlu{@ZRM7x!umlK17Vhj-HmzE#byb(RgqKJg@>7s@lO3S- zEe+8JT5Sm3p%H^kk+Vf46cL5n=!8$KuU_B;ZG(vVcn?wi)~jvGQJv6esSozxgix4I zwaSgqz28Ixu(zp%G)!f+F#pRgX zGTPkEWeD>S4&tEPV5!{AjomVejmjn6f`nYuC63R$o<_E zS%^}ATXHBZPl#ISxG=oEj{Kb1L{od|{De<*lwmDz!HDB}% z-}C+6FkIjCec$*^U+^to_pM*`E#LV)5B0rX@`DMxSevuE#==G1=M<#cS(9}^+TkF^ z(G8KwT^aLuoY;t925wvmmI#LE+y+)%%bncP_+ZJ^+|mW!;rQU;#U&1&2oEj}5wYCS zb^ct>m59c@;1Zr;6^UGo0kBMSIa~t{ zs;t=na;V(hz$+VBk;wwg5E2aj&5jTAcuY?24X0Ob_R!YC@7K0fOGhUR_F$A7zTLm26*O%ZUE|_ zF6y8b2XQcJc=l!&VjZt&`_R%^3H>!e=lVQ7U`;D%NxhjaK5k@x^}zRQpjTE7Vr4aflg4cGt;DD1;d zY{gb=!$xe!X6(c^?8a{F$Uf}JUhK-Y?8T;R&3M(oQ5?bIG^ z%ie&=ChV7Cx6R%F!sY-Cux-N501n6i-RAAx-tFEFZr~nn-!5+BCT`(A?&3yn;12HO zes1Jm?%rl@=vMCPwr=XiZtK48>?ZE)c5dpnfZOH(@&@j~b{V`ofW9_Qen{Ytd2jfR 
z?~h3r`SuL@u5bL#@4mQi{qFDh=I{CjaR0`}_ZA5O|Cosxa0Op*25)c&e{cwoa0wsq z*d~DWHct(la1GyZ4)1Ue|8NiwaS1P$_vM*vAzD&12=IPkMR+Y?H9Li z8NYEH&+!~zlNuk102qK3ch0QQaUw5r4j+Knj%_1fawc!`4Nq_Yu<&xJ${@d|d3bLR z=m0I>axU+3FaL5c&vG!|at{#mF+X!OPjf9Vb2aC3Gf(q32Xi)8^D?hj1;#&asC4g49t*fm-adnw*%VtIRSTW zwG8YIwR3KLr_%-DRFoA$0&D=f%gXK7v)QpLmL|c#FSyjL&$D-*}Gic#r>hkPms0A9<25d6PeR zluvn;UwM{qd6$2An2&jxpLv?Ed7Hm^oX>fk-+7+zd7uAzpbvVXA9|uMdZRyjq)&RK zUwWo*dZ&MSsE>N7pL(jVdaJ*Btj~I_-+Hd^dawU_un&8&AA7Pdd$T`#v`>4rUwgK1 zd$)gkxQ~0epL@Emd%M4Tyw7{R-+R9Ad%ypCCIKh_*Z;r|e8C@l!Y_QoKYYYbe8pdU Z#&3Mbe|*S~e951D%CCIOpOAn606S;EsG0x( literal 0 HcmV?d00001 diff --git a/tensorlayer/third_party/roi_pooling/roi_pooling_example.py b/tensorlayer/third_party/roi_pooling/roi_pooling_example.py new file mode 100644 index 0000000..f5366c2 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/roi_pooling_example.py @@ -0,0 +1,52 @@ +from __future__ import print_function + +import numpy as np +import tensorflow as tf + +from roi_pooling.roi_pooling_ops import roi_pooling + +# input feature map going into the RoI pooling +input_value = [[[[1], [2], [4], [4]], [[3], [4], [1], [2]], [[6], [2], [1], [7.0]], [[1], [3], [2], [8]]]] +input_value = np.asarray(input_value, dtype='float32') + +# Regions of interest as lists of: +# feature map index, upper left, bottom right coordinates +rois_value = [[0, 0, 0, 1, 1], [0, 1, 1, 2, 2], [0, 2, 2, 3, 3], [0, 0, 0, 2, 2], [0, 0, 0, 3, 3]] +rois_value = np.asarray(rois_value, dtype='int32') + +# the pool_height and width are parameters of the ROI layer +pool_height, pool_width = (2, 2) +n_rois = len(rois_value) +y_shape = [n_rois, 1, pool_height, pool_width] + +print('Input: ', input_value, ', shape: ', input_value.shape) +print('ROIs: ', rois_value, ', shape: ', rois_value.shape) + +# precise semantics is now only defined by the kernel, need tests +input = tf.placeholder(tf.float32) +rois = tf.placeholder(tf.int32) + +y = roi_pooling(input, rois, pool_height=2, pool_width=2) +mean = 
tf.reduce_mean(y) + +grads = tf.gradients(mean, input) +print(type(grads)) +print(len(grads)) +print(grads) +print(input_value.shape) + +with tf.Session('') as sess: + input_const = tf.constant(input_value, tf.float32) + rois_const = tf.constant(rois_value, tf.int32) + y = roi_pooling(input_const, rois_const, pool_height=2, pool_width=2) + mean = tf.reduce_mean(y) + + numerical_grad_error_1 = tf.test.compute_gradient_error([input_const], [input_value.shape], y, y_shape) + numerical_grad_error_2 = tf.test.compute_gradient_error([input_const], [input_value.shape], mean, []) + print(numerical_grad_error_1, numerical_grad_error_2) + +with tf.Session('') as sess: + y_output = sess.run(y, feed_dict={input: input_value, rois: rois_value}) + print('y: ', y_output) + grads_output = sess.run(grads, feed_dict={input: input_value, rois: rois_value}) + print('grads: ', grads_output) diff --git a/tensorlayer/third_party/roi_pooling/setup.py b/tensorlayer/third_party/roi_pooling/setup.py new file mode 100644 index 0000000..b262072 --- /dev/null +++ b/tensorlayer/third_party/roi_pooling/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +from __future__ import print_function + +import subprocess +import sys +from distutils.command.install import install as DistutilsInstall +from distutils.core import setup + +try: + import tensorflow +except ImportError: + print("Please install tensorflow 0.12.0 or later") + sys.exit() + + +class MyInstall(DistutilsInstall): + def run(self): + subprocess.call(['make', '-C', 'roi_pooling', 'build']) + DistutilsInstall.run(self) + + +setup( + name='roi-pooling', + version='1.0', + description='ROI pooling as a custom TensorFlow operation', + author='deepsense.io', + packages=['roi_pooling'], + package_data={'roi_pooling': ['roi_pooling.so']}, + cmdclass={'install': MyInstall}) diff --git a/tensorlayer/third_party/roi_pooling/test_roi_layer.py b/tensorlayer/third_party/roi_pooling/test_roi_layer.py new file mode 100644 index 0000000..d0e2744 --- 
/dev/null +++ b/tensorlayer/third_party/roi_pooling/test_roi_layer.py @@ -0,0 +1,54 @@ +from tensorlayer.layers import * +from tensorlayer.third_party.roi_pooling.roi_pooling.roi_pooling_ops import \ + roi_pooling + +# from roi_pooling.roi_pooling_ops import roi_pooling + +# input feature map going into the RoI pooling +input_value = [[[[1], [2], [4], [4]], [[3], [4], [1], [2]], [[6], [2], [1], [7.0]], [[1], [3], [2], [8]]]] +input_value = np.asarray(input_value, dtype='float32') + +# Regions of interest as lists of: +# feature map index, upper left, bottom right coordinates +rois_value = [[0, 0, 0, 1, 1], [0, 1, 1, 2, 2], [0, 2, 2, 3, 3], [0, 0, 0, 2, 2], [0, 0, 0, 3, 3]] +rois_value = np.asarray(rois_value, dtype='int32') + +# the pool_height and width are parameters of the ROI layer +pool_height, pool_width = (2, 2) +n_rois = len(rois_value) +y_shape = [n_rois, 1, pool_height, pool_width] + +print('Input: ', input_value, ', shape: ', input_value.shape) +print('ROIs: ', rois_value, ', shape: ', rois_value.shape) + +# precise semantics is now only defined by the kernel, need tests +input = tf.placeholder(tf.float32) +rois = tf.placeholder(tf.int32) + +# y = roi_pooling(input, rois, pool_height=2, pool_width=2) +n = InputLayer(input, name='in') +n = ROIPoolingLayer(n, rois=rois, pool_height=2, pool_width=2, name='roi') +y = n.outputs +mean = tf.reduce_mean(y) + +grads = tf.gradients(mean, input) +print(type(grads)) +print(len(grads)) +print(grads) +print(input_value.shape) + +with tf.Session('') as sess: + input_const = tf.constant(input_value, tf.float32) + rois_const = tf.constant(rois_value, tf.int32) + y = roi_pooling(input_const, rois_const, pool_height=2, pool_width=2) + mean = tf.reduce_mean(y) + + numerical_grad_error_1 = tf.test.compute_gradient_error([input_const], [input_value.shape], y, y_shape) + numerical_grad_error_2 = tf.test.compute_gradient_error([input_const], [input_value.shape], mean, []) + print(numerical_grad_error_1, numerical_grad_error_2) 
+ +with tf.Session('') as sess: + y_output = sess.run(y, feed_dict={input: input_value, rois: rois_value}) + print('y: ', y_output) + grads_output = sess.run(grads, feed_dict={input: input_value, rois: rois_value}) + print('grads: ', grads_output) diff --git a/tensorlayer/utils.py b/tensorlayer/utils.py index df8ae77..70b288d 100644 --- a/tensorlayer/utils.py +++ b/tensorlayer/utils.py @@ -1,64 +1,101 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- +# -*- coding: utf-8 -*- +import os, random, subprocess, sys, time +from sys import exit as _exit +from sys import platform as _platform +import numpy as np import tensorflow as tf import tensorlayer as tl +from . import _logging as logging from . import iterate -import numpy as np -import time -import math -import random - -def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_size=100, - n_epoch=100, print_freq=5, X_val=None, y_val=None, eval_train=True, - tensorboard=False, tensorboard_epoch_freq=5, tensorboard_weight_histograms=True, tensorboard_graph_vis=True): - """Traing a given non time-series network by the given cost function, training data, batch_size, n_epoch etc. +__all__ = [ + 'fit', + 'test', + 'predict', + 'evaluation', + 'dict_to_one', + 'flatten_list', + 'class_balancing_oversample', + 'get_random_int', + 'list_string_to_dict', + 'exit_tensorflow', + 'open_tensorboard', + 'clear_all_placeholder_variables', + 'set_gpu_fraction', +] + + +def fit(sess, + network, + train_op, + cost, + X_train, + y_train, + x, + y_, + acc=None, + batch_size=100, + n_epoch=100, + print_freq=5, + X_val=None, + y_val=None, + eval_train=True, + tensorboard=False, + tensorboard_epoch_freq=5, + tensorboard_weight_histograms=True, + tensorboard_graph_vis=True): + """Training a given non time-series network by the given cost function, training data, batch_size, n_epoch etc. + + - MNIST example click `here `_. 
+ - In order to control the training details, the authors HIGHLY recommend ``tl.iterate`` see two MNIST examples `1 `_, `2 `_. Parameters ---------- - sess : TensorFlow session - sess = tf.InteractiveSession() - network : a TensorLayer layer - the network will be trained - train_op : a TensorFlow optimizer - like tf.train.AdamOptimizer - X_train : numpy array - the input of training data - y_train : numpy array - the target of training data + sess : Session + TensorFlow Session. + network : TensorLayer layer + the network to be trained. + train_op : TensorFlow optimizer + The optimizer for training e.g. tf.train.AdamOptimizer. + X_train : numpy.array + The input of training data + y_train : numpy.array + The target of training data x : placeholder - for inputs + For inputs. y_ : placeholder - for targets - acc : the TensorFlow expression of accuracy (or other metric) or None - if None, would not display the metric + For targets. + acc : TensorFlow expression or None + Metric for accuracy or others. If None, would not print the information. batch_size : int - batch size for training and evaluating + The batch size for training and evaluating. n_epoch : int - the number of training epochs + The number of training epochs. print_freq : int - display the training information every ``print_freq`` epochs - X_val : numpy array or None - the input of validation data - y_val : numpy array or None - the target of validation data + Print the training information every ``print_freq`` epochs. + X_val : numpy.array or None + The input of validation data. If None, would not perform validation. + y_val : numpy.array or None + The target of validation data. If None, would not perform validation. eval_train : boolean - if X_val and y_val are not None, it refects whether to evaluate the training data + Whether to evaluate the model during training. + If X_val and y_val are not None, it reflects whether to evaluate the model on training data. 
tensorboard : boolean - if True summary data will be stored to the log/ direcory for visualization with tensorboard. + If True, summary data will be stored to the log/ directory for visualization with tensorboard. See also detailed tensorboard_X settings for specific configurations of features. (default False) - Also runs tl.layers.initialize_global_variables(sess) internally in fit() to setup the summary nodes, see Note: + Also runs `tl.layers.initialize_global_variables(sess)` internally in fit() to setup the summary nodes. tensorboard_epoch_freq : int - how many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5) + How many epochs between storing tensorboard checkpoint for visualization to log/ directory (default 5). tensorboard_weight_histograms : boolean - if True updates tensorboard data in the logs/ directory for visulaization - of the weight histograms every tensorboard_epoch_freq epoch (default True) + If True updates tensorboard data in the logs/ directory for visualization + of the weight histograms every tensorboard_epoch_freq epoch (default True). tensorboard_graph_vis : boolean - if True stores the graph in the tensorboard summaries saved to log/ (default True) + If True stores the graph in the tensorboard summaries saved to log/ (default True). Examples -------- - >>> see tutorial_mnist_simple.py + See `tutorial_mnist_simple.py `_ + >>> tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_, ... acc=acc, batch_size=500, n_epoch=200, print_freq=5, ... X_val=X_val, y_val=y_val, eval_train=False) @@ -67,33 +104,34 @@ def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_ ... X_val=X_val, y_val=y_val, eval_train=False, ... 
tensorboard=True, tensorboard_weight_histograms=True, tensorboard_graph_vis=True) - Note + Notes -------- - If tensorboard=True, the global_variables_initializer will be run inside the fit function - in order to initalize the automatically generated summary nodes used for tensorboard visualization, - thus tf.global_variables_initializer().run() before the fit() call will be undefined. + If tensorboard=True, the `global_variables_initializer` will be run inside the fit function + in order to initialize the automatically generated summary nodes used for tensorboard visualization, + thus `tf.global_variables_initializer().run()` before the `fit()` call will be undefined. + """ assert X_train.shape[0] >= batch_size, "Number of training examples should be bigger than the batch size" - if(tensorboard): - print("Setting up tensorboard ...") + if (tensorboard): + logging.info("Setting up tensorboard ...") #Set up tensorboard summaries and saver tl.files.exists_or_mkdir('logs/') #Only write summaries for more recent TensorFlow versions if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'): if tensorboard_graph_vis: - train_writer = tf.summary.FileWriter('logs/train',sess.graph) - val_writer = tf.summary.FileWriter('logs/validation',sess.graph) + train_writer = tf.summary.FileWriter('logs/train', sess.graph) + val_writer = tf.summary.FileWriter('logs/validation', sess.graph) else: train_writer = tf.summary.FileWriter('logs/train') val_writer = tf.summary.FileWriter('logs/validation') #Set up summary nodes - if(tensorboard_weight_histograms): + if (tensorboard_weight_histograms): for param in network.all_params: if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'): - print('Param name ', param.name) + logging.info('Param name %s' % param.name) tf.summary.histogram(param.name, param) if hasattr(tf, 'summary') and hasattr(tf.summary, 'histogram'): @@ -103,51 +141,48 @@ def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_ 
#Initalize all variables and summaries tl.layers.initialize_global_variables(sess) - print("Finished! use $tensorboard --logdir=logs/ to start server") + logging.info("Finished! use $tensorboard --logdir=logs/ to start server") - print("Start training the network ...") + logging.info("Start training the network ...") start_time_begin = time.time() tensorboard_train_index, tensorboard_val_index = 0, 0 for epoch in range(n_epoch): start_time = time.time() - loss_ep = 0; n_step = 0 - for X_train_a, y_train_a in iterate.minibatches(X_train, y_train, - batch_size, shuffle=True): + loss_ep = 0 + n_step = 0 + for X_train_a, y_train_a in iterate.minibatches(X_train, y_train, batch_size, shuffle=True): feed_dict = {x: X_train_a, y_: y_train_a} - feed_dict.update( network.all_drop ) # enable noise layers + feed_dict.update(network.all_drop) # enable noise layers loss, _ = sess.run([cost, train_op], feed_dict=feed_dict) loss_ep += loss n_step += 1 - loss_ep = loss_ep/ n_step + loss_ep = loss_ep / n_step if tensorboard and hasattr(tf, 'summary'): - if epoch+1 == 1 or (epoch+1) % tensorboard_epoch_freq == 0: - for X_train_a, y_train_a in iterate.minibatches( - X_train, y_train, batch_size, shuffle=True): - dp_dict = dict_to_one( network.all_drop ) # disable noise layers + if epoch + 1 == 1 or (epoch + 1) % tensorboard_epoch_freq == 0: + for X_train_a, y_train_a in iterate.minibatches(X_train, y_train, batch_size, shuffle=True): + dp_dict = dict_to_one(network.all_drop) # disable noise layers feed_dict = {x: X_train_a, y_: y_train_a} feed_dict.update(dp_dict) result = sess.run(merged, feed_dict=feed_dict) train_writer.add_summary(result, tensorboard_train_index) tensorboard_train_index += 1 - - for X_val_a, y_val_a in iterate.minibatches( - X_val, y_val, batch_size, shuffle=True): - dp_dict = dict_to_one( network.all_drop ) # disable noise layers - feed_dict = {x: X_val_a, y_: y_val_a} - feed_dict.update(dp_dict) - result = sess.run(merged, feed_dict=feed_dict) - 
val_writer.add_summary(result, tensorboard_val_index) - tensorboard_val_index += 1 + if (X_val is not None) and (y_val is not None): + for X_val_a, y_val_a in iterate.minibatches(X_val, y_val, batch_size, shuffle=True): + dp_dict = dict_to_one(network.all_drop) # disable noise layers + feed_dict = {x: X_val_a, y_: y_val_a} + feed_dict.update(dp_dict) + result = sess.run(merged, feed_dict=feed_dict) + val_writer.add_summary(result, tensorboard_val_index) + tensorboard_val_index += 1 if epoch + 1 == 1 or (epoch + 1) % print_freq == 0: if (X_val is not None) and (y_val is not None): - print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) + logging.info("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time)) if eval_train is True: train_loss, train_acc, n_batch = 0, 0, 0 - for X_train_a, y_train_a in iterate.minibatches( - X_train, y_train, batch_size, shuffle=True): - dp_dict = dict_to_one( network.all_drop ) # disable noise layers + for X_train_a, y_train_a in iterate.minibatches(X_train, y_train, batch_size, shuffle=True): + dp_dict = dict_to_one(network.all_drop) # disable noise layers feed_dict = {x: X_train_a, y_: y_train_a} feed_dict.update(dp_dict) if acc is not None: @@ -155,14 +190,14 @@ def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_ train_acc += ac else: err = sess.run(cost, feed_dict=feed_dict) - train_loss += err; n_batch += 1 - print(" train loss: %f" % (train_loss/ n_batch)) + train_loss += err + n_batch += 1 + logging.info(" train loss: %f" % (train_loss / n_batch)) if acc is not None: - print(" train acc: %f" % (train_acc/ n_batch)) + logging.info(" train acc: %f" % (train_acc / n_batch)) val_loss, val_acc, n_batch = 0, 0, 0 - for X_val_a, y_val_a in iterate.minibatches( - X_val, y_val, batch_size, shuffle=True): - dp_dict = dict_to_one( network.all_drop ) # disable noise layers + for X_val_a, y_val_a in iterate.minibatches(X_val, y_val, batch_size, shuffle=True): + 
dp_dict = dict_to_one(network.all_drop) # disable noise layers feed_dict = {x: X_val_a, y_: y_val_a} feed_dict.update(dp_dict) if acc is not None: @@ -170,13 +205,14 @@ def fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=None, batch_ val_acc += ac else: err = sess.run(cost, feed_dict=feed_dict) - val_loss += err; n_batch += 1 - print(" val loss: %f" % (val_loss/ n_batch)) + val_loss += err + n_batch += 1 + logging.info(" val loss: %f" % (val_loss / n_batch)) if acc is not None: - print(" val acc: %f" % (val_acc/ n_batch)) + logging.info(" val acc: %f" % (val_acc / n_batch)) else: - print("Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep)) - print("Total training time: %fs" % (time.time() - start_time_begin)) + logging.info("Epoch %d of %d took %fs, loss %f" % (epoch + 1, n_epoch, time.time() - start_time, loss_ep)) + logging.info("Total training time: %fs" % (time.time() - start_time_begin)) def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None): @@ -185,46 +221,48 @@ def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None): Parameters ---------- - sess : TensorFlow session - sess = tf.InteractiveSession() - network : a TensorLayer layer - the network will be trained - acc : the TensorFlow expression of accuracy (or other metric) or None - if None, would not display the metric - X_test : numpy array - the input of test data + sess : Session + TensorFlow session. + network : TensorLayer layer + The network. + acc : TensorFlow expression or None + Metric for accuracy or others. + - If None, would not print the information. + X_test : numpy.array + The input of testing data. y_test : numpy array - the target of test data + The target of testing data x : placeholder - for inputs + For inputs. y_ : placeholder - for targets + For targets. batch_size : int or None - batch size for testing, when dataset is large, we should use minibatche for testing. 
- when dataset is small, we can set it to None. - cost : the TensorFlow expression of cost or None - if None, would not display the cost + The batch size for testing, when dataset is large, we should use minibatche for testing; + if dataset is small, we can set it to None. + cost : TensorFlow expression or None + Metric for cost or others. If None, would not print the information. Examples -------- - >>> see tutorial_mnist_simple.py + See `tutorial_mnist_simple.py `_ + >>> tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost) + """ - print('Start testing the network ...') + logging.info('Start testing the network ...') if batch_size is None: - dp_dict = dict_to_one( network.all_drop ) + dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X_test, y_: y_test} feed_dict.update(dp_dict) if cost is not None: - print(" test loss: %f" % sess.run(cost, feed_dict=feed_dict)) - print(" test acc: %f" % sess.run(acc, feed_dict=feed_dict)) - # print(" test acc: %f" % np.mean(y_test == sess.run(y_op, - # feed_dict=feed_dict))) + logging.info(" test loss: %f" % sess.run(cost, feed_dict=feed_dict)) + logging.info(" test acc: %f" % sess.run(acc, feed_dict=feed_dict)) + # logging.info(" test acc: %f" % np.mean(y_test == sess.run(y_op, + # feed_dict=feed_dict))) else: test_loss, test_acc, n_batch = 0, 0, 0 - for X_test_a, y_test_a in iterate.minibatches( - X_test, y_test, batch_size, shuffle=True): - dp_dict = dict_to_one( network.all_drop ) # disable noise layers + for X_test_a, y_test_a in iterate.minibatches(X_test, y_test, batch_size, shuffle=True): + dp_dict = dict_to_one(network.all_drop) # disable noise layers feed_dict = {x: X_test_a, y_: y_test_a} feed_dict.update(dp_dict) if cost is not None: @@ -232,40 +270,82 @@ def test(sess, network, acc, X_test, y_test, x, y_, batch_size, cost=None): test_loss += err else: ac = sess.run(acc, feed_dict=feed_dict) - test_acc += ac; n_batch += 1 + test_acc += ac + n_batch += 1 if cost is not None: - 
print(" test loss: %f" % (test_loss/ n_batch)) - print(" test acc: %f" % (test_acc/ n_batch)) + logging.info(" test loss: %f" % (test_loss / n_batch)) + logging.info(" test acc: %f" % (test_acc / n_batch)) -def predict(sess, network, X, x, y_op): +def predict(sess, network, X, x, y_op, batch_size=None): """ Return the predict results of given non time-series network. Parameters ---------- - sess : TensorFlow session - sess = tf.InteractiveSession() - network : a TensorLayer layer - the network will be trained - X : numpy array - the input + sess : Session + TensorFlow Session. + network : TensorLayer layer + The network. + X : numpy.array + The inputs. x : placeholder - for inputs + For inputs. y_op : placeholder - the argmax expression of softmax outputs + The argmax expression of softmax outputs. + batch_size : int or None + The batch size for prediction, when dataset is large, we should use minibatche for prediction; + if dataset is small, we can set it to None. Examples -------- - >>> see tutorial_mnist_simple.py + See `tutorial_mnist_simple.py `_ + >>> y = network.outputs >>> y_op = tf.argmax(tf.nn.softmax(y), 1) >>> print(tl.utils.predict(sess, network, X_test, x, y_op)) + """ - dp_dict = dict_to_one( network.all_drop ) # disable noise layers - feed_dict = {x: X,} - feed_dict.update(dp_dict) - return sess.run(y_op, feed_dict=feed_dict) + if batch_size is None: + dp_dict = dict_to_one(network.all_drop) # disable noise layers + feed_dict = { + x: X, + } + feed_dict.update(dp_dict) + return sess.run(y_op, feed_dict=feed_dict) + else: + result = None + for X_a, _ in iterate.minibatches(X, X, batch_size, shuffle=False): + dp_dict = dict_to_one(network.all_drop) + feed_dict = { + x: X_a, + } + feed_dict.update(dp_dict) + result_a = sess.run(y_op, feed_dict=feed_dict) + if result is None: + result = result_a + else: + result = np.vstack((result, result_a)) # TODO: https://github.com/tensorlayer/tensorlayer/issues/288 + if result is None: + if len(X) % batch_size != 
0: + dp_dict = dict_to_one(network.all_drop) + feed_dict = { + x: X[-(len(X) % batch_size):, :], + } + feed_dict.update(dp_dict) + result_a = sess.run(y_op, feed_dict=feed_dict) + result = result_a + else: + if len(X) != len(result) and len(X) % batch_size != 0: + dp_dict = dict_to_one(network.all_drop) + feed_dict = { + x: X[-(len(X) % batch_size):, :], + } + feed_dict.update(dp_dict) + result_a = sess.run(y_op, feed_dict=feed_dict) + result = np.vstack((result, result_a)) # TODO: https://github.com/tensorlayer/tensorlayer/issues/288 + return result + ## Evaluation def evaluation(y_test=None, y_predict=None, n_classes=None): @@ -276,49 +356,52 @@ def evaluation(y_test=None, y_predict=None, n_classes=None): Parameters ---------- - y_test : numpy.array or list - target results - y_predict : numpy.array or list - predicted results + y_test : list + The target results + y_predict : list + The predicted results n_classes : int - number of classes + The number of classes Examples -------- - >>> c_mat, f1, acc, f1_macro = evaluation(y_test, y_predict, n_classes) + >>> c_mat, f1, acc, f1_macro = tl.utils.evaluation(y_test, y_predict, n_classes) + """ from sklearn.metrics import confusion_matrix, f1_score, accuracy_score - c_mat = confusion_matrix(y_test, y_predict, labels = [x for x in range(n_classes)]) - f1 = f1_score(y_test, y_predict, average = None, labels = [x for x in range(n_classes)]) + c_mat = confusion_matrix(y_test, y_predict, labels=[x for x in range(n_classes)]) + f1 = f1_score(y_test, y_predict, average=None, labels=[x for x in range(n_classes)]) f1_macro = f1_score(y_test, y_predict, average='macro') - acc = accuracy_score(y_test, y_predict) - print('confusion matrix: \n',c_mat) - print('f1-score:',f1) - print('f1-score(macro):',f1_macro) # same output with > f1_score(y_true, y_pred, average='macro') - print('accuracy-score:', acc) + acc = accuracy_score(y_test, y_predict) + logging.info('confusion matrix: \n%s' % c_mat) + logging.info('f1-score : %s' % 
f1) + logging.info('f1-score(macro) : %f' % f1_macro) # same output with > f1_score(y_true, y_pred, average='macro') + logging.info('accuracy-score : %f' % acc) return c_mat, f1, acc, f1_macro -def dict_to_one(dp_dict={}): - """ - Input a dictionary, return a dictionary that all items are set to one, - use for disable dropout, dropconnect layer and so on. + +def dict_to_one(dp_dict): + """Input a dictionary, return a dictionary that all items are set to one. + + Used for disable dropout, dropconnect layer and so on. Parameters ---------- dp_dict : dictionary - keeping probabilities + The dictionary contains key and number, e.g. keeping probabilities. Examples -------- >>> dp_dict = dict_to_one( network.all_drop ) >>> dp_dict = dict_to_one( network.all_drop ) >>> feed_dict.update(dp_dict) + """ return {x: 1 for x in dp_dict} -def flatten_list(list_of_list=[[],[]]): - """ - Input a list of list, return a list that all items are in a list. + +def flatten_list(list_of_list): + """Input a list of list, return a list that all items are in a list. Parameters ---------- @@ -328,6 +411,7 @@ def flatten_list(list_of_list=[[],[]]): -------- >>> tl.utils.flatten_list([[1, 2, 3],[4, 5],[6]]) ... [1, 2, 3, 4, 5, 6] + """ return sum(list_of_list, []) @@ -338,48 +422,51 @@ def class_balancing_oversample(X_train=None, y_train=None, printable=True): Parameters ---------- X_train : numpy.array - Features, each row is an example + The inputs. y_train : numpy.array - Labels + The targets. 
Examples -------- - - One X + One X + >>> X_train, y_train = class_balancing_oversample(X_train, y_train, printable=True) - - Two X + Two X + >>> X, y = tl.utils.class_balancing_oversample(X_train=np.hstack((X1, X2)), y_train=y, printable=False) >>> X1 = X[:, 0:5] >>> X2 = X[:, 5:] + """ # ======== Classes balancing if printable: - print("Classes balancing for training examples...") + logging.info("Classes balancing for training examples...") from collections import Counter c = Counter(y_train) if printable: - print('the occurrence number of each stage: %s' % c.most_common()) - print('the least stage is Label %s have %s instances' % c.most_common()[-1]) - print('the most stage is Label %s have %s instances' % c.most_common(1)[0]) + logging.info('the occurrence number of each stage: %s' % c.most_common()) + logging.info('the least stage is Label %s have %s instances' % c.most_common()[-1]) + logging.info('the most stage is Label %s have %s instances' % c.most_common(1)[0]) most_num = c.most_common(1)[0][1] if printable: - print('most num is %d, all classes tend to be this num' % most_num) + logging.info('most num is %d, all classes tend to be this num' % most_num) locations = {} number = {} - for lab, num in c.most_common(): # find the index from y_train + for lab, num in c.most_common(): # find the index from y_train number[lab] = num - locations[lab] = np.where(np.array(y_train)==lab)[0] + locations[lab] = np.where(np.array(y_train) == lab)[0] if printable: - print('convert list(np.array) to dict format') + logging.info('convert list(np.array) to dict format') X = {} # convert list to dict for lab, num in number.items(): X[lab] = X_train[locations[lab]] # oversampling if printable: - print('start oversampling') + logging.info('start oversampling') for key in X: temp = X[key] while True: @@ -387,136 +474,168 @@ def class_balancing_oversample(X_train=None, y_train=None, printable=True): break X[key] = np.vstack((X[key], temp)) if printable: - print('first features 
of label 0 >', len(X[0][0])) - print('the occurrence num of each stage after oversampling') + logging.info('first features of label 0 > %d' % len(X[0][0])) + logging.info('the occurrence num of each stage after oversampling') for key in X: - print(key, len(X[key])) + logging.info("%s %d" % (key, len(X[key]))) if printable: - print('make each stage have same num of instances') + logging.info('make each stage have same num of instances') for key in X: - X[key] = X[key][0:most_num,:] - print(key, len(X[key])) + X[key] = X[key][0:most_num, :] + logging.info("%s %d" % (key, len(X[key]))) # convert dict to list if printable: - print('convert from dict to list format') + logging.info('convert from dict to list format') y_train = [] - X_train = np.empty(shape=(0,len(X[0][0]))) + X_train = np.empty(shape=(0, len(X[0][0]))) for key in X: - X_train = np.vstack( (X_train, X[key] ) ) + X_train = np.vstack((X_train, X[key])) y_train.extend([key for i in range(len(X[key]))]) - # print(len(X_train), len(y_train)) + # logging.info(len(X_train), len(y_train)) c = Counter(y_train) if printable: - print('the occurrence number of each stage after oversampling: %s' % c.most_common()) + logging.info('the occurrence number of each stage after oversampling: %s' % c.most_common()) # ================ End of Classes balancing return X_train, y_train + ## Random -def get_random_int(min=0, max=10, number=5, seed=None): +def get_random_int(min_v=0, max_v=10, number=5, seed=None): """Return a list of random integer by the given range and quantity. + Parameters + ----------- + min_v : number + The minimum value. + max_v : number + The maximum value. + number : int + Number of value. + seed : int or None + The seed for random. + Examples --------- - >>> r = get_random_int(min=0, max=10, number=5) + >>> r = get_random_int(min_v=0, max_v=10, number=5) ... 
[10, 2, 3, 3, 7] + """ rnd = random.Random() if seed: rnd = random.Random(seed) # return [random.randint(min,max) for p in range(0, number)] - return [rnd.randint(min,max) for p in range(0, number)] - -# -# def class_balancing_sequence_4D(X_train, y_train, sequence_length, model='downsampling' ,printable=True): -# ''' 输入、输出都是sequence format -# oversampling or downsampling -# ''' -# n_features = X_train.shape[2] -# # ======== Classes balancing for sequence -# if printable: -# print("Classes balancing for 4D sequence training examples...") -# from collections import Counter -# c = Counter(y_train) # Counter({2: 454, 4: 267, 3: 124, 1: 57, 0: 48}) -# if printable: -# print('the occurrence number of each stage: %s' % c.most_common()) -# print('the least Label %s have %s instances' % c.most_common()[-1]) -# print('the most Label %s have %s instances' % c.most_common(1)[0]) -# # print(c.most_common()) # [(2, 454), (4, 267), (3, 124), (1, 57), (0, 48)] -# most_num = c.most_common(1)[0][1] -# less_num = c.most_common()[-1][1] -# -# locations = {} -# number = {} -# for lab, num in c.most_common(): -# number[lab] = num -# locations[lab] = np.where(np.array(y_train)==lab)[0] -# # print(locations) -# # print(number) -# if printable: -# print(' convert list to dict') -# X = {} # convert list to dict -# ### a sequence -# for lab, _ in number.items(): -# X[lab] = np.empty(shape=(0,1,n_features,1)) # 4D -# for lab, _ in number.items(): -# #X[lab] = X_train[locations[lab] -# for l in locations[lab]: -# X[lab] = np.vstack((X[lab], X_train[l*sequence_length : (l+1)*(sequence_length)])) -# # X[lab] = X_train[locations[lab]*sequence_length : locations[lab]*(sequence_length+1)] # a sequence -# # print(X) -# -# if model=='oversampling': -# if printable: -# print(' oversampling -- most num is %d, all classes tend to be this num\nshuffle applied' % most_num) -# for key in X: -# temp = X[key] -# while True: -# if len(X[key]) >= most_num * sequence_length: # sequence -# break -# X[key] = 
np.vstack((X[key], temp)) -# # print(key, len(X[key])) -# if printable: -# print(' make each stage have same num of instances') -# for key in X: -# X[key] = X[key][0:most_num*sequence_length,:] # sequence -# if printable: -# print(key, len(X[key])) -# elif model=='downsampling': -# import random -# if printable: -# print(' downsampling -- less num is %d, all classes tend to be this num by randomly choice without replacement\nshuffle applied' % less_num) -# for key in X: -# # print(key, len(X[key]))#, len(X[key])/sequence_length) -# s_idx = [ i for i in range(int(len(X[key])/sequence_length))] -# s_idx = np.asarray(s_idx)*sequence_length # start index of sequnce in X[key] -# # print('s_idx',s_idx) -# r_idx = np.random.choice(s_idx, less_num, replace=False) # random choice less_num of s_idx -# # print('r_idx',r_idx) -# temp = X[key] -# X[key] = np.empty(shape=(0,1,n_features,1)) # 4D -# for idx in r_idx: -# X[key] = np.vstack((X[key], temp[idx:idx+sequence_length])) -# # print(key, X[key]) -# # np.random.choice(l, len(l), replace=False) -# else: -# raise Exception(' model should be oversampling or downsampling') -# -# # convert dict to list -# if printable: -# print(' convert dict to list') -# y_train = [] -# # X_train = np.empty(shape=(0,len(X[0][0]))) -# # X_train = np.empty(shape=(0,len(X[1][0]))) # 2D -# X_train = np.empty(shape=(0,1,n_features,1)) # 4D -# l_key = list(X.keys()) # shuffle -# random.shuffle(l_key) # shuffle -# # for key in X: # no shuffle -# for key in l_key: # shuffle -# X_train = np.vstack( (X_train, X[key] ) ) -# # print(len(X[key])) -# y_train.extend([key for i in range(int(len(X[key])/sequence_length))]) -# # print(X_train,y_train, type(X_train), type(y_train)) -# # ================ End of Classes balancing for sequence -# # print(X_train.shape, len(y_train)) -# return X_train, np.asarray(y_train) + return [rnd.randint(min_v, max_v) for p in range(0, number)] + + +def list_string_to_dict(string): + """Inputs ``['a', 'b', 'c']``, returns 
``{'a': 0, 'b': 1, 'c': 2}``.""" + dictionary = {} + for idx, c in enumerate(string): + dictionary.update({c: idx}) + return dictionary + + +def exit_tensorflow(sess=None, port=6006): + """Close TensorFlow session, TensorBoard and Nvidia-process if available. + + Parameters + ---------- + sess : Session + TensorFlow Session. + tb_port : int + TensorBoard port you want to close, `6006` as default. + + """ + text = "[TL] Close tensorboard and nvidia-process if available" + text2 = "[TL] Close tensorboard and nvidia-process not yet supported by this function (tl.ops.exit_tf) on " + if sess is not None: + sess.close() + # import time + # time.sleep(2) + if _platform == "linux" or _platform == "linux2": + logging.info('linux: %s' % text) + os.system('nvidia-smi') + os.system('fuser ' + port + '/tcp -k') # kill tensorboard 6006 + os.system("nvidia-smi | grep python |awk '{print $3}'|xargs kill") # kill all nvidia-smi python process + _exit() + elif _platform == "darwin": + logging.info('OS X: %s' % text) + subprocess.Popen("lsof -i tcp:" + str(port) + " | grep -v PID | awk '{print $2}' | xargs kill", shell=True) # kill tensorboard + elif _platform == "win32": + raise NotImplementedError("this function is not supported on the Windows platform") + else: + logging.info(text2 + _platform) + + +def open_tensorboard(log_dir='/tmp/tensorflow', port=6006): + """Open Tensorboard. 
+ + Parameters + ---------- + log_dir : str + Directory where your tensorboard logs are saved + port : int + TensorBoard port you want to open, 6006 is tensorboard default + + """ + text = "[TL] Open tensorboard, go to localhost:" + str(port) + " to access" + text2 = " not yet supported by this function (tl.ops.open_tb)" + + if not tl.files.exists_or_mkdir(log_dir, verbose=False): + logging.info("[TL] Log reportory was created at %s" % log_dir) + + if _platform == "linux" or _platform == "linux2": + raise NotImplementedError() + elif _platform == "darwin": + logging.info('OS X: %s' % text) + subprocess.Popen( + sys.prefix + " | python -m tensorflow.tensorboard --logdir=" + log_dir + " --port=" + str(port), + shell=True) # open tensorboard in localhost:6006/ or whatever port you chose + elif _platform == "win32": + raise NotImplementedError("this function is not supported on the Windows platform") + else: + logging.info(_platform + text2) + + +def clear_all_placeholder_variables(printable=True): + """Clears all the placeholder variables of keep prob, + including keeping probabilities of all dropout, denoising, dropconnect etc. + + Parameters + ---------- + printable : boolean + If True, print all deleted variables. + + """ + logging.info('clear all .....................................') + gl = globals().copy() + for var in gl: + if var[0] == '_': continue + if 'func' in str(globals()[var]): continue + if 'module' in str(globals()[var]): continue + if 'class' in str(globals()[var]): continue + + if printable: + logging.info(" clear_all ------- %s" % str(globals()[var])) + + del globals()[var] + + +def set_gpu_fraction(gpu_fraction=0.3): + """Set the GPU memory fraction for the application. 
+ + Parameters + ---------- + gpu_fraction : float + Fraction of GPU memory, (0 ~ 1] + + References + ---------- + - `TensorFlow using GPU `__ + + """ + logging.info("[TL]: GPU MEM Fraction %f" % gpu_fraction) + gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction) + sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) + return sess diff --git a/tensorlayer/visualize.py b/tensorlayer/visualize.py index 8505bb9..daba94a 100644 --- a/tensorlayer/visualize.py +++ b/tensorlayer/visualize.py @@ -1,39 +1,131 @@ -#! /usr/bin/python -# -*- coding: utf8 -*- +# -*- coding: utf-8 -*- - -import matplotlib -matplotlib.use('Agg') -import matplotlib.pyplot as plt -# import matplotlib.pyplot as plt -import numpy as np import os +import numpy as np +import scipy.misc # save/read image(s) +from . import _logging as logging +from . import prepro + +# Uncomment the following line if you got: _tkinter.TclError: no display name and no $DISPLAY environment variable +# import matplotlib +# matplotlib.use('Agg') + +__all__ = [ + 'read_image', + 'read_images', + 'save_image', + 'save_images', + 'draw_boxes_and_labels_to_image', + 'frame', + 'CNN2d', + 'images2d', + 'tsne_embedding', + 'draw_weights', + 'W', +] + + +def read_image(image, path=''): + """Read one image. + + Parameters + ----------- + image : str + The image file name. + path : str + The image folder path. + + Returns + ------- + numpy.array + The image. + + """ + return scipy.misc.imread(os.path.join(path, image)) -## Save images -import scipy.misc -def save_images(images, size, image_path): - """Save mutiple images into one single image. +def read_images(img_list, path='', n_threads=10, printable=True): + """Returns all images in list by given path and name of each image file. + + Parameters + ------------- + img_list : list of str + The image file names. + path : str + The image folder path. + n_threads : int + The number of threads to read image. 
+ printable : boolean + Whether to print information when reading images. + + Returns + ------- + list of numpy.array + The images. + + """ + imgs = [] + for idx in range(0, len(img_list), n_threads): + b_imgs_list = img_list[idx:idx + n_threads] + b_imgs = prepro.threading_data(b_imgs_list, fn=read_image, path=path) + # logging.info(b_imgs.shape) + imgs.extend(b_imgs) + if printable: + logging.info('read %d from %s' % (len(imgs), path)) + return imgs + + +def save_image(image, image_path=''): + """Save a image. Parameters ----------- - images : numpy array [batch, w, h, c] - size : list of two int, row and column number. + image : numpy array + [w, h, c] + image_path : str + path + + """ + try: # RGB + scipy.misc.imsave(image_path, image) + except Exception: # Greyscale + scipy.misc.imsave(image_path, image[:, :, 0]) + + +def save_images(images, size, image_path=''): + """Save multiple images into one single image. + + Parameters + ----------- + images : numpy array + (batch, w, h, c) + size : list of 2 ints + row and column number. number of images should be equal or less than size[0] * size[1] - image_path : string. + image_path : str + save path + + Returns + ------- + numpy.array + The image. 
Examples --------- >>> images = np.random.rand(64, 100, 100, 3) >>> tl.visualize.save_images(images, [8, 8], 'temp.png') + """ + if len(images.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1] + images = images[:, :, :, np.newaxis] + def merge(images, size): h, w = images.shape[1], images.shape[2] img = np.zeros((h * size[0], w * size[1], 3)) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] - img[j*h:j*h+h, i*w:i*w+w, :] = image + img[j * h:j * h + h, i * w:i * w + w, :] = image return img def imsave(images, size, path): @@ -42,66 +134,90 @@ def imsave(images, size, path): assert len(images) <= size[0] * size[1], "number of images should be equal or less than size[0] * size[1] {}".format(len(images)) return imsave(images, size, image_path) -def W(W=None, second=10, saveable=True, shape=[28,28], name='mnist', fig_idx=2396512): - """Visualize every columns of the weight matrix to a group of Greyscale img. + +def draw_boxes_and_labels_to_image(image, classes, coords, scores, classes_list, is_center=True, is_rescale=True, save_name=None): + """Draw bboxes and class labels on image. Return or save the image with bboxes, example in the docs of ``tl.prepro``. Parameters - ---------- - W : numpy.array - The weight matrix - second : int - The display second(s) for the image(s), if saveable is False. - saveable : boolean - Save or plot the figure. - shape : a list with 2 int - The shape of feature image, MNIST is [28, 80]. - name : a string - A name to save the image, if saveable is True. - fig_idx : int - matplotlib figure index. + ----------- + image : numpy.array + The RGB image [height, width, channel]. + classes : list of int + A list of class ID (int). + coords : list of int + A list of list for coordinates. + - Should be [x, y, x2, y2] (up-left and botton-right format) + - If [x_center, y_center, w, h] (set is_center to True). + scores : list of float + A list of score (float). 
(Optional) + classes_list : list of str + for converting ID to string on image. + is_center : boolean + Whether the coordinates is [x_center, y_center, w, h] + - If coordinates are [x_center, y_center, w, h], set it to True for converting it to [x, y, x2, y2] (up-left and botton-right) internally. + - If coordinates are [x1, x2, y1, y2], set it to False. + is_rescale : boolean + Whether to rescale the coordinates from pixel-unit format to ratio format. + - If True, the input coordinates are the portion of width and high, this API will scale the coordinates to pixel unit internally. + - If False, feed the coordinates with pixel unit format. + save_name : None or str + The name of image file (i.e. image.png), if None, not to save image. + + Returns + ------- + numpy.array + The saved image. + + References + ----------- + - OpenCV rectangle and putText. + - `scikit-image `__. - Examples - -------- - >>> tl.visualize.W(network.all_params[0].eval(), second=10, saveable=True, name='weight_of_1st_layer', fig_idx=2012) """ - if saveable is False: - plt.ion() - fig = plt.figure(fig_idx) # show all feature images - size = W.shape[0] - n_units = W.shape[1] + assert len(coords) == len(classes), "number of coordinates and classes are equal" + if len(scores) > 0: + assert len(scores) == len(classes), "number of scores and classes are equal" + + import cv2 + + # don't change the original image, and avoid error https://stackoverflow.com/questions/30249053/python-opencv-drawing-errors-after-manipulating-array-with-numpy + image = image.copy() + + imh, imw = image.shape[0:2] + thick = int((imh + imw) // 430) + + for i, _v in enumerate(coords): + if is_center: + x, y, x2, y2 = prepro.obj_box_coord_centroid_to_upleft_butright(coords[i]) + else: + x, y, x2, y2 = coords[i] + + if is_rescale: # scale back to pixel unit if the coords are the portion of width and high + x, y, x2, y2 = prepro.obj_box_coord_scale_to_pixelunit([x, y, x2, y2], (imh, imw)) + + cv2.rectangle( + image, + (int(x), 
int(y)), + (int(x2), int(y2)), # up-left and botton-right + [0, 255, 0], + thick) + + cv2.putText( + image, + classes_list[classes[i]] + ((" %.2f" % (scores[i])) if (len(scores) != 0) else " "), + (int(x), int(y)), # button left + 0, + 1.5e-3 * imh, # bigger = larger font + [0, 0, 256], # self.meta['colors'][max_indx], + int(thick / 2) + 1) # bold + + if save_name is not None: + # cv2.imwrite('_my.png', image) + save_image(image, save_name) + # if len(coords) == 0: + # logging.info("draw_boxes_and_labels_to_image: no bboxes exist, cannot draw !") + return image - num_r = int(np.sqrt(n_units)) # 每行显示的个数 若25个hidden unit -> 每行显示5个 - num_c = int(np.ceil(n_units/num_r)) - count = int(1) - for row in range(1, num_r+1): - for col in range(1, num_c+1): - if count > n_units: - break - a = fig.add_subplot(num_r, num_c, count) - # ------------------------------------------------------------ - # plt.imshow(np.reshape(W[:,count-1],(28,28)), cmap='gray') - # ------------------------------------------------------------ - feature = W[:,count-1] / np.sqrt( (W[:,count-1]**2).sum()) - # feature[feature<0.0001] = 0 # value threshold - # if count == 1 or count == 2: - # print(np.mean(feature)) - # if np.std(feature) < 0.03: # condition threshold - # feature = np.zeros_like(feature) - # if np.mean(feature) < -0.015: # condition threshold - # feature = np.zeros_like(feature) - plt.imshow(np.reshape(feature ,(shape[0],shape[1])), - cmap='gray', interpolation="nearest")#, vmin=np.min(feature), vmax=np.max(feature)) - # plt.title(name) - # ------------------------------------------------------------ - # plt.imshow(np.reshape(W[:,count-1] ,(np.sqrt(size),np.sqrt(size))), cmap='gray', interpolation="nearest") - plt.gca().xaxis.set_major_locator(plt.NullLocator()) # distable tick - plt.gca().yaxis.set_major_locator(plt.NullLocator()) - count = count + 1 - if saveable: - plt.savefig(name+'.pdf',format='pdf') - else: - plt.draw() - plt.pause(second) def frame(I=None, second=5, saveable=True, 
def frame(I=None, second=5, saveable=True, name='frame', cmap=None, fig_idx=12836):
    """Display a single frame (image). Make sure OpenAI Gym render() is disabled before using it.

    Parameters
    ----------
    I : numpy.array
        The image.
    second : int
        The display second(s) for the image(s), if saveable is False.
    saveable : boolean
        Save or plot the figure.
    name : str
        A name to save the image, if saveable is True.
    cmap : None or str
        'gray' for greyscale, None for default, etc.
    fig_idx : int
        matplotlib figure index.

    Examples
    --------
    >>> env = gym.make("Pong-v0")
    >>> observation = env.reset()
    >>> tl.visualize.frame(observation)

    """
    import matplotlib.pyplot as plt

    if saveable is False:
        plt.ion()
    plt.figure(fig_idx)

    # Drop a trailing singleton channel, e.g. (10, 10, 1) --> (10, 10).
    if len(I.shape) and I.shape[-1] == 1:
        I = I[:, :, 0]

    plt.imshow(I, cmap)
    plt.title(name)

    if saveable:
        plt.savefig(name + '.pdf', format='pdf')
    else:
        plt.draw()
        plt.pause(second)
def CNN2d(CNN=None, second=10, saveable=True, name='cnn', fig_idx=3119362):
    """Display a group of RGB or greyscale CNN masks.

    Parameters
    ----------
    CNN : numpy.array
        The group of masks, shape (height, width, channel, n_mask).
    second : int
        The display second(s) for the image(s), if saveable is False.
    saveable : boolean
        Save or plot the figure.
    name : str
        A name to save the image, if saveable is True.
    fig_idx : int
        The matplotlib figure index.

    Examples
    --------
    >>> tl.visualize.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)

    """
    import matplotlib.pyplot as plt

    n_mask = CNN.shape[3]
    n_row = CNN.shape[0]
    n_col = CNN.shape[1]
    n_color = CNN.shape[2]

    # Arrange the masks on a near-square grid.
    row = int(np.sqrt(n_mask))
    col = int(np.ceil(n_mask / row))

    plt.ion()  # interactive mode
    fig = plt.figure(fig_idx)
    for idx in range(n_mask):
        fig.add_subplot(col, row, idx + 1)
        mask = CNN[:, :, :, idx]
        if n_color == 1:
            plt.imshow(np.reshape(mask, (n_row, n_col)), cmap='gray', interpolation="nearest")
        elif n_color == 3:
            plt.imshow(np.reshape(mask, (n_row, n_col, n_color)), cmap='gray', interpolation="nearest")
        else:
            raise Exception("Unknown n_color")
        # Hide the axis ticks.
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())

    if saveable:
        plt.savefig(name + '.pdf', format='pdf')
    else:
        plt.draw()
        plt.pause(second)
def images2d(images=None, second=10, saveable=True, name='images', dtype=None, fig_idx=3119362):
    """Display a group of RGB or greyscale images.

    Parameters
    ----------
    images : numpy.array
        The images, shape (batch, height, width, channel).
    second : int
        The display second(s) for the image(s), if saveable is False.
    saveable : boolean
        Save or plot the figure.
    name : str
        A name to save the image, if saveable is True.
    dtype : None or numpy data type
        The data type for displaying the images.
    fig_idx : int
        matplotlib figure index.

    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
    >>> tl.visualize.images2d(X_train[0:100,:,:,:], second=10, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212)

    """
    import matplotlib.pyplot as plt

    if dtype:
        images = np.asarray(images, dtype=dtype)

    n_mask = images.shape[0]
    n_row = images.shape[1]
    n_col = images.shape[2]
    n_color = images.shape[3]

    # Arrange the images on a near-square grid.
    row = int(np.sqrt(n_mask))
    col = int(np.ceil(n_mask / row))

    plt.ion()  # interactive mode
    fig = plt.figure(fig_idx)
    for idx in range(n_mask):
        fig.add_subplot(col, row, idx + 1)
        if n_color == 1:
            plt.imshow(np.reshape(images[idx, :, :], (n_row, n_col)), cmap='gray', interpolation="nearest")
        elif n_color == 3:
            plt.imshow(images[idx, :, :], cmap='gray', interpolation="nearest")
        else:
            raise Exception("Unknown n_color")
        # Hide the axis ticks.
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())

    if saveable:
        plt.savefig(name + '.pdf', format='pdf')
    else:
        plt.draw()
        plt.pause(second)
def tsne_embedding(embeddings, reverse_dictionary, plot_only=500, second=5, saveable=False, name='tsne', fig_idx=9862):
    """Visualize the embeddings by using t-SNE.

    Parameters
    ----------
    embeddings : numpy.array
        The embedding matrix.
    reverse_dictionary : dictionary
        id_to_word, mapping id to unique word.
    plot_only : int
        The number of examples to plot, choice the most common words.
    second : int
        The display second(s) for the image(s), if saveable is False.
    saveable : boolean
        Save or plot the figure.
    name : str
        A name to save the image, if saveable is True.
    fig_idx : int
        matplotlib figure index.

    Examples
    --------
    >>> final_embeddings = normalized_embeddings.eval()
    >>> tl.visualize.tsne_embedding(final_embeddings, labels, reverse_dictionary,
    ...                   plot_only=500, second=5, saveable=False, name='tsne')

    """
    import matplotlib.pyplot as plt

    def plot_with_labels(low_dim_embs, labels, figsize=(18, 18), second=5, saveable=True, name='tsne', fig_idx=9862):
        # Scatter-plot every 2-D point and annotate it with its word.
        assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
        if saveable is False:
            plt.ion()
            plt.figure(fig_idx)
        plt.figure(figsize=figsize)  # in inches
        for i, label in enumerate(labels):
            x, y = low_dim_embs[i, :]
            plt.scatter(x, y)
            plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')

        if saveable:
            plt.savefig(name + '.pdf', format='pdf')
        else:
            plt.draw()
            plt.pause(second)

    try:
        from sklearn.manifold import TSNE

        tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
        # Project the `plot_only` most common words down to 2-D.
        low_dim_embs = tsne.fit_transform(embeddings[:plot_only, :])
        # NOTE: the previous `six.moves.xrange` dependency was unnecessary —
        # the built-in `range` behaves identically here on Python 2 and 3.
        labels = [reverse_dictionary[i] for i in range(plot_only)]
        plot_with_labels(low_dim_embs, labels, second=second, saveable=saveable, name=name, fig_idx=fig_idx)
    except ImportError:
        logging.info("Please install sklearn and matplotlib to visualize embeddings.")
def draw_weights(W=None, second=10, saveable=True, shape=None, name='mnist', fig_idx=2396512):
    """Visualize every column of the weight matrix as one greyscale feature image.

    Parameters
    ----------
    W : numpy.array
        The weight matrix, one feature per column.
    second : int
        The display second(s) for the image(s), if saveable is False.
    saveable : boolean
        Save or plot the figure.
    shape : a list with 2 int or None
        The shape of one feature image, e.g. MNIST is [28, 28] (default when None).
    name : str
        A name to save the image, if saveable is True.
    fig_idx : int
        matplotlib figure index.

    Examples
    --------
    >>> tl.visualize.draw_weights(network.all_params[0].eval(), second=10, saveable=True, name='weight_of_1st_layer', fig_idx=2012)

    """
    if shape is None:
        shape = [28, 28]

    import matplotlib.pyplot as plt
    if saveable is False:
        plt.ion()
    fig = plt.figure(fig_idx)  # show all feature images
    n_units = W.shape[1]

    # Grid layout: e.g. 25 hidden units -> 5 feature images per row.
    num_r = int(np.sqrt(n_units))
    num_c = int(np.ceil(n_units / num_r))

    for count in range(1, n_units + 1):
        fig.add_subplot(num_r, num_c, count)
        # L2-normalize each column so all features have comparable brightness.
        feature = W[:, count - 1] / np.sqrt((W[:, count - 1]**2).sum())
        plt.imshow(np.reshape(feature, (shape[0], shape[1])), cmap='gray', interpolation="nearest")
        # Hide the axis ticks.
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())

    if saveable:
        plt.savefig(name + '.pdf', format='pdf')
    else:
        plt.draw()
        plt.pause(second)


# Old short name kept as an alias for backward compatibility.
W = draw_weights