From 8ea1ddd8f1a94f78f3b98431eaebdb5c8bbc547d Mon Sep 17 00:00:00 2001
From: yanjunjie
Date: Thu, 9 Mar 2023 21:48:41 +0800
Subject: [PATCH] code release

---
 .gitignore                                          |  144 +++
 README.md                                           |   27 +-
 figs/cmt_fps.png                                    |  Bin 0 -> 180937 bytes
 figs/cmt_robust.png                                 |  Bin 0 -> 97630 bytes
 .../camera/cmt_camera_vov_1600x640_cbgs.py          |  256 ++++
 .../fusion/cmt_voxel0075_vov_1600x640_cbgs.py       |  365 ++++++
 .../fusion/cmt_voxel0100_r50_800x320_cbgs.py        |  369 ++++++
 .../configs/lidar/cmt_lidar_voxel0075_cbgs.py       |  317 +++++
 projects/mmdet3d_plugin/__init__.py                 |    6 +
 projects/mmdet3d_plugin/core/__init__.py            |    0
 .../core/bbox/assigners/__init__.py                 |    3 +
 .../bbox/assigners/hungarian_assigner_3d.py         |  157 +++
 .../core/bbox/coders/__init__.py                    |    3 +
 .../core/bbox/coders/multi_task_bbox_coder.py       |  142 +++
 .../core/bbox/match_costs/__init__.py               |    4 +
 .../core/bbox/match_costs/match_cost.py             |   52 +
 projects/mmdet3d_plugin/core/bbox/util.py           |   82 ++
 projects/mmdet3d_plugin/datasets/__init__.py        |    2 +
 .../datasets/custom_nuscenes_dataset.py             |   91 ++
 .../datasets/pipelines/__init__.py                  |    2 +
 .../datasets/pipelines/dbsampler.py                 |  268 ++++
 .../datasets/pipelines/transform_3d.py              |  969 +++++++++++
 .../mmdet3d_plugin/mmcv_custom/__init__.py          |    2 +
 .../mmcv_custom/ops/__init__.py                     |    1 +
 .../mmcv_custom/ops/voxel/__init__.py               |    1 +
 .../mmcv_custom/ops/voxel/spconv_voxelize.py        |   71 ++
 .../mmcv_custom/runner/__init__.py                  |    1 +
 .../mmcv_custom/runner/hooks/__init__.py            |    1 +
 .../mmcv_custom/runner/hooks/optimizer.py           |   23 +
 projects/mmdet3d_plugin/models/__init__.py          |    5 +
 .../models/backbones/__init__.py                    |    3 +
 .../mmdet3d_plugin/models/backbones/vovnet.py       |  390 ++++++
 .../models/dense_heads/__init__.py                  |    8 +
 .../models/dense_heads/cmt_head.py                  | 1086 ++++++++++++++
 .../models/detectors/__init__.py                    |    3 +
 .../mmdet3d_plugin/models/detectors/cmt.py          |  252 ++++
 .../mmdet3d_plugin/models/necks/__init__.py         |    3 +
 .../mmdet3d_plugin/models/necks/cp_fpn.py           |  204 ++++
 .../mmdet3d_plugin/models/utils/__init__.py         |    2 +
 .../mmdet3d_plugin/models/utils/attention.py        |  138 +++
 .../models/utils/cmt_transformer.py                 |  282 +++++
 .../mmdet3d_plugin/models/utils/grid_mask.py        |  124 ++
 .../models/utils/petr_transformer.py                |  487 ++++++++
 tools/create_data.py                                |  103 ++
 tools/create_data.sh                                |   24 +
 .../create_unified_gt_database.py                   |  272 +++++
 tools/data_converter/nusc_radar.py                  |  651 ++++++++++
 tools/data_converter/nuscenes_converter.py          |  648 ++++++++
 tools/dist_test.sh                                  |   22 +
 tools/dist_train.sh                                 |   20 +
 tools/test.py                                       |  289 +++++
 tools/test_speed.py                                 |   85 ++
 tools/train.py                                      |  293 +++++
 53 files changed, 8742 insertions(+), 11 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 figs/cmt_fps.png
 create mode 100644 figs/cmt_robust.png
 create mode 100644 projects/configs/camera/cmt_camera_vov_1600x640_cbgs.py
 create mode 100644 projects/configs/fusion/cmt_voxel0075_vov_1600x640_cbgs.py
 create mode 100644 projects/configs/fusion/cmt_voxel0100_r50_800x320_cbgs.py
 create mode 100644 projects/configs/lidar/cmt_lidar_voxel0075_cbgs.py
 create mode 100644 projects/mmdet3d_plugin/__init__.py
 create mode 100644 projects/mmdet3d_plugin/core/__init__.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/assigners/__init__.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/assigners/hungarian_assigner_3d.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/coders/__init__.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/coders/multi_task_bbox_coder.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/match_costs/__init__.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/match_costs/match_cost.py
 create mode 100644 projects/mmdet3d_plugin/core/bbox/util.py
 create mode 100644 projects/mmdet3d_plugin/datasets/__init__.py
 create mode 100644 projects/mmdet3d_plugin/datasets/custom_nuscenes_dataset.py
 create mode 100644 projects/mmdet3d_plugin/datasets/pipelines/__init__.py
 create mode 100644 projects/mmdet3d_plugin/datasets/pipelines/dbsampler.py
 create mode 100644 projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/__init__.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/ops/__init__.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/ops/voxel/__init__.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/ops/voxel/spconv_voxelize.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/runner/__init__.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/runner/hooks/__init__.py
 create mode 100644 projects/mmdet3d_plugin/mmcv_custom/runner/hooks/optimizer.py
 create mode 100644 projects/mmdet3d_plugin/models/__init__.py
 create mode 100644 projects/mmdet3d_plugin/models/backbones/__init__.py
 create mode 100644 projects/mmdet3d_plugin/models/backbones/vovnet.py
 create mode 100644 projects/mmdet3d_plugin/models/dense_heads/__init__.py
 create mode 100644 projects/mmdet3d_plugin/models/dense_heads/cmt_head.py
 create mode 100644 projects/mmdet3d_plugin/models/detectors/__init__.py
 create mode 100644 projects/mmdet3d_plugin/models/detectors/cmt.py
 create mode 100644 projects/mmdet3d_plugin/models/necks/__init__.py
 create mode 100644 projects/mmdet3d_plugin/models/necks/cp_fpn.py
 create mode 100644 projects/mmdet3d_plugin/models/utils/__init__.py
 create mode 100644 projects/mmdet3d_plugin/models/utils/attention.py
 create mode 100644 projects/mmdet3d_plugin/models/utils/cmt_transformer.py
 create mode 100644 projects/mmdet3d_plugin/models/utils/grid_mask.py
 create mode 100644 projects/mmdet3d_plugin/models/utils/petr_transformer.py
 create mode 100644 tools/create_data.py
 create mode 100755 tools/create_data.sh
 create mode 100644 tools/data_converter/create_unified_gt_database.py
 create mode 100644 tools/data_converter/nusc_radar.py
 create mode 100644 tools/data_converter/nuscenes_converter.py
 create mode 100755 tools/dist_test.sh
 create mode 100755 tools/dist_train.sh
 create mode 100644 tools/test.py
 create mode 100644 tools/test_speed.py
 create mode 100644 tools/train.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9bd8999
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,144 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+*.ipynb
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+tmp/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+mmdetection3d/
+mmdetection3d
+mmdet3d
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+hostfile.txt
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# cython generated cpp
+data
+ckpts
+.vscode
+.idea
+
+# custom
+nuscenes_gt_database
+nuscenes_unified_gt_database
+work_dirs
+*.pkl
+*.pkl.json
+*.log.json
+work_dirs/
+exps/
+*~
+mmdet3d/.mim
+
+# Pytorch
+*.pth
+
+# demo
+# *.jpg
+# *.png
+data/s3dis/Stanford3dDataset_v1.2_Aligned_Version/
+data/scannet/scans/
+data/sunrgbd/OFFICIAL_SUNRGBD/
+*.obj
+*.ply
+
+# Waymo evaluation
+mmdet3d/core/evaluation/waymo_utils/compute_detection_metrics_main

diff --git a/README.md b/README.md
index e80152a..1e6068f 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-# Cross Modal Transformer via Coordinates Encoding for 3D Object Dectection
+# Cross Modal Transformer: Towards Fast and Robust 3D Object Detection
 
 [![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/pdf/2301.01283.pdf)
 ![visitors](https://visitor-badge.glitch.me/badge?page_id=junjie18/CMT)
@@ -9,31 +9,36 @@ https://user-images.githubusercontent.com/18145538/210828888-a944817a-858f-45ef-
 This repository is an official implementation of [CMT](https://arxiv.org/pdf/2301.01283.pdf).
-
+

-CMT is a robust 3D detector for end-to-end 3D multi-modal detection. A DETR-like framework is designed for multi-modal detection(CMT) and lidar-only detection(CMT-L), which obtains **73.5%** and **70.1%** NDS separately on nuScenes benchmark.
+CMT is a robust 3D detector for end-to-end 3D multi-modal detection. A DETR-like framework is designed for multi-modal detection (CMT) and LiDAR-only detection (CMT-L), which obtains **74.1%** (SoTA among all single models) and **70.1%** NDS respectively on the nuScenes benchmark.
 Without explicit view transformation, CMT takes the image and point cloud tokens as inputs and directly outputs accurate 3D bounding boxes. CMT can be a strong baseline for further research.
 
 ## Preparation
 
 * Environments
-Python == 3.8, CUDA == 11.1, pytorch == 1.9.0, mmdet3d == 1.0.0rc5
+Python == 3.8 \
+CUDA == 11.1 \
+pytorch == 1.9.0 \
+mmdet3d == 1.0.0rc5 \
+spconv-cu111 == 2.1.21 \
+[flash-attn](https://github.com/HazyResearch/flash-attention)
 
 * Data
-Follow the mmdet3d to process the nuScenes dataset (https://github.com/open-mmlab/mmdetection3d/blob/master/docs/en/data_preparation.md).
+Follow [mmdet3d](https://github.com/open-mmlab/mmdetection3d/blob/master/docs/en/data_preparation.md) to process the nuScenes dataset.
 
 ## Main Results
-We provide some results on nuScenes **val set**. The default batch size is 2 on each GPU.
+Results on the nuScenes **val set**. The default batch size is 2 on each GPU. FPS is evaluated with a single Tesla A100 GPU.
 
-| config | mAP | NDS | GPU | schedule | time |
+| Config | Modality | mAP | NDS | Schedule | Inference FPS |
 |:--------:|:----------:|:---------:|:--------:|:--------:|:--------:|
-| CMT-pillar0200-r50-704x256 | 53.8% | 58.5% | 8 x 2080ti | 20 epoch | 13 hours |
-| CMT-voxel0100-r50-800x320 | 60.1% | 63.4% | 8 x 2080ti | 20 epoch | 14 hours |
-| CMT-voxel0075-vov-1600x640 | 69.4% | 71.9% | 8 x A100 | 15e+5e (with cbgs) | 45 hours |
-
+| [vov_1600x640](./projects/configs/camera/cmt_camera_vov_1600x640_cbgs.py) | C | 40.6% | 46.0% | 20e |
+| [voxel0075](./projects/configs/lidar/cmt_lidar_voxel0075_cbgs.py) | L | 62.14% | 68.6% | 15e+5e |
+| [voxel0100_r50_800x320](./projects/configs/fusion/cmt_voxel0100_r50_800x320_cbgs.py) | C+L | 67.9% | 70.8% | 15e+5e |
+| [voxel0075_vov_1600x640](./projects/configs/fusion/cmt_voxel0075_vov_1600x640_cbgs.py) | C+L | 70.3% | 72.9% | 15e+5e |
 
 ## Citation
 If you find CMT helpful in your research, please consider citing:
 ```bibtex

diff --git a/figs/cmt_fps.png b/figs/cmt_fps.png
new file mode 100644
index 0000000000000000000000000000000000000000..087603dfcad4361a9a4737804033e1123196fde0
GIT binary patch
literal 180937
[180937 bytes of base85-encoded PNG data omitted]
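For readers applying this patch, the sketch below shows how the released entry points are typically driven. It is an assumption-laden sketch, not taken from the patch itself: it presumes that tools/create_data.sh, tools/dist_train.sh, and tools/dist_test.sh keep mmdet3d's standard CLI conventions (config path, checkpoint path, GPU count), and that the versions pinned in the README are installable from PyPI; the actual script arguments may differ.

```bash
# Hedged usage sketch. Assumes mmdet3d-style CLIs for the scripts added by this
# patch; all paths and arguments below are illustrative, not taken from the diff.

# Environment: one possible way to pin the versions listed in the README
# (mmcv/mmdet prerequisites required by mmdet3d are omitted here).
pip install torch==1.9.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html
pip install spconv-cu111==2.1.21 flash-attn mmdet3d==1.0.0rc5

# Build the nuScenes info files and the unified GT database used by the CBGS configs.
bash tools/create_data.sh

# Train the LiDAR-only model on 8 GPUs (batch size 2 per GPU, per the README).
bash tools/dist_train.sh projects/configs/lidar/cmt_lidar_voxel0075_cbgs.py 8

# Evaluate a trained fusion checkpoint on the nuScenes val set.
# The checkpoint path under work_dirs/ is hypothetical.
bash tools/dist_test.sh projects/configs/fusion/cmt_voxel0075_vov_1600x640_cbgs.py \
    work_dirs/cmt_voxel0075_vov_1600x640_cbgs/latest.pth 8 --eval bbox
```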
zYBRoX1&Qz$DEJelQGd4i7|AD+u`GJ#!NAPx+;y~WOuo##YsOf$vpG;WvSj5{f(g|lCPH=@r>x3m|o zV~TA_II2^?-g)V0xxff7L@jyrl#PZz4Kz8Yjgf_cRLw7X%<*m6xY?0`HoHCw$uF$p zZoE0w$j+caxO{vt)mbN_e{=;$DXfR%v(a#&qvM9==g6yfiqN$O^GRDl{hAQdq0l&z zrn(`4Ql=zuT@oXymm1t&*STWjpd{b>w06?w95s{=>uXh{*RZ4OvIT0xLVOl8YK&STs3Vb3FV>-UftufEn2}Q=m7x0f zdonE|i)B@<$OQk1w{1Xw)TeE1o-syvQ=spG2@%J(ZI1<=#KznIev)J-)r@r8Vx4}I zAM)ZI9s~B1SBu3~3y#sOfg0$&px4;mmd-;@*B$>77kNO6k5gd4)AQW!JuK&>c470T zIs*#Ynaz=_Z#fKp_bW=4!R!<J%JQYRFn2z)R+06+VSj ziC_09=f0$p_#tHIa;zsc|6)%|O0jED+7J-@-H&{CQ4Myd&Z8IS1G25J?dL++;aAI6 zlHb0|Qql-^9=#Uaat2&S3-LQDKu=I@O(*_IWQD~|rleGEHX5wlgwlF};||i}1H!~b zTZZH8`VY$=BZ7^8CQI~S3RjbRLjv(Q(Y12os z(bOE3yPHMeec(+|(%rteO$0Gq2jEu4<}UD@_0n*H&vU)4xHxTL_m0kMLJvl%PYKa? zy|P(LKWWpC^QG?ov7zC7p!=cw?N&%06}CeeP7Yt;vnTaq^iW4PT^KVashYBpxC?8! zH{G#7QBYTu2^FKZ8ASc*dA3W~^j58D;<6`5%w-SZRq#vqHQ7;-(Q(F?Q+97Iu>*!P(V~GY&+1+VwNmml2pW{ssRK#a@ z;Fn*=%1s5gsraH<^=r(jVWDZh7P!2 zEpt;Kam$;0&p3MsORe8@Mco*EpEv|Iu`g-IX?N6OL_j zl8J5Gwr$(CZD+@s*mfqijR_~VCw}++to8nY{iRo5y{oIMj&t9OtH&l^GXb6N!=C&I za*(R(%#6By{Xxxs!t&R;+l_OHHkTyht@bdsf=;v!>R0L@4uik<)d{)=zCG_95}WA9 zVv}+LSbqS__OBRh7IaqvbSATc*!{2!g%ON4jhIes8FwtjMG#wlMZwfG&@xQh|87-Kg79RWm@ax%UKC z_1#(A;C9gk2!>=z6s1I=p>v(T&31l@ZmH9kdwMbKs(#?Jf#m+jt)queo6p~Y-F}^| zBl#~=r;$qG@@(i(MWqAoALNnDa0d7G!yfiYqqc*`(=rl;S5FZvf>O=N?HCJkplVxX zHC(1uO;k*f1|n<-c)oo){slrv)N9h}AU?=gN{5%F7CKHKf@gMH>ZKnf?&isMI4&pt88SQ(Ly(*7urpq56^C7!rTm*tnxM?mBe zf9OyoqWjUU+1NQ_uMficT6394SehIPJsSRpU9}+=j;fw^Q}~~=#=burF-u^9dD2>R ze@s06EJ67$0GTuVZJsUjM1)>bXeQz4c32Dhn4(y9GUt}KTsB1Xqbg?pI0OU=`jS&| ze}!jVVZ+f_WTH->#w$4uy#q|dPwSl(KlBG71WNwzr{42-`6Jp&Rv)$7w$Z=na%eG~*a|D$MxI%3D&!8+aS#aqd zlau4Tu4r0?i163Drm?{u$ZaeB<5QV#%-lS?3fv?A*(NIJr%Nt8z2&&-l>&>0{ZwZd zY$r{6N2KBzrEtU})D^Dk`=^Fi;l3&S@Zc=3$H8ATQ z$L2+(yhm{?B>Mw<^nUeZT6y%*>y;LRV|!g2S+Y=|i?F?>V+Ud^flaOZnfnanYdr^a z7P<4@ZZYLHxo$=oD{I*-#ODDkhpLg09#vSV)_K<9ODKYF<%mDx%B^r&o_QMB06th% z>I_0J$%Fe~TYl0M4OYEQi7s z%4)M8jcFF+!?-c{ic@06zB zhy)?E3+8Nr4R#jTO=M?l)^13smsyrmb>2o}&ruXI%uv&Ps`DyeQ zK^VmLOWo3146`I|I}hlfGxNaCUnRI#58|vzMStME%fY&Kn^xfNx?$@hXvzkvj6U-5 z9lpl6nz5&Eso!ZkYs8elBu+;wOA#ie1Q8Dqd@j)G?5BAiM#p{Q%fbo_(TT(qgY&vh zJ#~cn_a9?gidMtX!Ik?htGw4<@=LQ+UJY716E3pPCNFYHHZJruJ=PhUQs?AO?PSf~ zm4Doh20sQ2CP?KYVY9N3731cD!2K`WwuQOL6Yg5}G)3jT;>vn{thL`F`D6Hb;EM2~ z-$iPq;a+$Yw89yuM8R}!j3T*E?ZF1|&8k+Kovv+*Nn$Ss0yGLuei}}IvjyM?tkF=i zvTJZnRc>Cw2Ujial*BB8j5yddzmxW-ar4=dXoWQg7h=yoOz(zJ7 z)sHjk#UkFmf#%*7Lw8s!Zguz{VgE?nFpw;qORx+}1{j-bOyd~%(p1-hILu!G*sRx= z)Spt7nVHkw{w~Q6O$U!gdsos}v%%Ov4SRJ&#CTh~d5w`>d{?J^l7HMEL=vnh8Y^{i02B5aJd2xdVy@eQb*l-=|X`H0QNZ4+B8 zA;CDI@Ae{MObbV2SRrbze8kt|Y+6j8FHhrqCz0W)0LwEvKWJx#>B?#-vceqbdEk}q z-z}?W+j=ColUg`k5l5zu4a#kV;hLD`F407Jr(H`PnX|1v`Fex))&%3%XX%)iw zaR`Ttyn*0a>*>)L*F9|R#;E%gs)xmF+Z$WBp z9rKJ+_!F9m4T_ptU~SIeyhGGVbv2hT+;x=5`*zyU;i|6iL+~GMq#kZiPSO<9-*HzC ze8fo>-Qe}s@Ckv7Nyd+iZ`)Ee2HpRp!MSWUgeN zd%l1U?x-nQbZS^kw{F_m0VG{bXWcfLpagz?zXAh$&3Z1x_>177jhCN3Dn6DiUu<84 z%x+eLWe#{m9y0aSnB^6#!e$^YH&X-K*5A90>YMCK0#0X(0q-?U-zokyosC|X>%w_j z<@#LxPEW&)uc;mVvboth9RV(1>)E!?(S>&90(pWQ-+#jw)PGcpOQiEqJi{j}%|)fx zHyr>gWS2d5nW1+)vr_Ky=$;tHyjC3N7|M$(WQbdbq|3Ojw!{Z$yPfCGWP|GpCxSx1 zxo^jUfr;@Gvu@=!wS59ZSBFKPW{lZmQsn1r|3jj7-!Sj0RmyA}JwUinvtV4`Ae0(K zWfCK4ONd_^Y_fvxAB^=m-)~GaO$9`^O~1u|t!Myrz>y+50>)h9VO*at6|WIjbHC7J ze^BbgVG}<{T7;^}*1eNW29%2e6UX~K?cUH(|Zgty4 za4g|DGx~UXg>Xr!E_}(a^xkeXgM{M$&jR!vsDWTr>}XjzjZBQ>z0icjF;H%#b+|RW zGe#;F@bJuK-%mGgtSQ!7;oh6Y4319TP7hK~?!kwgZ3O+}ne+~0R@%?gVb;3Qa!$ct zmJx)fY(sEcYdMNB81CtW=#4CecWU=Vk77&@8 zsg1E}$Rh44lZ-ji}Od8>m)G|yy3!|icEHE7SDlaq20oks#Z|6^I z{zR0zGoJQvK}Gase6w>747Po}YkKabfFpptf<1E|9bGCdo|* 
z!B^*vahqRBfu9FavCCv#{-f_)r3fecPOVF9H6oq=0)Z;C%AST!Tq3FD9?{GdKmM*A z(cJu8`-V{RmikKglnS7WsS zKM8{)Sv#J2EQ$LpSbd1Zwy=a(bHU!OXKG62XQLVqAneAl*0^3->aZ2|N-kKu5_(kL zB3-3W{W}iKVHir9S53>+(dHQ=@la|+6K)17Z+86L56l*@XQL}OIO1wGxGp9;txv44 zPK$s_J$hxPxBY!Q^tnH%&iQQ^oitfgen>twL&69*bQC;PE?!nXXhpzYy#)?~mk@PC zAeEC}y#enuEzOz!C0XU?q&xQ1mR)U67+qMb$U zXN(TAV8j=!bIt*&O0>V@S1_#-L9jd<=^Hj|^XEjdY1a&wB8=^P3vAZQ3h&%g{o}!6 z=F2cSICc=emxz?i(Y=ddh!uC#-8+XDnrOV|8sI<-F4;)Mk9a%>MVFoBAl>u;CxQP_ z%zjaf)TZng+Q2W$B*F|7ro8q6Gc<294RRy!l%(_Pl#58ts{R?1ujomW#iE8mk3jsfilLox+w zt~5+liTrICP0ZT=N;?=VvYw|-QvR2dUdcg3zWvocg>h1ZaitI3yYO&ZA|lugla(3( z_e`1&g=jd+6N>`|&jO@fajc$?y=5w}q>We%qS*sW6+7PhmOKhIub$f@ zS_NrRUpVYz;!#Y?@p2P1DpRdi+(FmC+wUuRNfpXl`Ho%Y#Rq*SuF^dalTlO4GyGehc;WgyyT<+AC_9l5Fj zXOPjpQw-?`!z0(GKM~Gd(x236Vi195bOg??l-RBWmZ|Ax4-L)!)sC zhMYahRcOVpBYMLzyCtV*65})Faxby~y;-(VSyEq#IOpDk33+BNJ zDET68_j_B6Zv~jTk=CvkIqM>4%~0aMciKg)M9es)gD3iY6DxlY+CYrG{yYxte*aq-m6~m2 zz?8R-R~}*l_yKGi}!X6QM;smi|o`UKB0F~=M;*>BFj%2Q^r-TMwffvK|04fJnGBJGnF!?3t_$4)l<}|Ijx%~;@rUgCcMY%ezR;^~K zyJo{qLWbDC5t<+P2kDhmdNo|?T%mu`2=f+wcKp}DQ3`Xz(TS)q2DXeEg6?P9t);*VWBrnBZEfoE9+Red2nY(bj+oPq_xj?b=yppXJozwqJNH z=M@Zly`U1ft%;7FJjOuPC2htl#)xm!k*|jkt&{iUTUUp7J^Qm~203(Gew*RK;;9Lg zx3V_~Xeaw_@UUPlo}(wOG$M)Xu)~!)I7>09$=<+G{*#X4#B`xP17rNRk z&`s}eI&1~FAK_)LuKlaTJmTa2oz<(Sz$k6i;`n* zTz|@i){NvM;I@pRN{STj8AHIR9Sp$5~SZPMka zQTrME`Mxt6z;D0wWmKeN`HZ7wf=w*O>cAJEZn;b^h_l$CEOUZ;4zyl~o29PE|1MY2 z?gm{>{fLL8wSt!D3gGtB$ur`vy{RJCF1w^*GuRa0XPeynZLI5@B3E~i>#9uss~9vM zL)_5avEj2!hXMl1J&PJVjv^gxU=zR7(0S)sZG+Zf4#DrAx1Y1?F=NN5x8$zy1jcZT zDmDr;6m$D189ASYZk$+uo5Xkr14=wL6nhI~_BEniuPD>Z$|^f~N1?cp5-m?oIXkxu ztJ3-UpguHwsgWmbw@|W+a}5{pRU^NSM-H1&2kMnwLBSFD@r^5UsXotNUIHAm2JD-2 z6}iMk*x@K=yyo=NMOF(ey7&wxeD~!HXW4ohF>$%N9w7L&FhTb5 zn_^D*b10tuh%cwy4~9bw6tStlbi`{R!fDiruL~}%tGJ0U*o)_LYIRcfZjBl5R&PD% zq3V9)Nb}QM}j^0o^ z4-|^WTHy~e#Q{$~OHNgt zvrbUInEhaks*S;vvujmP-w?{@2I2!YKLV@A#9Ek#tL;?DYx)I~%=C0NptrLLT?^u; z$<3#1+o3I!*Q@0!r@tyyy|ao^=kUR;eT3&Ajl3PON)h1CWBoN8^J3PmBcP;bxhcm?I)2;x*Ez*{jv&P|uB>kvK!ptfR-C+xpof-!x(QkQ^f#0J^ z8t8pNu~zR)KWc}oAsb;Dq*+;Zm~=nLF^*H`AwvFKD@Z~1qf*!Ny}taAD@0m1M#kLq zNv!7@t9Q=1JeVKfgc5i{c0W$W(xp|vOC*(?%b%4{N@?P1H~`Fz6GTz)5J*ZBI@WN> zKc+BJ9v&B-YqypO5i(wT*suOQapkvACdjf}806l*i&rMeN|6uW3ui@XMfq#{J>N$d zihcMDP4EZgXT396vZe2O&-GVhy$v2FLf1;nLovD-*38_Hf$3iCOek-ckj>6LoNePl znD%?uluS;I2H-KL5Bk9e0ZbH^|A?9d|Jv%d0kkwrU2r;^QY)5O1Xom~tH1gg>14{l zU)Ovu)Uw9mdh;n5nm>=y{-y(4$9Z&ogct$mF!>M(0*s(Y7<>^0w%5*|Mh<@hMI1dB zHPisMEiUztZ3K;Hql|sC#5OcVe}lpi4Bn)3g~whi0yU+d2}oWe@JgzMJT=Sq$TvJB z<1ikv?kQ>JV#a?nps>{3#HiznB{IE06qkS{&L}3gtI6#+aD%oUD6Q5kgCLx^ah4&u zz98%$l?GS(%_zGy`mO=};HoIjV-#zs)iPpzG{dh6Dz-;mifLJVUVQWNE$=dd1$1*1VbE2CHr@AQSe;Hn8n33H;gtWT-|NU(^c z7q1k!Jvk#vNm|H6t^1;OBUWWhpDd~yt@r}+2u=tUvwMTDQi20}j1}PyG?i9?d65}| zlNj_N9)a>>VoVhDS_vxz*C8feTfR3sWBCoT9b(Kfi*yuYc-c2qXN49x=#cnAK0FQw zelp_q)`bm-{0Zo8-OgiLIdkZT690Hm3sdaRN;<5qI~e}Y2uTbD^mx_QxB#Pe&-3Ln zHf%pU0qcs7H=AoeOl)kNI)&ETDisqPm^io#ioB|Hc4YRPaRZzr1A{{h??)bs`vhBn z@jZ2|^h>4D>|8+hHGRyIlD-(9Y3{V_`g5{E3mxEK9Ny^Laa)kDr*J2FbUOgvqlRn4 zTf>g;(wgn})7if}YrL2)ufNn5A>Cf1#C=&NjEsQUd$+f%g@AkAVJbk1+k@D-Er5h; zO(o4EUQIU@@X&5HklzE@a*x*cZ*ZIYzb|Q8#h&U_ z$^69HtRg>$S!S(MZpIbxmLC^k{uFd2ov;&87TAMkTXhD8AnC^PNh7f1kfDs&5Wf+D z=b6T~0#-avziu{Kn<-b5Fxs(XJ~G`x=#K^bsCN5O6T!ZN8pV}^vEP^O#x^t>M8nR? 
zi*3}pKZ5htxDI*0;$r(sZ+yuWg;W1-nC+X%h06#cUdh1>o!mHgq2TvJt~kEo4x>FP z{nLogUdeR7+*v;Ai639~R=+*)8eZt9S+Yo^pdX?G#1xs~_&2=-)CZ@vMk#9bWkgve zSC+Ie#q#~o4nif2K05t9ck|Y{XL36D6P<0SZD#YcHKUOVfKYtdpmId&&@8FU#ZO?H zEp!JFy!B>ot2`kvbG)xL#sI>*`yF4P_^l^TlF6s)v ze;%g;2cW5Z@~`LQQIvGGUwln~@dKGv)6jhf+pQPPPrTCAJwWd@LW7PcdJAa^J3%<~ ziDE{Q@>d9bC(9I6vl1T4Rhfeq^N_%-YrujUFryjUo1soz7Tu6f#>Cr}JK96pcFT5A0`AsqMm+AB5O$w}Tnj*y#;J0C4J@k^bUV7$N?SRC>Em%OXd&pM>dBV;L zg?Q1HH#r>3t5z}u4Z_*J80&^kvxc$waxkXK`!zLAmDy-d0Fk1x92%> z>M@+vW!=Wm=j(M$x73jN4GIhJd^OV>xc>0*aFGK|3m{lxZgHvExE-p9Ri^42H5Pwv zbkO5O@eo?T!rHVa$34`I7}B6hBPPKcm!^#WSPKRh7a)*S8c_Ca?f?{&`h7?x5Cd~JRuWaySN($j_5*ny~ zz|0!-;h6VVIoy+~myQpUh~I}f9-OWzqog2o^2fu*OaKmxJcRqVeRYI24B_W?;o^sj#lVH+LzUTqn;JvV?bj21&l?Kyf4zzRf=F-#hIfrLff z4PXi9VxrWJf6PV*TLv6WbqUrO$^H$g+{#$L)@TM?0?xnD^Ku<^RXnMvaK)Z#wFM5T zE&FW!_mQru%5en^P|@h$O;l;oDShN0?W`gdoKWI3i!RLF1QX|NqCAz`pl5)3 zn^mj*ee%Nf%0)6%etD5p5{_SkG`6F>7-!OtVBdc-B9INq2L^*Y99rvB9nZQh^-Bj! zA+^Y2ybA{I#Oa!(;(5@&`&mvf?0gMaA<|Ia-FKf!z2}YM4Yf?1VVuiPCIsw{v^jod zeSX2mzvgvN?)Kk4DpiYrF~#|Y&`2od&v{YjPa8SrMo&wVAl z%8XjJAA`-7j0fJ6M1_%(1@}S9PPUke-y7SXP#m?sga&yIu6V%yCZcRUcJQc#+qY_* z1yK-J-n5}|G$kvLy}A57JBhL0M)<0olbMkrzV~%|I*L{vxKNhyFBbPH&N|_#*(BW* z87z1Mc&pxqnHBO7vWq}&*!1AD{>g9_X;etvmi}c((0OfJv!9Gk`4~%_xXx_{GYDH1 zf}7<_I9JH~_Er;+#J0Tzxcv8H0mTgPPi z;qW{UOX2Wk{t)>>B@ig&82RzaX`Zyb;q~^0L@yujHY<2hNHrBTHrC~_AmyV!58?Qv zN$FqRr+1quH6;4HL2Ynprx_z6RE}dJY_DdCA(dyYbnwf$Ft_!YJ+E`e=6zmNCc{%@;M2B*5f{q0u*Nex^B0tas6jd?Z^?UXEl zSiD$O5wnjum4!B@rjo7%|F`C5-0rYXsj|h4!p+w zd@Og;)WuMZf#Wex=z8|ak}T`;YjI_NaeZ2~wXhik!|W~Rs{FNoH8`=_nvZ02#~;$s zqGr1__B-RG54VwyeXgpPkbH^PaA8AXC$6>ndw9IAPsF}Hy5QxtL+<`6ukDy6j5vj@ z26aN`zLitL=yr>*AawzH%{o$Mqc6yT0p4TQ7!yZBsv3=T;TnF44|#{p6~Kyp`J~k? zQfw6@BuO4JR{oU(04p+v~PBnVcw4<>@wwi~WK zbo9KemxmU!Yu-8~x_2jm{NkvIESN{RlBN9|`{U|h(BxQo9p?bgVVl4AoHG+&jhzn9enVtO{EVuOJ6GQ~apU1spyhDFc`YBvfUot|{7Cm?6`pCmKwJWIyNW@z@4o zM2Jr^dS4cX{XZ58D%qXC8jDf9Q<7XpcMnS;_xFC5CmhA3x?Pp-tS{w?JXw8!WMk}D zJp3LYQDLFus-UhO}YR>G(PuV2xEYY*B4<7($H18TUGxy=tt~1f&UhUd|ZQ z@*&m^EG#J&a5-O3OnFT58qg$gLWNE#=KJ2z1Nv8{4sa3rdOe=EH)451EUxyB+6(x+ z?`_<56DJHiJf8omtM&#wHBHd;$VhtP5k?sI4D3^Y_|8FP4*+d6#4Z&v^$Vie8--dF z+EKI_aD3=2dclLqH=xnpZxp_r*PKo9>FKPiw2|R>#JvZsjlo-LLY2LahxYmkTCuM6 zJDH4}JyamuC}`qj;MfW9eGWMxDT52iAi}ChF9~O>f;qM|VHj*X;JCHmVws6ozBC0D zV8XxS#x!G2~I*e2>Ww zYaVzGQj$1(7sa?5+Q9gyO4-Km!IkYSyfF03+d=zZd@{zmL&bhk^HBCt z)6V{>N&2>#Gk;LkEU0K}-Bq!T0JfKA%MSamF;CWd;#V1L1Mbyn1uqP_`n?iNry?*V zjQODu*M7H*Y7LaZpeThzX;g@gE>et!^?pbvlgNKpVwzDp`AAWOtOsFQdX#ci5z8r{ zrb`z)ZT@PQ<kwmZ?WupyOPKXO$5h_jux^gPyzPVZfUG8x53OK)smg2wL&?PqgSn zi9YLK+ENf2T{oFD2H1xw`nPalSY;Y1NCfBvgpnhb!ns$(CsC$`8Esn3zLy5QofEl3 zEo}ueTacSnNCVs>lU~&ZfQJRZZ`DNoBgv#Ch_uD&<_#0~9A=d3Su^hL>|yMY(D3WU zIMZhua=R_D89%6CrepN=fCcVk5xutZl2fy;XOpCs@Cpbu)BEg#KM{P4;;#l=N&ct_ z+z0wNNJ>aXL^DwD05KY<`sOJ*EU-d|IOWS+TZl~?e*u^xHX#u$QCck*RPkGSI_MkV zBSNS+fsZc;9iWGMTq{nc2yDWrh@lB25}rL9QL@nGX?cQSQ;o$ELEZtoN2tT67tL6W z4saGn=3T9~;VcVFFa(EsnijKgNl`jZhbBQ|0Wka$9JZ-slS{)apP=bCR?LX44y!r6 zjUg4>Nkhg+Z^qIBtFXhV@Pv!a?rd)E@ZeQ;??;9TgC2wGCSI=XN~WWLDQ-mwm@von zqz$!lBvc}vZ)-Oo;}t0~vYXX!wRGZnp6tHL{;I7+w_n@@vhozhSL(Z+6as^~t?akD ztGBhMh8Ct;aFZt8Fab!{PD<2m zvAbfETe}7Th8#?Z_FKmpZZ_-);zZ{>@@gztCRI*)9Q+wmP^iTLMqUD4tpu^hX_RCc zeA1dj10b#qbu<{CtKwi12Hl}pgis_b;{nE0sD$4j{5M%u{h@B8kP zP56aYZs31o7~g+eWO~fG&ST9mM{^l?z`W_V8kG1|SX;RexwgscGqbS!TDyUHSf9Bh znUuNPD0ImS>7l16pGE{gLPf7`7jni$zTLWU_Klq-O!buRNsrE?VZ(~+Z4ic84$nhv z-q|EY8A(cC%6KHjPz=l@+c~)xe&A6yN&Ot5fH-!JM>jlp*%LA;gC$GJV*bPH^&~50 z!zE;+TQ`U(SM%YS1j9q<#RE=Wi=j}|CW-w}KwfE0h+*L0QUXz+=gcue%q(GzW+hG* 
z^HxWa%I)Q;q1fWM&I;j0Ivyd!BwqRpD{z1B7AEj&`_yFQTU`qgzpfUln88Rok7+-o z2Z-uxJ){HNw0W#^&=OSzY-t*h!_DVxZXKgfZuI)1t1b{C^g}6vT)nt85Uv?)v1%U z9y&0CWwcq6FdU7)vMS-%RHa;5q7`4={)iBZ=pm|^5k&FpHYW*QE{U`=cdQU6_f8+F z#j%m)ZNh!LSahoq+K5bP&=*5Tlz<#lfkvdw2i5M1#x`8lop%Kshp(HjM+dCpiZ*Z* ze#44-Ctcqr)z`?`%TC;-=-0fP8cmLFz4=3bId=hD10prxD=qZT(o%h&)9&HOZhw!{ zruH`NgCj>Kw73o0GEiz|zq(`12Kr&0Wqsi*EWddwg;-tr{HJ;Q->90IqnOWC6%dS- zwW)#C(Rj@(e}lxW6<#^96#24Kc@EuqinNzZe*vSXpamk3EsO~gY(=VKqR^Yy zPGcA?5g)Zog5mTIrZ-aJ;8L`h^tU4S`6*0zJ%LVKM2cY*#cXwKA_ZPa0No>mLM3pG z1jUi&k0N0n5Z^IcYm-z4|Az~e6|+SsAZSuyu36}Zw}UfL&?{q)n8IN+jJfaM5f7&7 z!~bq*1%{^kAOAuv^Is+Z4*l@Hl@RzQjskMC*^BLNtv}c!e?n71={F#vAub|zA@4FR z=ff`5(b_nf>bI(cdUju98Uk@=PO{30%`$w5#hT8f8vOy`iG z{A)0U4;u1OK;BtXR7a#?Ni#F0n}noB$yv83A{jYPUj%}~P<0B~D%VG#>LWz87}sE1 zRw7#9LvxjJQf^{yuyW+@3Fj59ikUil7e&$V22?GXe7>dm$sM_TuQz49A z+~~2%8HCfn(=aTL&d%Wr7uL$+V7q#u@}$x!Eg5=z-N!ogHY+g9mJ`yzZ3Z>7Wk(Bv zG|{W=MA8a6owY!-e1$$wU0~Z%HCDEl~W{ z13P;0Dr2kAbS)%7BZaVx(^i;>c6%#=xMQSD zk7;`TwDUxx3=*U>EUGu|TGspr4h@KRb&G-IQarv$kjy&1pRO1b3H=7?>e+(fzkm+t zPZb}jc%>E&mB9T8$^t&eddhkM>p8aH&~U1b;(Ip2j$~|d>NMB8zL0UVWX2LS%I!5q zi*>5MG`$?w@N|AFe6vDAi&4l6sb^Qjk@dSkr`xbthvF-9?LG~9{Xai@*VV_`q4GZ< zga0kT_X_50qO-$l!1v!!SXk$K2@D0KJ{A;i1m@vMKB4HWFoMcf7qF>?m|#NIBHFZI zh4F>LVq(OI>&N1QmsqG10CdRLb7&gf#g^LYB=~jVi*j@OC4Jm`SRkd|Uw20yzaSf5s ziaS`I1#~aIPE8n3o8*#@c$dXluM;f#G4ok~k zWT;sbh%cM_nkHaOjX=>9?*ZSgye});`>D2lb~o@oxy#Rh`&q}eP8U&i-|uFmk={Yj z$0`myL;*c_|AcqrZi`+KNI;F_&H8a_7I|-*w~c+Z1lB&rv*8QiU7GM`?wfirqT~&< zzF)yM{P^Sl@>xgjrda-$==+nX@6*t|{UR+XVAuae{o_e$!tja{zq>7q5h;$K?My*6*F|T(A@la&!pGF*l zu(IwSf*;g&HxNU!20`|LO5-!8l8;{$u_ozsS#@;&?Ifr!l@6-|&JJEhWn9FVuzaI_ z8TsZ0dWRSE(oH6s)=SZ}Qzf>rMGp<~^-58KE+wjqq42k=VO$;x^49`>wjQUiDPGs0 zMxpFmi+Hsl?jUGh<@TASeQf$1Vx{nL8L{?NZV%uxq8~3 zL=N0c?XtU2hW934_y3Y0Ay-_-7O;t$&RR{IJI#P+z!$M+b$VN#1|~n2`XJnCH|4#_ zNxH7rWH)g0<|65Cg*v?7Vd#8YinP#ylL$|9%5e@Pc20>bo~0GGARryTO<12TM(PSNK~7W5X*uH6De2 z+nXBJ+ZkV1|9VVUezKh+4L6X69+y ze(QwGCvqMtB2BS58hSTrJ9p97uN;Zl<2bVFdZE`chep3$NtM+7>^2=!)!=*`?~Pu% z01~q!KkRB4jJ_1K9+Z6<`%Zp6Oh!}Layqs^m!jxS(?Omo*w56fu5X_sCD)FVv#BeV zLeq0)XZR?uDVG6%{j_7|Z#jM`eemh^LUhr(e(R7aCvVP7i+qcFWZYvD7H>J710VNk z8|Myz+t#M8x-w<-ZFD={oRIXvm%}&D0ef`Oo?xKKAA(jZXIy!uGBs zR?6~;D8~hTyEJ5-GxLj1qeo@Np}N1a2I-02bc@A-DI$JO1DGb|Td;5!^E@M2(TW@A zDv!nOCe(79@=SG_&vxYxc2bWG@J0j58?-2s=&zv zzpst+YIzuewv*sALu3$I1^k#NE%=%iylO~J7|H;*p&rFtM2EPq=vvuo>2G;_hd%MN z2GrcgOhW;q)X`A~Q-x|2C9XS+<(Oe9{K^ZX4C(qA1Okrq3+BV0gKfcLGT2B2doP%| z(|brJBNH9+Wl`ZU2lZ_vzkf$_v=+{%vD8Rg-9GeF^VMA1cs=y@E%JXgO8o+Ri%i}+XPcxmI(zpz zjUh&;pPMWMQyuB|@-GZb6l=sS;WTBz@-<=&jY_-}Uf}fk>bm0WKcransqY#ViP@ft zQZ6BEaM891R(=(qd8g880(@MOP#^2)6Nan>f_Z943>$^*ic(xnaT+mC~r^#i0H(DHKTzdRFZm2c= z_Bg@zbRXe9w3YjIPHXS@>bal%m-Z6ld^|RhN(Ge2Y1zK{Ukj8~x!n%HQbxQyFo?Ga z^2nSOI&Fa!Kv55E%s+|l7(Mu1spTry;%oBQ7Pt zJ??6v*1LgK8lk5a!TUsm?y|&|i<3TmoKwhP=X1s=OEQIe>wQw&(4+=rXsn@z zX|08+?3+}$ye6#0HSa)G7|mUK;at>(ez~my2P$mLsNk~S$r=+{&IH&34@1#vIsgCF z#9laL1qm!hfKyI58%tJVR1fLKbTCE<0uRXg*>kx4*m>643JT|mc^3=abqzzM+7EOp zt9ft`6E?8V>g1c;(f`b*EZN(t%i5>QbbwW7XVA_bj@mYumxV63?Ea#_Q>Q%lWD1{K4R9Y%3Y4Bz?)mB)YNR13DE$Kp2VN0~nT;%@hD#m3L!K)Po zp8g}RIRTJIOeY=IwOrjA86NLfE_}Y0h!4iVp+rfgzG`8}K3Whjltgy*Pn* zxjG&;bQ*REV?JRNav+!LZBC%fokbepa>Lq-XDQ&6r`24&q8h`OeN`18NC_L8S2mc^ zQJhC;tU@~v)wR4eq^^0@;JrzpA}@%4#>< zwedLvKxq~HUQI%tG9=)yX0N^e+RJoY6mrUW+}j?IiMCGL+}eC58_2-yZIFg|e&U_P z+5Fmld2p=ba*n-nRJingG*ti9l76P!&V=|J{(zO@o)`_J#|vuuHbZwin)P=cwERpb zjZbIEOmDn=RIFvdLB_N>#Y7)jg|Ld_v#22d*@wPCCJs^tzo?n(c%7!{H?J-{wE^8S8fG^IK0$pB0oz)Y zc1YgBo^R;}hDAyR;BXZp$La66!>&3n>qA%upsV(o0Pdv3%Po(8=y@{T-v^C|qQg}+ 
zQpn%}_%hOJasDIi9XOKWcvpDicb_Kkc(mqtV;em93bFZT-0()*F>GJXnN=!9vx;(9 z3$M{sS5&sS78)qmlOns*(fm~8_KGH7*LXl+FvTR&gQ9UI9Yc-+m8ML!Bm~_r#nD8B z$BxfLAC?^++ooV(6N~cVrJo{AkjcsbDH3CaLT>ON?7RR6(|n8S)M9^C*q=D)P1CTl zISCx(Sc0O#&oiT-;Gr>1ZYDH%wnR-Y(MT&HDb;F%h^^kwEZn641(t;8`HgonJ)Nj$ ze*Sn_0`;J&Wbin4nRjbB)63oZ_)#n8>c8KUZubYhDPHo&<>u2gUSDWa@Mm3}bCsD5 z`$Qv_r~)YpxFV^F8&Q`DR`Ro)^23;sR*JkfE7x=k0w2UY9Fokqj&882!D_H(wo`-n zeoQlMOj5OIT)voNQ|wPWd&oD-Y!TRA4)NFMOS2y#ARahaR}m`(zY4`Y%-LIFkwa-p zgJ!z9^ch9m@<{%KQBj~_z_Xepq~fC{lm>|G>!Bh{BsowJSB}sX!67ze!O2+5!U)R_ zU3ZO&j&8z*eV(gRN70FhuQEHbSQiouvS~dD3cQ3s+fpFbnw-=b9=3E|z^l#Db|bz) zp{(U6zQ6F+(-pMyhX3g2aksjk>C;~V4i4*XGHE%>H0VlCe}|yeP5dwh@@S9+H{M_B z9lrj|bYV00VhAd#syY%Kzi_!)83#H4wx;BX0(w|*Q(1M9zWk^9^}daf!Ex9gVRq%r z3VK=1F(}|DE8ca78CY14?=gP_Zqj179S`W zTB)of{a94Ne4d|2jY8ZKTOfYEHi#cBY|#NCCfK8)M1r0n)^|9pK`0VwDk&^lp>Yfl z>Pm}9`7eK#YyK-NW|omFLcNkIw^5YN-W&-oa$wZuN|GE_@FbOu8y=_bBg4(Fd6KFi z=e;I2i__AEqtP*~sEnRX>K)ARvclnnT;vl&i~jMevntZ^fvlRM|h(CvP@^Add3 z8KE2!CP-Z6HX!Kx#9?|(06n|})5;oSL$PVFxKl#nnTWWg-}C2JY3GqO_%^MBZv$@o zyxJ=S`=BPaOg}oItA{62BngL5 zH2`Bnm1#>lpdrfb7P5|{_wh;Rf^epkCdm#Wdy_!RcVs6v6K^yFLUE-IrYwaO4wA5D z1~;;fjQKaRA8!=JWr|t11pb&XoY{cOcvzkYY{GN|kD5q)5oBmgLrB)0MYY#aIBE_O z&bgYXCK(OdqZGmO=+Jf?X8Gsak*{Qpn9eJJH@iRT^^k9pqJ8LF;XKTRNdh=4MN3Ak zgAzEBCUDGAeRqKrd9~l^U_w}cvVuHjA4@f)m0gEPCM@l*JiTGw1z*Dxl5nPCYhm|y znZHc9mGQoItBE^LFRKX@1Tr};Dx{9V;a~k$hMM`X(n8Rn>V_h?Vq4|KUqWu$DYmGS zYrwRuC~AM|T@x;i9e3OlfhM2{DVanfDsIvfk|fKIl_iXQd!XGGp#D2bf{{HznCnXf1mc~VpH~!Afd+4}CAr5)jiWkB3X!NmsM-$JBCQ2m z4kki>fJ{o1BW@lp86!u!gyCSox9NyXuQL7mxQ5GKCh)d9FrU}+TY;Bew{17T`Fmm{ z^`-w^^k*&ZIZ%D7{fPxqV=X^8HhF;}>mvc$b4Q<4~EToVp@{K1FZ3#-z z_D7~)vx6(+MlXCI135WyRvyQ;5-h=3;E1mUmq2ezrRI48jB`E%Q_5oh2Vg;;z8bN) zv}kT1Wr*i=UCX4EhQ4*(g=8s@jDU1bl<{K42{O!3M}}bqBwgbq1Gv2-2ynYzMl3>= z1gr~{g|OpoMkL%UcNwRNG}Mr&5Cd5_R}84g@E%@06yrn2<%$4T;x9Yd8ojK6e%eTe z6r+Fcw814UDFC3uw*zP#lmpuXCt?7(63{-}xgLlmGC&Xzg$U(^+ zV;55d_3PmZnXxS`!CH}a?Jyrwv`Wbg&!+)O$fbnyRPC6{J11m5Jg(@fV$t4ypiAH5 zG?_}wz>G4~&{iVo9}^C8$b>{32`RWDH8LZv6PmJ2lg80Q-gTT@kOJ7*+g6-*S8b(Y zeks;8Dq)F9-TB5K#1At-@G85ir|?Fm3q`n2muBo`U6={B0CVYg)rtjEG#5$p;6yX! zI0mre?jtX?D7Ya(T|?Y%>NR6YE+fPa6t|hQpr^FS&|tKbB$7}9MuwjKc?^PFT8#;Z zybZUxBx_Dj(~e8JrdAnoBPm*3=o+VRV@r^z%fyPavK(QDmP0t`D{YBZ%W>P!g&b;3!_ZKu`%?5WLyqE2~{BU_)_Co z5?jkX&>QlVJ#*zOxpK1&@hTkA)1a?7jk?}j_QinaY!@6b@~+xOyvSPQ6|qMW+0!uA z%t6)pMsJ~w-eK36b0tLex$mFvK9ByuTl4#7j~qXnzm)oX z%i`|mUZ|e`bc;uE&ElWyBIn<~TC+x5a03dbovQVh4@Zti?YI{^hG-dx5ZBwrZzUP# zZr+-j8?gpZnJIkc3liIyS|k<$(M{YOL?btMdjnZ3BnlZcVimT8D3sxZP8YkssC{5CIxvRFMG$g1bI}t+$3yZ;G zVC;wOs1)LkP@qyMG7v1i2{pE|?*gz(LU`bc^OSL^FU2%lEFzo|Qh~JMtZ-StKCKjz zI%o`agc1N7s40eqc)|fwK}@MYQg5(aAPojiLQysp(*8={hRa`Y&{7DNODf#+;O;*; zs6B(N4}Q-U&ifDARi6plH#7fQJYMl0T|D<3()sY>p(&A%Exvi*o0Y^kXb+-z_}mHn z3Sw)UU0VF2{Y#!t{M#PEt?fPk8ssv{Q%9zOoTt9{v2Y{Ac;QeH-PI+dfZg zX4b!O>KZ42P>|V;!Zcg-q#&G@7=>(t=YMur?aLmYn>0qT0tk|N6`;{4k|Cy8GdR(U zsvwz5Q-#>&2*UU?+YqE~0?wf+3Q;go3KKK^M!>Le>bp1nSv@%?72tl(gpW93P#9Ye zx@QZgrTY7*Ojk~xJ9+NliFy2HAvRXY?+9Y|W_4H|#g>&lA;<=N`(r~zZ z^4z&ae6}AB=drS!p4k}{@x=M_|9)@Yu_IqL)}7DSReC-jATa$e7{YewH5LGye!fUp=$y4<80{JMH=xPF>^FaKm_uX&?vS*ucdA^pPo{6fF$MjJ=u! 
z@Ph&iwXK3nf;@pTo4T-JT&y^gCc$;uht&v>{?z{}V*FQL)!Yuh3{q@ItzLO=gDo_r z0pJBK2=k>K_JsvV{31dXqVB|f=Zdot3k5;ZHBLv-C*P!X{E4xd$?{WjorTLTZsjo4$jPz4LpVLQ|pwj1!&% zEIo9{8sn80gCi?euEV@;yL4g2?g=aL7okO8RY8oJE9DcT|IAz&j5;0tQGifxCAySFe^8;Ow04Ri= z3bdi=^ufTsjv}V**LABrC#V=QMTQP=e#O4W3GYCeo|G4@#3%j2c3c@DIVDJ=KJA82 zQX+$)S7!!R?9FR=mJc0G6NHtJFV{F_8~YFt9PYrVUg20BHHO|HQrMzUGZu?!#)0xr z5$YBKEEFptnF9VjPR43Xnh5(4LN8p&fXbLLWtbl*WUu%k7sSvgM$%pt7Ts$XR9VCx z6x%z)4Z4*oBndKP1`Ll1Nc$F+;>C33GNOz=cRHJbI+pVZ@&dmIv>Nh--R$oG~p7l^5 zT^@=UD_K{5(U-*MZt?L?>1rJK$Dgch?%KQe^C|`7Qnx@^@4`7V)A)d2h9PH!7`-9~ zmh(~$aUYi&<}vmwTO75RzvE?q!(r4oF-t(GBAaBWiAqg^FJHcR`t4tgj}@mI4B2&R z-Aq`EirE8{001BWNkl z>n@#n0NZp9Tz>cT+ZQ~Ov_X3KFc}q2&!7vUaS^OI_c<(^Zn@D&0DBj8d%)=Mz@@iO zUo7m=rHiL8UiJqJgi5Rkg}-z0-OGgHp8|+jT&2mf2<)|i5+Lm-i(9{r0+b{V#bL!S zCStUAc^POcPJE9u@|;#&${KqYDy>R>FgkVP^u_&2pqEfBU8aM%gfq_K#{ zRT>RdX{R>gYg#mJ7RJJ0!0w9XSD9@3!nsOv?jq|2{CnG$D^B>fWvB}0KgAsWyBX2W z;iDit{9C)miRS`iYZuPx`7E60-AWYBnYTok)z8@2a`pUh-1sg(B6&lYRT=xy%z!hxuzV&aXs4(YuOFmb5pY7|SbJD`W#s(w}uX z<91`nf@cEPe)(Vs>ZBkIM6zUyBo`p(hOYa}J^c%GC0u2Rq%X_=;f_ogT@lMDs0}*3 z085i;+yH61R_Z869Exrr1P!G@C(EXc*uI)f+F)Ne+Zov${DoQke>Yd0Gc$*DjdLch zF#f9<+?&HgSe;?T8CR*t3g^r>Svb8;4x{$XJkHs~<^x~j{cd*NZ0(xmzSA9+s&Rca>hlbq^lJtwaumn zA?ti{(7vJy8zo9pv93U|&Da}ytHq4=(>qrnx8!qD`oFK48qit z!{OPL3-9h5o;d*9v`#O8MDh5%Q?6ga7@_NzQrM-b8Zg4)3sQX9PN13dmAH380%5*H zAa=Sj?S+Gp^x?9tI^A?QV*G>Afjlz9gCRl}p&);Vhuv>FHgbd&Y*Ab*mFcz4kaJ8s zvYN69jQVSwIhSr(;CufkYpgh@cWT9HD?x4NoAJLgJOly?=bOQ4uQ+F>kH@LnvBGJE zS=*1&K6xmYk0;2OEmd)>aLxq9AK@PX+;;m1f5_ghy?1zRGa^|3!kL4g)w^0pN=1errI^dZRW_yJ~17Qd}bo)}I6@-W?{Dg+#6;D#e%Tw5t3*}Hs z4HC4I%sKNri_F_Lbt&Z(t_fO5qxwJd9s9qdet0HUcI26wgpyvzELJfhJVOq{AwXk9 z+-OKzOVyZ3I@*54*^!h@AZ6ifT;O|P_zm%Y>ECbouV!SWaL(opK4XP*=3kQw=SjEg zXJjvy-$!*brBW~2#ELDKtG1NYcI5G9RJV(L{0X1xzH8SVeFvrTzW#;tuNsNimr2gX z7^Xk$IR2{PNl?-pieA7@Xo@iT{Z{1m)94}P_k1-o*sLzxB-^o(`iD&nQb1NAAFpfFDT)94jYn+VK?*zNf0Bre#?cjfrMbfQ6RIr^C(JBwx z2bX^5%?(zZ5gwTNmcI5`N?n2(K470dj$M$v`m4?iMo%JzcB&S5xMh3H7J5eqqm{GF z@oAH&y||IGva0kmj?v({8Q+(9-8s68ok_TN$a+2iUY{Gzp0<`5LTjh2r*O>~@P*mo zangiFfOfbKNyGZ~qzP1Rp%=bHCLwzcKIs35^hd zZqabMlSYGQ?O52g*#kOXdl=UaLX`#>(Sd!l%lmNtWqBV@6+l8kRRJrua4fGS1|7|s zDjtIidmy=GNm|?x;M;A1;fEu5)nw8J`@-4I$fn@aSvcMO6Nd;a>l)|kE6ykG+g(nn zju*}s?suExD^4B#;V>jz(3;0E+=j#NlM?I8QaEQGmCW#J3f&ptk36`u@1Rs6Y)s+Q zz~f*O7y~Xo`ud3FNG%eSx~7a_Rb($4wgWr6xSw+b7oWpfTS2CdtuGHsa(Q@}GgM>6 zX@ZBe;#^pfU0aCK%{xMmc_hCq&pml`u<)#(nw|Z{SDYg!_*uetbVB%yohrVKE#j`G zRn34FgwIUuv+YngpDf)BHZsD9HOm=d7zre^XLiZ%&~Wycowk&4SVntkcFEQ^Hva;l zo&#?W=ietjx{L+T@`8H|Wp^9b@}w^X?A@3OCqrRi#Z5?$c3~fZ=^Z`}|x`vz&Jq<_>-1y|oaTp`^v@*sUSZHZv7qK*9Vs^1D)+FR!AD+}C zuOUT8BZo~pGNF0*is1~vy{I~0T7m#^0~Q9ONkh4CH0sZ850#fmcur$_J6os-Yo z+t)K+qz1`~$j~noPOdoJ1sg3KG=mwzSuvqGYENP}<-@j4^H&8NSem_Lg$655?+>k* z;}geOXfah@MF0ImNF5f!i@QgIu8?7l-u`HsTyZAdbOe_+;)(dsQs!B!n^%Ktt-Y@e zai-QK?eL!tCDOpL3!HNyVIQo{D3YK^ykJlRjsJOU6gax96{q14$6s_3EySRx9U36< z@)akUZdyDSF!_*GED#!u-Wgsr0mzJA#u*TvBnd&awjABJ??6N#_cB4#0wHQpiMZGh zhq55T5Oi?F_<|M)jxtO!oE?ltON73UzWGWy1=pT^?nhel){@n(hPE~DI<7B7j4zJv zdLSF&@C6=+a#!t_DxB`B`&8|8o~pfXPsVghg)^3sr&B$EHxbX_2LheRE1Ff|-0d;v z9lb?*&Coyow72|l*jJzI{oBWDR=ug^T>37wVQfUaZJpo~mak;_hr0HIgH|!zdy!Wgy z*_zqU_(>J}!C)@ea`~Lxu$h(&h!rPzq?aA+aK$MS`~q@0dLask;biN^>>A~))jhsK zz{;Y&-U70Hrom?>@WeL&*S*h3M;8N68IeAla+w0jM%pQCBpg(`#_48dAiYE|;f4+h z5fH9%2HS0doI;U{%!Vb#;2Xy8IlCk9e@yj zkbyomLLhK3Wu9!Jl!mEdBbeTTCAI$7QEocLZoa_x`1iIgzQ_4!qi~)Vb674pGU7X4 zar#U-{Owo*&e$j9jc*USntBQ=ao0V z^)ZF<$WOLxHP*Bj)yNopH3M)IPSM}DrO#+^Sf^^EaKg61n2MW=GF9tW%ABz-Nnr&du)2Yn<)E>1s=3al3c=>V%ub(g$j!ngL*;aN3F! 
zI>9p7XFr#z4OVhDO!X{zML;&5#d=#SPO%t-fumDr=tk75X5dLKPU83|SlgpO(3V0g zGpjVOTTXFTt;Z>18wIX-s>V(YTc+nOgo@k8Kf?{DE!YL-6<4M{{*CY{3Ey|63@&5; z3>Gn1x3^+Xm*NHrK?s;4@2WLW6(cQ}EfD$2G#F-YpcVu6@qq$zmBR{iYB6QJr!RY? zMqRKEG_yv7IeX*9-NNCiUm&HobHv47FElh&QwT>cAO|=EggZBksKuTm)Me6+_my~b zvj26Ib(n-V*hKpp=gwhWI49gyd&euzA26nDJ2ck2S3u1hj=+w^X|>{iH><;fMm z%@rrv)r#|55^RUL)FvUFsx^b}>q{pqIuLm}>|Mthk@-+)T-R9L;3M~7F$yOGu-#8m z8{Yb@wXhIYs*JGj48jvao)=D&FR6hxO@KrI6lzuNFIE}ncDOI+O6He+bM{q8t~foA zG~#7aT6W7y<2tfQQP6fM5HuF{gJHNyam5*|Yyn6vVg*HTQ0y36V)Ky?p^H+u?A6r5 z;tCC>#5V|6M=pl+4nvMdLO_Idh)bd2F?1wgin^p98?o$?r_EV+Iw#Gi-uKu+-e72T08o z=fdGdqs6e!!Wk6PL*Wz}QnBI`o^DR*Fl0UTPdQhsX#^U!uCZ@#W(Bmdx2KJw$M zcec&$u$b4rMZ^j!vcinSM3#|)io)rqY9o1}PU`lXT)FWNU^`GrVvw3jcOstvJpPr&p>b|aSG927D}Enx=)gA6MOz)|mYe>5y1 zc(ynkLww!V?mL~s^BnBOojhQTIkd}~!uX*Em&4&fVS{0i^OGT_@$8Kk%(cC!~ zI5k|}>`}zX?xsFf3zqjUq}*L)E;yyNj{sMTS)+p$7cMNkWA+Sank5}OS#?RFAl1$d zHG=P?ZFWfrZE+K$RY<7pyOSa$2p0M3SDd#n*>09-tT?A0IfM_IlL}{ZY3D1>>DOK> z^dG5hpYX7Xbr5&Ib@2|afN4NVnjTu%>)oNgIl=|mqjWrHYfS4L$yoyDYYQ;Gi*<@ddMBy}&DM&{v z&Ps?s(P8<UN4lY>(uoW!YGzS4~-d4k&mVK&n=blC*gf6j)9&zYYt#nDFWC&(Q+$`kOXS>9Qh5p;4w-=YD z=8k{Llj}0`DGt-rjfKVG@fELEV7SLi;D+}RR&Bh0$jss`u>v_UnCJ&y_J$e%I*Jp; zvOTOTQ*qRc;B-?p2>b;alPpbwE@cQ1ONs){uo=Nqv%^`gtRYTg)c*9$G5w?3`S)jG z$o@hRf{BRb1aRC=2wR&XWSOOgwh;9Z0JUV_3?)K%*;3E?4(d^NAee|yH=b&*$;;=v}~*q{qPfB zr`#tRSObkfp2xZte!Har-fcMcG|Kp+4BU!am=Bj<~7a`!g`FG-Ey?{P4U5o zb(NEnPdqdmvk(wP$5XYo;*^vNxq3`#m9&8&gAP1>aMmA3bHYwsD!B`XW-l`e`_J|7 z$mU@QT^Bt*oV7Vl;?i)AqfQUk zlfYSK``Ga5r-+A^OyKOooRiEBI*%iYz(o~I9|_j!R&rE0r2~pn*&50SZ5JE~kO4Pf zdwwX2U04VRxXgtJwbB``%%3zCU*0ffW_{%}|0{>HcG?4<9b8N!-eNE}6^|9Ck3)_` z4QP%<47otrQlO^VB9q&hqnB>rgUDitn|>6|AfYYSID?9>(c2d1WZ@U1f5piV8@%sz z^J$+8KwMb3eh~jvb`oLgS!h6Wa9nD7au7&7d1ykfyNCqWyiY*_^o#CS6BQZr=7B3t zm$#4m>Yw2PxM%tz5XkIc{KqaH80@g8?hR+>eX%flQumt<@){=%Ro3VTyGyfnQ?9iz zOCmicOnSy`n8KlST;qfq2=%K(?U5H5v(552^|U=bkxxmS#p+4|7CE*lTds&-E( zuBDy+;Ij~$#v>2jSl92Mgy{`vt#I;MQ5oPqsC~HEz{Bvv0A$Ny2wp09;j~Gg$z7B< zRibc;BnwtJX)Wx=UA1-LgzZ>y9zK2T>e(|>w0+oajdk;o@Nb%cH5+Trdc`R}f21nc z2R03^ZHs_CQuJ5(AVXZI`7DR7>N_PxGUbYvGt~(1w zZl#3?ZY;H1gzQpgYB-1oB-qcffQ}v4={n0SQBc)S%6Rk&Fg}SGKdy0#QLD&|2TvC= z#J$JvuHc8S+|hHAO&V`-$w)C$R%ZRVL6xsCh<3zJ76)-yP1}SjBced4(jY&aCmn?J zPHM1fU5~+KZ2qcoLDeSgevL)@u7&jB;?Se1N4sfF>gS_uykrvNUtT?AA z!=4ecUBm;o>Js+kMuSTqH;fDX7a;i5AlXYx%lr(x%%@;TF#^Uv%M!)7!(huZ0VYEW zZb!yI5o8fE0`5#tch!pLp@m?yBFDjk0mQ*rq92B(GJz1V7H`C-Pa@a#pRLJ1vje&X#r+t96Ba;=XJ+zBX3X+Ole^>2h&bDjw z-CS|neJHcJ)PsQur)t0JxCLl!g>zb0S8PZ%Z`93br{BVOv+V~bb*(rLOQhp_plPQK zeEb)D2jve*{^+X&w>*GroEmE}0EJb<5Fke*C2|PHc=f`G>u@+hwdkTIm#1otM0=o> zQ?;ZSONBG8aZ=rO@kpR!E7!H$94^_(KLuCz3+KJ6=B*-+>iCtrYH?W&8&0IIabpZ{ z;u8=3VHXo=CImJqU9C7laRHkSt3(gLL6>!v$rmrKTf}>8{$jv>aMRDcq9da)cL{*NN7Jhw|waW z&s5IYMcS~St}k3P$-{+;!l_fWkn<>0B`msQKODqNPj}UZEy%{7f%HoP#6eIKtxK4g zQHmdfC@7NapwX*1csrNa&Vr&q6UAE}8&A_7wPnJjXxyFBA@$@LLJ)^D|w~s(mdc;(v~5Pa=Hu=Ux2rX5I`m-R#jMy)8la6($h&H^RMnd7VU*~SU3{8q;1Em$)oix33 zRKpIRdbN5uECt{(B%y+L;LZaXQ!#Jxu@xuqI7S&JWcUhB zc!&Hi7Ubd;(NFtk?MG5VNs!F((8#I%I?V7K zR6-qO!1hiLfGHkK=ai(2Q5lgr?P)4iVP4j0QcQq)1M07FZi#7k^&sM|+T$+vMQ7n` zovQt^`l0DCzsABj^Fma;!;gn*v;D@#n{6*TwPVE@G3!S|o7&yjU-s_RLl1txJ#GhT z_M%>Kas*?v#jwk1&xjnTVC1-!RDD;iEfZ%K%e<3?(@1IcZPphUk)g8UglBu{T^{mo z%4c-gaBh%V85iVnoz6C0U@T7#N{4ZpfmcD-wB$6SsZB6LEp&qrKL~u~iZgrG&;;U= z(QWb1W$4D{g6z1JrRCYvH{P}v-c}=yS^Zq2bCfL(CkW!PAe7 zR}4CoBuLt^wUY@A5;f?y3cIPR-sHbHnYMEwaD!wJP*f#zb%Qi zSDgOqC?4yV_P>wqpUv3+J$xMtwwLzoDaCKCg>xosb`)QE(TTix?q0_=&Ud}`MzL=O z*M)r)&kEYLF^@cG`rNZ;Pm^$mSX|@O=+ofCp16}ixP8^PSxhkK@my0t9Ed~FJ&I#mpFYc3OU{2IeglGEoAJj+LJn>%Mnxr 
zGP$)_JzO*l-YMA&P+>plCZsm$beS#|o;nb2r6WJF-*1NJ0-+5C-k=z{SP$d?0!z#6{WW&k zN~oUZFyMXA?|S}Z!w_`EPPw}sEF4!1h9y}mkh^6Qo~Q_9$`ttkA~*$=Z?a+AtqD~4 zL3B6BOYvn8Wg`kVAX!<-a18hvzauJ_4Nu`i#&8V+ziX5K3jo|0twop&M5D~x001BW zNkl7nGB(KqXKXqzd$-Gtk5i*i(Z?pK^QpU^j5 z;(Pn=#unE&XByZ3eh_oG*o^bAp6qjbg>&Y4yOo05mESbYI%WggZodfDLCIFns(lcj zh;2JQ;l|w1tlJNt@yhwV1<$Do4_cj5oPh^PN#9pkxWNpI5tn?9j1%8|jE4{&=@E z2I&Lyb2@`tS%!eyGlIWXe9X~tc%{EE^NRv2aDp0Hlgp7#g+1czOrNWAvaMYSpabX?mdK={3@nay&RUxPFT?jZ{ zMA3NXU=EDntEDJ+STO;LnF&FZn?qh=9}c$L1;J{fXpyI00FRLTydtX;tPRd#-}gEPa`i| z2{sQE6!D?@~y#q!T!6kEp>fd}an?xqMkA3TGjbM|bG&P>p zar0+^lm7CY{TzldtMTn8CadppQeZ`g(Vta&2~(wQ$Nf@fIx$-~jc~$#$eMFSwc@1A z8LC}b!Te$Go4-P&7DwOo=Mz<$F@P{voKl6Li#an=qvQl88#wFnP9%DVA$rhWMB%@V zA~(|XNJa~b{=6PKykLb(q0p{7@d6RYL@v4KPAg9H1$Pk_TEZZL<)u0#z#E*hL(9b? zMeT&ZJbl>LQ2gFvQ$89iPE+6uJ3Bt}j^YNQBUV{~*3QFYO$!)?>}?-6{8TM1i<&Z| zg%g?wQc?5wILoIHiq%qSjhcZi?4O&@-*Xu(oRZb(16`!epBsw3 zPNYJX_(rG|D4ZGwI1FLP5lw}D#krBQ^E%m%ld%&DXRzywoc0;?2YPQ7PELN+#mNjX zpFfnP?%~5OtREKR0;D{I@5FHSSlk+f>#5qJaK1KAd^U}xpE2O0KkxnI!=TzD4?YWc zgWL5loOo8P20n+FhMGoPYt(_5BT$3bB(5okCxTE-1qZH>cU<<#uy0S$jVVT(g%cJ? zr1vPCA`RkP0G>N!myZ_DT+CQ3g4s(foW_d6Nke(ht=$CaubVA7AxBY*#XIa#8AoqP zxIK>|R-6poXz7jsP-c#%VZ|^Rc*jy?x0*=dx?LiqLKru?eU?*hB?AuL1FM_3%h>Uw z`+hNtL}=PMv9fY!ib9c?!~zQElx)a4qco1IyLrV9a`1V6e2zsAqKEAKD^=`cYaCx& z?v~!rB_;4F=Y7wH9V?xUpu=L@P$*EIU8-PNvX4=-b~c`3M)5e+C=HO4tWzDQv`*E6 zE!`qmpzv<$u;A$aIeV5MU|RiIpAtgJ=*8w}$B`*LLReIAT#&kMu(F*Jpvh1r3QJ+F`X-JCY4gR6#ZW&)l!g_OG6lu1^#Ud?&mBX=q8~lymw9FpG0`N zHcL1OFs7Kahs8!`8j|E<=V}%J`LWAIZc2otNl=K<*{)x4PMFfS68U~kTDJSXJ1Tn! zpSBavLEyFB^TXl4U9?d+ZEoQ<`+VEbG*^_ytj^+|59vo>t8s_lY?T6VT9skZ;onWA^``_ z*=X$g7Bp%*!c zv2wbk;&R5ni|Otq>p5a-oPj2oilx5B2@cs3aoI{<|3C=cc_wc+P1y$!dvO;GQ*s)} z0PB|__7JP%x^L^)vE%!OvrE#)VbLRZt@}$l*Z$Ldtfpn71e9^giTeQ6{Zj!u;uYV+ zHAHKK-?Go7+77?ceUoaQXdNCoKL4nV{<*wOrz)I# zq7vE_t{-!ehw!GqkE@h6pS&0Wzh)_J~4|gaaMLy5WcN*bdz2k8SQrJ3smM z^6>SA!45xFYfBz(OV6IVbmJiY1E)^DpW%n8iET0r2pIi}kMuD)aZ<}rO%jFEi(Y2Q z_!KcyHLax1*rvK-%V(Z+y>MI)*9*sN%Zk%{WBq&>;XE~uURw9!LB4Z&`BU!o7PoP}GPgZ-nc3?p%xvR7L{5kJ9J!{Vp zbUTKfG;rbUaMqS&W(ejZ!GJ+=*$AW^%A6UDCkIiSYEY7aW;f*k`!KAPIO7Z#;87w% z3a!%X8&l{AHvD7RhI8!p!eQA*5YX(Ii`Q>FJvUX>o9S1kC|Rs301{iAk_E+0XW7Ln za511QD2%gpE!rOa^3L~-drBH}-2Ld2-()f~GidO(7^0Jf_iwSN)*+l@DnvXI*c~ai z*6ebg*nRwn|ND|*|FFw$`FBNSe)?(&>S{`|Z! 
zweQJOpE`H;u$@GJmaRBPgSRi9#f(~QbMx1Bq)QXSdzDS1CX_Gq8JN1G`HM`KKsWG0 z;moC$G874=ddR_E%!jTzIAu9J_x9!U%d=007>(M_WNo!}LeR)=Nw#8TYW9HI)}|Y< zOk9D&4&HH%GY_d8(6iN56wW#Nl020v5wIvLnF>Ti_B&`x*Dl{UGkg3UaX{Q*pJnGS zUfTceq2WBA&0rSxBiQ)oup6D{hR3d6JvO@!XWipf;wD8Y^lmtXvWgBP+zdLy8iL{# zB2(ZV(2&DaATQZ9Nq|`ykUUvj-x=F~{|=q;jUNokVJXjdv`f7!)P=?PbBDmwx43@P zJixM^R6IrB1uPasn?`65xa5jWGMboBv##KXmBKxwm%L%hD{I zcjOc6(GO1VP8xz5e>^um5a++W&kFqCR*bW-lwQ@A;Z^-x>SWNhe#&_hl->2F+ZBvb`@5 zmt_NK*G_5%;q`Y$`w!YW&6Z2YpT5q8f~`0QqbEHO8vXgxtMBeNYp};xaXk@gGOWwh z)MIe7Yea*JQy>1bFV<%$Qac@o9HRl%Z5d>j!`_`gGJE9s&nppY#T-2O^XnHOh1bf` za0#DtUXG4hMwpbJ?Z#Z%bU`y@kiN&sP^54B1P~?)C%$S~{51|U$!<7gYT=dlmZoM8 zpTNWR!mqLSKfN?JoLf3*r!0f|sr_VV_IxzD@buDf>eyA?W#jHWT?h|a{!V6C0>@x{ zK5Bd&1$xo9x;GH6Lv}HIzZGEnCFd|ujF`SGKdXOMW}I2K|4Y2`u$bWG6ZVPx$f2L@ zDZOOIjxGD>8HT2OBoHJ>RIJKSNT5~E2t~)4fe4(f*rYQ8!<+L-1M3TCBOTikS)anW zL0$jq6P>Ev^{2b@9h9mY>s>f!X8x){SoWGc>T!wT7h_pN5u*|sm>#ebB#MzGq{bEl z3rF9%w6cT36e)bD#;^~7`i_7r73}HBDOMCN9JL!YMAwS52ty025a(t@#KGy_LLA-m zncTYqd$8s+nhD4_ND#ql_@H^^bj3Z%F*#wBEn>DHZwC5Xr~r5*Ifgj}w%60cmkH5j2zctlInk$DJ&BZGf( zm>x_JLBLk%%P)zrD>o1lW_uSU1*mhv5ZSU8qB24nz;G3sYLnsYQNQA>X488R+|q?} z<};qD{p7QCTf?sT2yQ^()Bv2g_d)4nxeQN@e%sqT0N7`PR=t5X-es0?M{3NR<8CE} zdyvCyg*ynp=~abOfzVRJODfZPxLBnPIrKJX9GP^jP>$$v|Jpf36sqB~5JH7*J5B6L zcGjG#d(lB;kgnXHU?|1AvJQ{}ysOr3f~lg4aKy)obC}W|7+Cg3gpy(g1aj}@4A$F2 ztK8C3rQs~sj^J8Sj=)Wt-%f*ZxO18DLqrHQ@2og!)1F_vE5K-^cH4RNyF zmgg+7=vCwYKl%zOj}UErq#BNeL}%#RHK}I11{&QXk!Gt5ktoOE9_EVyQ1KT*y7T^2 zZ8e$Ri{REPoS%QPxBPI}Kc4x0siJo?{~J*_bF^WoVeD!A@pCJOp9Y_XA_+bv)k8K0 zW^IAAU|8#*9LQ@e0yxHd$fgp3G+=DkHxk~wIY_W!n7mrAg0?v|E(=8#Q1+XGa2Nfq zG;yldR-E=KLQKK!{@du%{5sj0z`J zil>00O|}RyLlOKl3xSzWq{s6N~1wY-!hk)ZV4?(#){*FuS-1BpvsJ(1TqQZ%gtgTQ=U0En4MBg)?0xiMf_D{I|H2sqi}Qd!48gRfllnrja#Hsj&ZIUhFFjr&<2@P3(kF!XC98yit=-pdb zvbIDI8)RS?27ST;YF!|8z*Ds;5Pg?c)lVbDLIT2i%R=l^+6CM}Xv$$EqB1SfrF9WB3_CG;;OX~BL!L;Cb>$a3Drgv7D-N~0`3g+ z$7;wJy#s`l{h`Ntt9!|yK;%Ux-LNUTJ|L(CBU-siyC%k0*-68+q2cNTX&Q2=V~80d z8Z@*b(PD`}0${0chASlOY2R83iA;6u$ZNS-WxDDM=PJqBCakShIHx~*h7WSz`}N(w zs@XhX>t8tkiV?<9XMjUK<~RV$7)?IuVwwiYY{QDR1Yxg;g=BCgloF8CyGzHpl|@O1 zdVG)5@n#bSy=|dwS>Sq;BX%_-F(HAQAVI&zNz`0&DB85hp0IzzYl3cn!A@oxP0##2~2{^1r>n=;f zgr-4>$&NbZlEwfJG=&y$U1v(-2; z_8sRe@l8hmq z?cJ{k;6@Nq_kHO%6-1?tB4%h(TuQ+~Bp`R3X5FxNoP{&Q)0nbhWCMz!5_%|%uf$D0 ztU#Rz0Wc5}xe*)e$X94lh-CQ%7$U{L8Z^m9&deq7@WnN%|%CXO+s5FTr}T40bl3&CI_gc*d9 z=s_z^bqn)0UfhKqQBSY}Np{3nX{1&FCLGFPIdHXQxe}tLT7IxOY{MQHYEGeWsspw+ z8p%z=5Cw%Shb2LZEQJa%l9j>GHRF(QDWmF*){OvU7hGEs>Xbwz3X8_eQ?)`t9Seap zx`Go3^2Hs$#~G9=w2%sxwQbenDfFxjA?V3vWkJ5dvA+m20wPW~W+a$l4np>p)dHuO z7?nUJ32UzFMxfR|tM-=4(DoDFT7`4Qmz;ZdJ#*K;x!F8+>s>fs-RVDM%^?>9jYtkp zqXLc(@dkr0b`f$idC3RBm+i~~r6D0H?i&`|szi&9WF~ol1Kw3j1z>ztSdgL}UvUP< zTgCye)zoDO>_!@tg_xVHaLyjg=t~47xluRZOe4YpI~4-K0;7_a%WxH`4$9hp{$6gKlF5SO4nuXK%b*RDTp#&=Mvqq`x6(}!APm~u#K zDB6UovB`m$W1Sdx5_4`0;6y94MFmnW5u;vs6GLQrdk{XCC}KBYNZwUTzW1kM6J(fU zJW0WZY&SY1ggOy%^j}2o&4li%g)LEvVo;j)(~I2O-Zi97swlS4W8pG>IOvQd*A88^ z*q0~dq=zc+gHtG+McQQO*ePm)0!+>fA`3en9di6Qk>er5oa3^Rz4YF~fJvxZjn09X zWw+2~Uwdv#AkuI=8f3?qrC_ z=jN}%-?A)x9p(HTM0b?Af3)U@SAL3hy1}DCJYyXTmVVpH8EE~K z!B!6SuEusx=H-&F(m6&kaygPYdNr)=8s`@~`xWOM&6vINS~H+;Y<|7>_nYwZn`@2n zw&h;kX}`y5H4z_2?Tr_~YIzxS6AKyt4bHnd)%DK$4D=aTH3OF#<2XijHI#9xwlAEw zJz4gOw~~R6|9|?`&5!Q>k8j>en71h#HwU3)s&1kEbT1@zBX~JI61mhC8ItX z)+As+^2OcCv;Ef0z`RYA^G(t=z)oX&-v)7xg=jxgI{bNz`fDfb$i*ek8u=iO&%tc_ z73bRK#a`-8Gw|X6{GYz$yi-eCQomktT1{kGtg7cYA(^PG{NsQOM1!@)5+=~Cl_qK+ z6>DO`H5V`|Pr3fVEx^1~M8Rpa` zxvgLDN~G2;0_zQ>LjWk8C)V3T+nzkOske(;miRqR|A|i+ua98DH?c2v=+BPU%tMkBfK#=_O~ZI5AK)gf(RC3JFK3?Q+zSFnJbgvD-smieuE6qNI&|he(4w 
zS!AMai-Aw@9a{-u?N9`JHVS7DHmF^#^X;fTgpNppbjqS0{#5jWo-F~#l33&cO(2so zftTcN^_t!EmDt3>S?ea8Y7;0eatpng_FDO7w$n0+og7$p#zcPYT|%Vj6E6rvZAVJK z;@o;A=H8453(k9a$?4ZP^Mk2I7>cM5z5L`_4P66?#je7G#NjG>ArXohRm_|jf-J?* zX|zp|f`z4~YVd1r4M&98HWefq3SAiG!zWjq)ht&l7`#^75^T$dc7!@^E+iWIZl-O` zWP-yL=TB)a8DS;`SL>PQA!MVKvG4=~`F0{D3*2D4(Ml=V0CcKWF2qPj4WMOa%v|yl zFl*x$q6!mXhN@Lk>uaG{9yM zE+gqmVa!-ltpkG--cpM(T+B+czs9-MO3XUPvo)R^#4DUzYRS1VPY%lA<;zSAGSEt` zG=w9sT8YM>B;73(n1n8>)9R)nW(e_G0A_>kaPdS)KP%SOsfC=^t-e;~n8Aq3Pt^um z*&9=-2{k-HrxpRcMllKcqvcvUVP(X^nWt(+PT2$$K`2E$&Tcj-nn24+(i0dHNJ0XV z#Xd+&Ryf}cWUM7&j9RP=@bG*7z<2(uwWM2UtvU{`S$NHY^?&;+O3tbz0?c8`XswKL zU>mnzajtWr+G^&uM&a}Y=N4LWu7BaIAMzfn%`*OC%T|C#8vTkkuW<&UR%__g41+s(NtCEd zxNO11O>zj6;M7KekbraKqZG~tIgt((kut<~)+$^wNubf9O*GAj-lR=pLJEcRbS;o@ z6Yvj$G7pwR{tRE3s{eN@i6Ey=0`gFcrj3*_pefK+N=X;1nj#udWh?babH{-78P@>| zp1R(BAjJD82W_=Fx9;(5jl!uboLgzhx&DRIzQ>92g^r>NX_Mt7gwaHN`R+NrAnj zhJ0s>kjN-I?`tkKVF{M|73aE_s;y;kOBBvraBiI?=lT~;tT=Pz;0Mh*S7faZUeOF&SzK%JkTNY{@Be*tcYX zj+<-~bHzDsz(ipd-H7N{u@~KqC_*KIh<4mWXvml3a7vQSgDA2a9kFogR4qvH2dEKi zh02Qyd*hz7aq|_|!1^Ttl8USZT&_(HZP#3~XU$fpYD3yv2nQ(-r}e>&Sd)2?81{t^ zIf;hGDy>xAa*?YD0QIhjMbaTQ88pMUh>Ti0DEvYt8tI*HJY3%Fxu*Q>Yw(J_Sy zEM|0&iuLMe#VF|FAtXWDn>83LQ;l0Cu_iY)8@_1_3Zy!#x#BcpZGajOr!X)A+!e`+ zsc=ltB_$MHua!lUgL-c=5Yx&E4E%vo?M8$5vEtNSwZUUcDVi?w;k`DN_T0WAaGiM)s`~36$)py;It*@)_COJ z`WH@HacY|I$-*#h zaN{j{6BKHZp+1I83Q(_UJ7n1A4zY(5_QN^Q4ci6-O`GheQ%YXNhc&S(mHyXJwp5v0 z|6sO2;cP58x5{@p*S~PuucL5e+8zL2zKdDNPz*{^!3JV~0ZONVB}o93Wg-SQC;>yp z-Ysv1MjJ8&6?aW7nSiq09AtY}0#7X4halL1`AQ%NXbZ8x;enPL#$3}H$W$v%XG92U zMXWtUN-DQSMICV#V}NR=%w2$inAmbpQQ`EZl@;vBbsR(*7>u;yOtX@aZ#NwwRUU&> zN&C@_SZF~m6k_zXiY%Ol6$)Y(f8MVWd&XseZVoE4mFFrJPnfv~FA;m=2fkrA1mKCVliU8X)EO2q@=?q zB{FgpzNNC}ioFm?- zm$sJ$HROo{1NJTgq0e+EXa(gVP*+N%5N|{!#8;d$=R!i@7zBEVO z$?|Shjr%Z!3b1Z(PXZuAOZYAwfhkOBxGWon5`luEH+cybNrNgx@k=`$ML=#sP5(X4 zEmh{$KbXxgoUH}t7Flwxf8o@M(?%JGuCEOov(YIkhOtJfC>))(BbhQjJnI=DNgUC+ zbF#ZD7GR7*E>JLu!d)7n8t9-}qwjHsN-BsTu~3J3+5ynumMre@H%|U?Dl?!ET0Z^z$ zV3LY#Wy3;3RAB>bFeoMiK}z}Hg5VvJl8~}jf8D%RoFUH^o@gwni4+?K$uU>r;aU+w zs(1z_Jt`Q>DL|8F01dh%c&U^O2-z!-S6Hfqp+q6KAPq+%W_L4 z8wgtec>+ zEZDwm9tt>;_K2t^inIXJ9D{Q<2s~9w4oyhfWt|+hgI8J?NVx(S1b=WFVK}KAD6>;c z6*-e1;U*Z}Rcl%yR|)e}t*tn-TN2B?`>ZrF4DY*)x&UcyW#N(o?6PjC;Goxh=_%XF zf$b$S7#+8H^Ih>m&*eiROj%nj2{Mh$i|5OYl%q;X1=mizsN6%%RU07f1SgZ|!VD-C zTL$otmvS=gmes&=_Q*LVFCW-Tz(vxF0Lrg2)o*;^oc`wB_vRm6daE78D_5>ud9Y1f z4etx*gzRj3;T&6VZjB}9dKb=_nH;AWR<^}FQB5f%)ngF^j71;0-hht*zQTIK=abTp58(r@MP3niAJO{Ella75~2kV@ukHE z30#+u{0OIN<9nRcsSE&T;XDOlShpQ5sOyvmnOzScLUT`4bIDYonJ`C1iIw}Ps;o%Q`z3^8Tyci|iXNamg*NRKq=t%(DqtlNUMX67 zBHSch7}{Y0RHBL;D70&K3wJY=le7uYBl_<_hn3+Hnurlzj!JO99V0I$tk z-RukJcw%mYve|{RW5Ky4mYf?=ILF4LhOmZU3`A>d0$`14j7)#DLiwT7|_^wF@pRBa|^vM4qZ$37RaK0f~kk zn3hjb45fy4&LskvFYIjYPF(urg@~Bp39UHRQjL_#f=v3Jr)p0d5>yJNX=VYec)=nO z0w&DDnUw4oO>@ks7S8m0>E9io(w8dy1U0lTRLbBJpS(JWFc$u;F^jPaOB4+}JTDBt6h zI#q1PfLxlwWE{A7^^dd{E6y-ZZg$ZysJfMKC0XntD1ox#^z?}BaT=0>o;5TIE6jx_ z;fBF}<3)+22FnSFLMM>CIY<;YAWx9eiqoY*Hm~S_ze-b+LR~F62v&X@{WMw#rvVTu z&5{NE>omH}qhBPdo|YA7MGV7cUhxFMWvoFMz<8>*#7uz*HMAJ0?Vw{J`{4pgT>S+@ zy?3X@3e8hf7QNRrCl%67Onl1AIzZ+FU5Nld47w9##)$E9A8t@o(Xb~pKG!B?16Q2S zEm>(?{w`hBz>ln(ec`Nga@+7G7tYQF=T=y9u7Bawxa6RUv1P3W82}%nIV3rLi2=HJ zY|=I_!!e{al^M+lqJ?Fb5WUPH;ZPLCGs)L`WyNV$OY|PQXy#TnM;CB!$)X&R=OL6{ zJHhoEDWDbULdx|&HH-7nv(zTrCD4e0DDX!HT0_zhiLM9n5KaX*!iGfb`0~b4k{5WY z))*9bPLbz|6Q^oRJRS(N({Ker*7eyGnegshI1FJ^pH7h@@QJOz#@U&kTO@9B z;p|*+ZiOZ1`WMc>V&LUS)4;Pm0UwSSd17b`@>8}5*Q{XJHn+TNPY=a*WO;c`Kw8Lo zgpR_5+yYBJHDuTJK1Vr3^1?4~W6H#KWO?86ocpwr#T925%T<7Uab{5$VRK=yNDF#M 
zc)h}^d!1o0J{bZHP{kv|cD%;v^twNSYl^FfjfTAgm)<^o(fh5aOrJmTnxT7(| z<*~V$(Ey)(PM2a6&SZrORY2-lC6jj>m!KOF(HkidRr+Umik87C*dQqBh%yqwh|mEA zk?Ns=R&Xi-1&t_}e!P2<9)AjTT)y=+ecBbc6 ziJMzECoDL(z>;(Q3nvGuhP92h9B=|KfcYvz7m$x?gHSlnTz|@DUd&~-T{rNjt5d__ zvW6?9?XT!N=x&LX8PJ@;Uk-T3qp@~HZiesB@KgIg-DiY(tT>IJX-4t6pW+yn?QPDM zX=5>P5Q|S}CqdfQ0|0JO+>{Z(6(>RJr>NTsU6_f8+_p6`@zh)MQ^R8izog3GWNUT( zt&2<6jXZTIrLIcN?T0^J%Y%x~*f3HV(uy-_Cg{DF3>586G9e>7P2>n2a`>Yx9Rvpy z8N=QMAWf9^3Q;0560#mR?HPy6(cqXJ!E3VT6f`Xtl|KXJ3nKQ%Q?&-_4PATd1&N9P zScK@h1SmrgTmX^`b=ih4CEWxNmq>s5On=np#2*79;cK^6% z;#|B@g>yGc*-mhWP*=4Q{-dP?Vp%N@W&E6W>vK5$ybh2Y=knP+D8HN_Lo<-rL-NDQD z70UTR^Lw1&z^L|VN957qFbn57(6LRBL(Eb&5d(Y*SLhLv3k+B|UC6ycggOR!k+248V(#v}J$ zTt;nNdZM!kd^30DQI*dARBdO1ZjrdDg>&M9bMu#+>s>e>`*HWFm>xJNG0rsBVrXKl zgHlE_`9`o&e~n+lz#x5;llbg)j{3p%;c}P-llmIUG5TfHhc_C%NpPwMLY9$4T~@Jp z$7QVWJsqcN&4LR#6DHw0{zM-9OcR$KbJ^&2k!CkNbO^yb9I$PKA74ia8HE8AsW*gS zg{F;dEsYLVYz=jChfITsLR@br=v1-*0Syj#E=-VoBY_Tm#;LnU4Us=EsYve-dKoV4 z_(K}R`17D#4>tL7vfS9G5364J9`ai)lpDNlojD7Q;+G6FZ?ud4(PwyW!42B~5kpEx zQ1v7UlmLam;fnKWNH`hBjRjLwcWVwyD7uIb6rgy31SRMp886HSMB^9q&HLeS-(`rn z4*+gs^mBaVIkr;5Fo+hR0-e%5+6XfYQV2kz)C*;*rj+=}tI$Trlo6TTjEB6R;ezws z&I<70(v{PTec|lP&214kwQx>caBlvRbG-}at2_0q+8lH-PDO}4s6IJh0Qy+9=GqVo z2fX}LZIq)!cB;0#DiB1V+{?b?YB(khPAl5+<}$_QT8> z@nE!Iy<9$a$Tz#Ld(q`XhJLONmwnskM?vmW)^CDog`>_{IHLhFxe}c4}ZUn@=zsRU^(lT3fb zlG}<7F)tLeofC^Nx+;uZfUt>#{o?=S)`6)@p`k9C1hh^>-Lwn;cJ>8Scu-G;+P;LD zB##5m`LZ2bS{I2t+g0JgmKzU6$WjOvDwuMcu;Sdwy14J*&N{F-wK&}u&d$`_9&s}Z z=g0pGKfpO*`(L-$pY_sO|HA3N$H_seG04H_I2(sC4mn72`Y`Y1!n-p@~ zp2H~u9Yg%)AgaJoWu6jXxG2OaXMn^8H*B`Ok!<6>Jv_8uvnz7`^l<7a$+8!rd|=5Lb@XnuB|r;g3DcA#)Vhlb&i zmPUiiv&&EJ|K*+kKYRcFBiC`H3xL%ev{-(PTqCzVzI=f%)9tZIHc8MT3=)O`0(^+% z7?6l2VZg#HS3k%Sux)5)&&9yJSWCVHLE+B*VZ;RlaDn|x_j^P{=BYkS$x>4irBkQ+ zWM;${UqohgW>%h8-PdASbq-=;S|yjl@Mm4s@;S~fA{xuS=LoNcQ9+DTwo5`=%^X6< zEkLi>>YIORz@;52 z05z&!^S&M{P9ba=u|@bW!8?2er*T(sP1vKWkMDVV14jVD`}*c=b|3!-I8;j{&7C;6 zzH#Cy+IY4&%M;BfNZv35WZH>9J2Pmn6(=pT#*jL&y|95%}5E4}pNAOGU> zF?@FG|L_0jS+D>1XD8kT%OC%E{2cpdrG9k}ejO!xmXqk7*%7lRwvG)L`=xvF%0qQp z$atY6M-#C?U$+NRuzv9I(;V4QzH;yA<1P1ackaqBwvIl=6*4&pBDeMU@mY)pTWQ~| zIAL|}PoUevth4S3f`<63s7)ji0-vgkoZobdH!e0jc=ca5k%>vqR#8+$MM$)Oxwk+o zPB$I324QF;Qx-!S>a65!wuR-%jkJ>ME9Z`AIJCt<9H}*-_W@nsfBt~7OFGvmoVs1a zLkX*^kb;Sym_DeA+0fufW?E)VO#JcTtB_I7zvJ_buv2mutXgrxJ}FeM+4{9}M+kup zYBgV#wnIgh;QUiYIL5#d3MXKU>xe0q$Ci^H(I{uFIFyxZsL?Jc2SjjkF)pR5m2<>| zaDD$1%ND5deEwS~k|mhlT00sa+`4AYO>xr5fZ`>if+l%`wA4p!*T8+icbDZpNwJ}qr=&|mQ3DY-Sms>aUHUDiXS2;!lof9W57f05% zES=pa175gE;W~22zkCD1$-{u;spGnFgO@#OTfgrMn=U-!piU=uf`JijL_{$r(E`ol zCga{DagLLO^`>(P2&uu3#)feZ`8rzea2sJMeTrHcg90voTGebLGzC=67Qb0Tcv-M4 zKd+X8M`(HlMgiGewczIv6DJv3Y{HTz|I{#)T62;|7Vy^Y;oAHy<%9VwSb1@fWn^$e zAi0B@g`Cw60|E@Q31gOuSY;eW%JnU6cvUS7!`4Y>M?q>!+4wC=_mL(Jmaq=t(`$)Z zJV6yPb4#lL?O@|A^TPb;Dj;zpooqx!h|I~Tn4vYoV#-aV{uS7?DXj$&osc4_Aj_^v zzVP4U{F&y>)iatKnLqJrjYF4LU;fqJX~kLXF=bwy7Qn=LH#=)~WA#Mzwd~5#KeOLv zcU%&C;^bAe+7>HMI0<5YoULC{=A1^0yBFrGmml5dbTq&4R4$)B!jcQG6D!Agl_p-8 z^57hQm*#70m+l;*o{KIro7IN~XV>C33><^vnh&pI_)+V8NHS40Q6vf%3m7PPJyBkS@eiF2H7fMTR#SEe^mzEjT__*;Z) zi(#;g;mSv9p$@l(tfL_gAHV;!EY)T~qQr37DQhHE`QgT)T3R}EWFkix-kqUJA-v1G zO=`QA*J_DmF?)&{amDG)NtH@|F%Q*tu#5z$q!o^=B&0m==LbUQKs)j=aZ2DLt0Ie$bD?JlDg_7$6PMVMxtei)HFGAz)O&R!}W zgTq=$gou1dGcKm3{K8k9uY2NLJ@MN=9Cvo(A7Xgi^UleiF*}{(d~xQBDbi?xc2%u= zAIE|kFr&x1BWCZ-{ut%#(u)h8^YR=gjQFjb6K7mUd6+y$F+JgO#Mz4G*>m9wQyX6A z&aIueVHSK-r}L|jKEjx>iQ~Z6BCvkNd0->13FXj$ff%o2h<@={*}2)1C z+$vjme0~8L<}FtNUA$%g}P-1<#J4=i*DtN&0;w#j~P`2 zm;M53zpkE;BBas*o4<+xe&OdhFMGD+(+^(yw)4la%*5-T&%g6arqel2w!-v!v08xV 
zIHMc8Gor`2yJa_4xeC#d6Vf_z-j4EEYylH8?*ITG07*naROq6M8BcX#;yk$Kb>s#_ z4|$aKDj{AVJ`pe{fx|~}5==%X%);}g_uyl(a#9Dz=(d|U58yl!t_f9eT%GcT6(=su zPr* zu&@p-OES(B#g#fzO9Ci3akdpF;fgP*!t}!Yf=)D2U#if_hmELa6i$Om=QvZ6X^(fF zdt1^$Pn0%HLyp8&76?&0u=FPUN_U62^hpjdIkD9}v;xkFLX;>oqzUns5E5BMEPQ%w zMm9`yr^%41(d08zl_S(Ie8qXrljSuIi*bSN>f0>*_{8e}W%_wmoNxSm+bb_W`rbAT z1-aPW`?FE?4;;|mACGE(o+s(PGpbJGUyW*io|o+WU{w91@Vs|a`}4e)=ewh7t=mcP z>Ztk)Ym$HXxi>|@Y-^H%Dx-Xnn}mmI4~)LDn;sd};$nUR))%F{z(IIEKPrKYw(jRV0c5gcEE-GQU?j}1K#6X(6H zdv9^>xK6EJJk98dbDbkMUQ!K7H2h({#p}@l>Ujm|{DKH>F?*Ur&H91YnIA+1NRD)B zlJYBC(wxKS2kUhSw(Znh7|@p5bLBkUtUNirv3mHzTes7=W55FuB93TyD(X*`YAh}wI=BRD?InE z|KLHb$~kLw8sE^ic{YMg!WQEJm#s3&3~wNU8i#dLqBKi8{FemQ=itGe@Z@lr|0Ue& zzhnrbisv|Y9A3YTi0AW{V+^0=SpK=v!=5-bSMI&$!Z|Qv`FX+v=jT{)UjEj$S6*WD z!)+Q0{yFRN52NZIg&OyCjJ>>4-n;i_W6E!(@h?WT-^zZ}0gD9*{orQs?J*I-OJsIH|oYx*N={coEDapX)20%KL~mac2i?`8U!euwBD}XKf>)#uD8#saBM|QoEXgU z)WVUMPuj6bMqg--#SI*(9W==%Sl=L=1HBF!Znr&fWA&(?9JE}k#QM@PFr=VMHF2V` zw>3#(bO?c;RSJe_&OkfVioEdJQuHl)=uHy{UG^RD~ zml=7fRx}^cL6lzy1|LR2X{@VicN|`y!^HVLPnE0wL9w4k3Dd+cSI_@6#{18Hj`Iti zICI7MxhBpZZ5zcYcu^ZUDjeouKIfwJ2<8mD*7hHT;xQJ8dd0+f>@psSj@R9aFD6cg)lZy55{*jT z#Od>}3US53?E_>zrt&_bt(6s4nDiq96Glm`IH7J$;(%kX@f5~OL*g5(#Omd{xIcr( zAJ?7?j8MQewNEu9()*I}%F5;?Ejur1G~QwE<9Nz#BrrO^>ydtk4>||q5o1OQ-rKwe zX3!+Os#f1~JU@+kzPWY{Uql$!=ob-C$2HgZG`GKU;)MOSCJ|aw4B5BaQi58sMRuC| zsNz=tTLYF;v#paC@NH!0CXw|;TvR)-Q-MvwZ3kn27SC!DA-3~UlHbk7n`DK4x#IjH zC(gL2cJ_@!TsZejfi#^aug zdsLm;y`$P+rFz_Ru7qj)a*zMefnj*w@g>S;9B$*a)UlyU^uB-f>lJ zs@G%UyaTjkG&_bd6}N@!`?#{=jER%Lg(ps&y2QApNdd5%Sogpk3EJ%_>nk|$yS4=p zDc^-L_#}S=YL5u zZ}Gd!v4^a-xM2_L&<2l`nkKq{sf$+Q-5})8P%A>#L3Y%a>tGG8Bd1ugYB5=enx?US zUvBR(U7Wgx|A>=U<=Cvlz!lZ($YGqrRVWphG735Z?_vt1wA^JdHdYZ1Ss`hwS`n1Z zMQdZr!?g?4@*HP3fOJ_52hU-}`46$ijEVDQ7q8(;+LQk`Uw*XE>ZSktfAa`hj(R(% zeq`{scLsb@HDBhFgTBgbzWYxO!o@-CjC^UYuE{JrsJ0XfV_4~$QLH#|Rc)GXaai`^ zG036YdVFcwIDSOj9!S!N)BTbDGub!W9e}=8Y96e;s9k!?2uTp}^We=TCKU zP_8%~<$y#YuQ+i#%53I1d>5jp@AIm3Oy$Z~L~af_9Pa${YJLumM>p$TJ5{Axj9!~| zaUS)%3IX+-^rXvC0|D$^oB?Ht8{h4fjGJr`KfsAfV*-q|ho%sWvm$7`MzV2nk#p!| z8$Oi8E~+$Ow>{n9NUfdP5_?XZxW|;vxanXmivfFh%^$%0@9Z76`7E#PX%lz!jN)EtFkP==&Ndf5EJJf4XQ6)R`H$x+8Z-yXfvs+Tt0CK zo+i#6nrNCh#~Wt%Cr&(ak6nu68xv*K|3W}%jJr}=SE4fdCl11Xa!{Ci;#><}2$THr zx%o*?XBQy@RZg5V^fdJ+xqZrMMX$s6Y(87Wu*HYG<0-XV>@av7*&A7Lj*!qSCQdSL zkbO;eCC)rq&b8hci%4v$8+}GRF$g{ajc-)7%6i3Su#KA?D-O*m`j4q=p`UFz~2_R#e)qrEVB`M-e{5O6Df8MUoK^l4cJxTvN;D z>vfzs4;3D+d#1%~>*HJJ=y<+%^0s{^VFFq9W=rMs|A?aFu&#R~cL=N~%oFjN2VyoX9`IlkP28}AM%4}adlDw5@>SS<46Gyeuy>qDu6#DD>W72G*f%4=$eUh zBh-mau3#WO9e?pp4thyHDYm-$x`(s>?J4rN)5JMGynlY_G;xl%;qFhIaXU)iowKK9 z+H}liFH}ywQhm9tI1}KB^Nh4@x44c`@8Sv$@ZojQ)crJdSPHH%@xB}DOPmHgsPU%V zjw1cRd_*}`oWGSJ@8irvwGm+>IqV-1@y(*TkJB?Vj{1S_jt}bUEL29sxIL9Kq8!{H zT-r2oE{%u*5jXQg8%Q8}%Ahw>^(U5a=roW_aHKYE(>vYx9XK9u-Xvg~lQJmJy6>SPL_<#jm6K8=N zc92Wg1EC$@l&rPZRdCKo=o7Fbwm36-XM+v5y18V>q6`r9zWIhu(gq|o)gVn?HsQ;S z5IiBvs`geXYt%)aQP5f@W%q?YIp~C^NwPWPX({x?8BY$PAkJ~_Zx+m*x`~y>vGusB_M$ts)_Oj>7%R@LVAqW- z9Hr(PaptKVs-=%JTcWQO=Nj+h93HA&%$7;oigTtBQ1^%~bjLb^ljn#CT)75jL{i0g zY`?KzaN@+FTF(X${p!?M&cTW^yQn)|j!I3M{c_kTCEaPo83z%=;wcZ(oxr#GYg%#e=Oe(L>pMEi z^&j7jyQ7T6%;wnQ6!R>IO+QYg?zvZHmsfS@zzK9Z5!XnTfM`};?eyB^^hCeets4^7 zh7O;$(nx5?_7x`%atXzYCOk`d#aR<470eym8g&-zix*qJ9m`r^&lrs(BQFEOjY}?&IuR z#PQZXo67=5#*N2{dGC!khI2=GpdrFJ*wdvXgoSX)T<9?0-tImME9lP?gt!8lCgxv- zKdw0Y&4rXS75O3nE2VlppAz4ITJZakdkm@+$W)#TZ@~GZ7Hg6^?OIum2bQtow2W%U zHlz=p?LwG!Unoct6s}#1k__?@80AB>iD}>0ktHIua(hcBK$9t=OE;#?f2T5IrmKgu zX_}}kBMnh%(zt?FAc;@#7rx^B)rRKElOE1~qe9H0Il_JR`#7hGGuyDzY2vJ=7-V*T z;;a>C>w(+5oOSGl`A3Q0$npJ&D^9{RxvkMKCQcjY`i`WBF`VX;^v>(d-6q!;ex?rq 
zgG057hgP_Y>M=j*|+^#Cj0y(?)`$i{#VF&W}g9DWI<|!tW`a3V&X4S`pR(O|Pys4N{U`*6}5zT@xiKAYO4A8?Mu!{6mY%`5-wkjk$1b=&TtaW+oB}b<6s#+RsLq@f-Vi~JhVH(^bvCpzFS1(L!Xuj|j=U&g1tA5(S|JB6lOKfvo z`y5XWnkLR{#7d`$vzlU%+5L&L&T*>KtG~JHsc*9EKFGb_NG$`-Z^~oh?4DJ79iv+w z`V5i{i5>!fO*vc~#O)3X{rWDhH#z??wefJ-OUCBJ>5^hX7KduF_QC5k<@T&YwOG@M zWZ@^&iX;xzia`;bhRtH8gTiZ?tK$k=-(aFOFm4zXC>*LaLI4HNapFEs3c69s!&_8c z=H_S(>ZwwfXR0kRajw;k|6s$BTAkVzM|=X|p*d&!98ysqQ1}yiu(FCdq1^#b$4vM|-E9!cg!NKWSML2m2G;9c12S+La@lNi$b2 z#0n>3s+wh#P8o^N{U>UVsR4mGQtW__D|=TzLNpn2IhwSuyxFyD#fgbCWxP#7H%d@| zH#SM|1cb#^&wn=z*2FpYlid%~$P?#cgE#iWRukb>Q}Ea(Uc4Bl+pyw%EICOI#3b%; zMFMFf@x=KEF~tP#M;G9LCvT$=>sUllGXsDkz5iyqILIsr7jluXazs~wnG#rf1x*rP zAE3e9eY{6+=+JYRq_qjtHrh~yHQhRc?JP01f1wM|rvEr<9d8CM%;G)$%9&hHGkwWz>>!!ZeeRDqXH43Aginzx`TT9;J>IvOX~%^D=xSbz?U z`61BG6j5NYvrFc3AYC{^etcY!^tnu&d;i)Up8Y50e=-bVT5%5BDN)s9#c88pkQTeX1{Y#IoeT_)kvJMkWo1F;A@%|8JXBlO9ay$~tqdvz z+a>KfZ!PPb@Y<;i$8E#J8UH~RI!fRUuIlGxqZ%+sn*sZvakha#*|`z{cW$0ivArZF zGyha-N#B}Q0X;shNcwyx&b@DI=KMZf{5x}TOJ|E?T5-0v_}sZ^;v8?n-JUr2?)`ms zH+4aFPGj3zjbo`^{xC+?yX%|5r{j?d+l=>+{X!J7n)f$ahBZGBF8 zyH9YL$jNQCsB-41yrvHTul}&DTT@aU&{M91<9`)_y<0G;z+Z3%jUXO0Bj z80o&sefC=SKK9L)V;6Pt>g$I%1oO3a;?WcRH1eN5_CeSA#-qD2kSu0rPYQGbs~k_3 zC!XS|a+f8@fI& z80@sItU?@23I-qMv@N7KRGWC2Wy`_DETqd^GG&cs zr%raKagS`}v)egu`eT75js={Ii0mn>#p?X>p5;Kfata+1*U#Y$!i6FYmV<>=W1~|7 z#>W*&zl~Pi^>dtL{@EL6Fm+=3Jbbx+5wpcHtvFj-eD2&dagI0P^O!i@4{Ow6SFH{j zIhXady349Rv%K$;ULRk&^f9(e2WfSeB1^DX%)b5b;}gdZ;UPfi#CI;9xOj*1$8@~~ zS6tE7Etuf$?(VLIyF=kF!QBcC9^BobaCdh?kirS>5`q&v5P~ONxv#(OzVD6x0gSExi~T&I46sRa9QDFnXJFEudl+f=aK#y3 zLxwR!cuje#q-XDf!11eFOni+0G&f$qO*%%gI%dMYTZVr)!q$-S*w6S(o<)YS+Q+BN zM^0$jCh5lw1O=4*7)Vzhfg7>$lWE$a1QG5m4o{-q;;*p+rGzlI#NK(XGgEV`^#n3Z z292e(wChYm={J+?KC(DCqDh=OaxD3R5MeD+d0o;BD7$UwkvH_{VbUJy2K2>X@Hl^c zRXV&(jWkyYol-j^%K!tsL0C7F<9v5=2ddg|gtKqQO&PsetsM`1p`_8+$cG9=qWD<1 zGIySNY{)$U!NGqD@gMZ&M1{`QRzzMnW;ms0zJ|)_#rj|ar>o*V3J3?W5)Sb-KI-E& zKbd2b;KrH4z(V-{fz&n>qyoOYocs+sU+sDbJR$z}mt$r65i#fLfXty4ic}7GG5;=z zz+>^+S$y)()B5ASbJp2jT^V7f#-DY`JBes&GYgXLHE=#VA2=wXbp8{B1+3?gvCdZoh!$+lAi`tODM;b0bMYv3M3JJ%QILJ3 z(h;A6RWZC4$9LelQ~Whn7NG7vkg`kAj-8;eR`@g4lz{1Eb3I$5AkK#OIAB|k)<%cZ zi5Wp1{P{$716ugFt(#98icl5H|Gqe7K<5wx(fI8mCMA+RdjzyWxN!cUaXe}C>d!l{ zVbV<-AS!g4w?5*h@$3Ea1H1I$HiqRWag^o@LFQ>_`!@z=T$^vZEEXP1U;61>Du3Ya zV*Ydz&7Ok+j93ZHuB_n~@ScF4H!he4B+$)_6UChbZl)aZ>BM;GS+oz!;|g@8(g9R- zd(ibj0fw~aoCjofiPpjX8u$|Gws`Iedt+=j-)0WU~973t! 
z6y`GC6D^l(M=~mj#QY`+bWu#zY>Wxph6_UFbVsNa*6;-iB0X>SS?9h=}0FmvcJtTkaD|Gb&WRO-OO?O z&Lr3c`@%sswqQ}e;dB}aFu{@1_;yN35=TdnWY7eVNod_=H?V{_AQ7QXqa`g{QaI$d zbqRfG@^4VxL0en+-f0*tU_A3x5JRjt#47mKK2>gg2r_2Lq!BI4)*-{756*ah%w;?f zjUg1~;obr2P0bY5>(CvZeShm9b=ML9Lb0Jnc}z02)<3Q4iK0}#H8o@KBeA<|}-J$&q@`Qd2-5+6CY(^A7d!T2hm20lQ)6J?mK zps0qD3qGoN21(+xT1ou!)fu#(*gLLUJ3hWscahPE&3?y`WFeBM4~|o9@v%1TL=!g5 zc1glrS3&dA_L862Z@{mrpyR_=0Ml#kgMpg154^eN7#N#;fltuD&j~eZZQAVVxpN;!Dq>08P>fszm%p-_j44WA|^2-R9Jn# z1;ClLGaTIf@)i|)JpAS_%?Swu%_U65jT|#aGl7IaDCECwE_&l9y$-dUCn1#0wtY8h z=;rbaK$N&ew2jgIDFi!U|E$TO?rzP8aqRG?aY^&{1HTq?pT?wp+ZQ=_ReKh6pH!sG zYo;aQ7AuM2BtPO{yG`LSi*?+NK$pO?r2uhHlGN^onV`44*D`+~uHC)LL-#6YUA*-2 ziN*J?N)Fm!1D5SM(q<2rcZwewW1VRYDk;plL2;|w+b`rjc!cAF-Q`~^{XSUYWc3Lo zwLtJs?+|Ex_01E^{t&1=hc-G8QcvAe$LYriI$#UCTH^smXI zF41H?l3v0Af(OVWe;=vlI&yRgwZo(|4K8pDLIGcKtA0Rj+WGBUSr^C7iGvX<%s}wT zZNb=}Nd#TV^kj}y%#yqkrG>CYI$Pb06Ymv%Ox$+OWJ>BMZY(n!r7t%MSBaQbMaIGn z(06k9f4!51fROtUSddJ8+_=p(X{kp)QIY zkEAJ?wFq;Tc>#l%vwA@O6k|Oyz-Xa?lPD%0WQr%>-~!+?lPQTc;|=Wg@Q+ zMiT3=Yj21)d%iqfbtzQ+w;4`oGXmALYOWAs>ckOi&K;J|-4t2kS30(wQYKbS)@QV` z?=mandBxyShdHpW+H{3&CDHg93H($!X~OmT<5En5lG2c>0AuB9osToY z5=z`j)It&Z=Y*IKpLab;@iOAZNlwk)IFWMo)Pj;J@3S4bjkirHf&n?Ra1Hq1$5?iQIh9y2BxU8k`K8v0;bcxdrBY>7h;riddqkRT@am)=GW ztvQ|r@@!zHiX3e^Unx@+73K;{g1I{q(>qA)U5jG1s^mCH;E%W(iwQ2uI3`V=?dK^$ zjcIuJG>=&m>}Z2}d8X;uyGl{?orM&x(5dQfm5r|Wn_+o_Tu#2AjJrkU)~{m5^br-F zV`xEUh1kwf7kpya4)~rKD4c(*&5`0cSzN%ypAB`)jX+75bss4Ia|*Klk-l-bpA*Q{ zc3N?4wo{`&YuNi*usXJ%B31vCcUdgw^8Rdmq%B2t=a8crDUPq#g>RSF#TP?WoKP`T z8D^jw>ya@meUrs`j+eT`I?Gp4bz@N#=HH;~dBYfQ!J)K-79KkEDAuf*5MeC?7dQwd zI{4yarWALh2zbpV4o+Az;}IrJD!u3!e;_+7Eug9tL=0)p=v+^U@Ur1p9GI~tGyM4e z7T4y<1R3-<@C;!wKi`bRP)(IpEnv(H%D2-%?~F)Rc0YH#UcW3H0uScqz7iJ+N5dKO%W&1gbdw5JH9y;bD})m zk7Tl%lMa9|$wak^jopC`ZDes6u6`!Ta?e=Kk>Y*x-A4}%jZeHgu+}D(?(cLuWf05w z&~1jvk>3c)XfcGm*;Oh@D&_iuzkj-h>*(`Rp#^#D*S`SBV~~#Kgb=YZ9r&>TUKn9G zUkn9u>2eeGxP-aBE(^U}T##xnNY>%o65 zvK^JqifYVbR?$V10&XRw3XfvjPc^8x7as7X1$@S3@X)m8pfXLvM;^J!!Em-SJACn7 z`;R(+FcamIiBv~OP?;T|={{k>n)@hXtjg9hI-XQsgn)i>ovVCV z&`BpZ1m7ClkX+G@Cf1QfXc^8zs_QX})=?<2JVb_*wZ(11e#A z7=o3RATSDlxXN`XYF!dln{9^gM!Cgme`fZwOyr2$OTD^m(RTtw*6`S+2av7GSa(w~ zuJY1^U4^!_Z%XIiEb}xuH6aKwstQS$+RUaV0ZOzM&2YAeC06; zkQAuDa<%_mNHfUOm`V?PW_xd%Ehf}w|7*Kq7QMR9+1nYf&CR&%vz*!}5G)hL!{4na zWn-6Xg z|KVbrNqa#7i0UY-c*5GMvX^B0rZBSe7|B6X?vAP5Cy95PS?joMVP@v@hO`xk(d9}K zJhsY;XUMBh0p@SpP}x{n`iK)-vV9nrF2HVN)qKq(KXK_p2SQE}uyjMKu~Fvp=}?d# zu5L<45)Y>_pt`6tvrvf4l!vE5qf?1ws&B47z-gK`XT{Mv7*UieU()T^-EIl4ed6f+ zqz69L`RKGJOhdnKwX1c-X%&WRXT1sZFJcNokp2D3ffZ>Vr&hX(wI-#Z)b)}RB1VcZ6YW2ah{S1t3`{AN z=@22HGJ+`e>8S@v7w5%j(fbPH%GWZ1FeMfwt#c%rgzwC}ap+$+iBfOM=UQvwkPZo$ zaqH=oILfB721w*1XL;JFY_mYbGuB3+|M@?g{~)V+Aqw#OW#=n|PZx=BJsyog-Wz*q z*ktF_Wh@}-$$@Uu>Ck#f2CIa5aR;lxnjp!vDdjA}+|d_`f3gSccFy6;75IYgU`msv zZ5WXf+qNU_C3a`<3s$U%%nL-Nw2vpSnx#U%WY()4{H|3AlRWL$VJhQC`$4tchFfhEN&!kUsVR2Qn@5 zzZFc79Kdej)xqmA#)I}(-#U1Y(I3b>RVu&u+)RrOB8cDIf+)ute(-k;F2j7NA^3jx zcLlWJ!dh0Ze7Xc>jl1M$V^2tTVBUm)!JPNY#JlP{Z@m;mJD}gI`i}iMVYx=;j_=zC zcP_fX`~}y-O>j$bccfz5>Y2L@1QYmG!Ui+(x$Sax%6tu_>fafOX6P$Oc!nCj-A_%PIx_uSGtLr}sx<5%x`-cg*FrO4KUlJ%z8xO^;p7e@H@REz z5UHj+4gx=hrZ+vK{&gM6c2yvb8&lb0Lw_mvA!dA;QduzF2`M|jZOc{_%kBG7yICMV z+!Pz=fB5YYvF9audn5?F@U_%ew<=yJ!?I}S0yJ|{cLcb@_z>QTvhBTCu|MyBvWOF2 zsLQ(P5=gG)g`5&-m|Ma=y<{(1ys^(>Tl^N(F&3J|kuHI1+^cS9o$-77X~9FY1{*qj zi=0otbk5)yez>^XDBQZBr}Buo0#)q%{&LKSm}nV5J(+IiK=A76By(EDWFwgyTsk>B zsHujMo7Au6vVwJ(CR}}v_MS&TiecavkahN_#r-*L0hG3z^Z4h6sO$7*^A^UxH_aTy zQ!*0cda3xbXgSx^`Ru>d%5f$Va<-hgQ9wGyF>J&J=Y6fm#E6wjZGbdO*_u`9P_zFb zOr^lA#^L!BsvPYXkuto6`d{CTL}O#yQsK)-7Nqr5brqE=D`#+)(yD><>)f#L&gI8R 
[GIT binary patch for figs/cmt_fps.png: base85-encoded image data omitted]

diff --git a/figs/cmt_robust.png b/figs/cmt_robust.png
new file mode 100644
index 0000000000000000000000000000000000000000..442fdb9b588f94a0b6d3d218b48128e432ebe9de
GIT binary patch
literal 97630
[base85-encoded image data omitted]
zx~+zMFV!o5_DK*DyROkQ@_W=qm+U2SeGxZ;c%EouGnyZ@ znrUcCR^7pghESZAfzH3HdRo3;zn{qGLWS|mXXucpmDPW1#&7l6tL)UAHyDoiJILj?s=bHS$xea4|y7S3XWlE^0xpL0k zuI+NE^s)J|)Y?_ZE|WFcWOvH^4As?>9;)}I%**{}hBr1aEq;@MUJvh^voYVGw#Q%K z2#T)-mkVDP1`dfkhj0wznoiy1S)hgrA4y{&mq;EJA;^c8z$TACXcVHqlprrlKS8%L z@IUXJsI7qGjKOCvR1TVe)4E4SHsD|AiL&_Ao0J){^D&sj$lufE?iLrd7WcIJ4+{#p z^uIdC&l5f8wF!!VLc?7yyeKWmbRF-X3APFI}N6 zc6+vWU*@lWJ`G_A8m`9%5_e@*uBmxmJJUx1rSJ>n?s<-&qHt1yU!O8MtH5fQhwX@+ z+E!UO+_2B&G`lDtgfG>G1i93x?}Euj)iNopXz@T9nq0;pg}{=0WsyyhwdIAHhM;ou zHu@>RZv!uTKC9D;i0XWM+T2`OdG3GdOE$~1?t=xie;j@al|Z7#%N5>wxSjs%efJet z7o6osO&LP#6igfgI}S4GOc4qM$v6U%`W(mIqft2>u8vtrIapy-8NBFOzv-@QZpAb; zQ~6PsUlnR3*xN_uS&)9!S3kalA}cw?_8;~M5sv>c@)VvtBxJsxMV=Mj!o1d=x~M^N zfb&3IO}h_cLKzUYy}hcJfCUpgJq>ShjBP1+B3d?v{aKxt`Fj;yp#roZOu&KE_;>;h zAE5#{2JD6+VYoqrCn&$%zpYSFKnYsC`&bu2-u4jCJ?z6sw>tex0g-zGY@#aeO=P&3 zf|N0Mj}|$N#j5JipIv&CCp;sgdvb<&lQE6Fq+O)pqo2--`SyK%`1Vxz_@2eEC-mN; z(ZPKvnA~pdF)K5mJJ=YOn`k>rX_n_*|0!2w&-KqG;;(o|m+x4F8;cJLRm~ADxk33z zZ+Ru?k11J%-5rU*O+Go1dwn|w=1!=AOXP2fTFs!cC@+{fYKF89QQ*$oLzQSgQk5@C zuZZGbi1RugqF;ovi|iMuL-GA8>fHHfkN8W{k;cGJWww~SN`4UtVe;s2*LSQGM^6wx z3p+ZhfZNQDt%2>%61+7~pX6dkCT485QL9`6eI42^U&|aVKI%0GlOODRPkD zVV0n37b@$19p;(N$9%;u`Hm_rmh~B0=O2X*pc${c1o0IjgeHXLF!BZ&CWjp?qDN#{ zOwV(yTqZ+f_hReF<``^1Mkd0;6gD{;l^EIxTB9H9@KHuChQ;iKtd^GxEbvi$eBAI! zLWw8pnDW4+P!;V4`J?MHz56rJ{;$a{ab&B7uy7Y#g)V#!T5wpjziB6ZlK}smtb9K~ zY$Ucw@mU6yX<~_78VO1>#Ln=$&pc`+!iVT_MmilSEl~q{w1L!Edc%=6>VrXkB@!FI zbr_u*p@IZ&-soHD@#c#IN{}a`eC3U8;+`&(@W~aTeL+CzF67#o#hOCWrjG*l+G^11&4KM07!s6cYCD z1NDnILRYL*Q2#G+4MP_lyd;-{?4+%<^{NJs(;%4=edpy^;&u(8-eh@bKV6LigKBE=%7@qG zXUGEq`&TTI4gKHxI1q3(7yV*^4CEk%^VfvX!97Tj(EI5-@D=aX5y|P2hDm&o*9rq& z?=XC~NrNXydz7zF@gNT~s~#^&BPt@8Ud#J2lYa0S>YWjD9D$g4R_8a=`Ni!bos#V7 zRyM*sa+X&>CHJ=&1b7h_G0R<)bCM=1Bs(Xa1_s}K<17o53vqioSM{9vj13hp`|G{F zU8-tG1|EUHcX3B?(=25H+n`Irk6##c5{HyIg6fgcUnhR)#g_HW{yxy^3TutroB5R7 zgih5a!sZHrkq)Z(qpKK;uzAAMtvq3YZ#m>=;NJ-=kFAs(pOz|P_$Xm@lbBtM##?W0 zs$yq=`o1XOo){AHR7fjCCTS>!B7Q4B2d%bfAzQIJG^Rs2&6=AKe8pliiP)-q?j6| zkekQ}e|SZ6C}lKN@OFwQ5PO3F4@-jXz-ZxjZIT=$p?vi*b+{Y6w7 zZ-!B%?4|})PUXAia7AuHezC&P$w}^547)ZFL9x;0R()iZubO zWA!+M(f8i@{oOrP{jKT}a*Y|j2wRV+KNEQ%!yI6;DtT2gbdl%mKxXo5@7tKzOb-8r{=9x~i{a0f zd$8A(%^|IKvGeR2)5U^~DvU|wW!R;&BFvJz3D1{TW@w>1m>k5wvcs?eVx9haJu@<} zr9K7Be9M#kkoHcAVgtQcimxU&>OqRybj}Yl2RbQeaAS{UkwlFILnhk8Ppa%O2VkC6ydR)Oza<*8U%mq%%&A` zvB(a21?c3$IJ&ImNI0a#K1Ej{nJBnpeZWc0@w>5aFk-O%Eu8cfkKi0)YVe2ph#6zZ zWU5n$pkH}4CB+1`mLT3E1+3z1503jsQPk8Vzm}u;uZmBb(Ck0|N3Hd@aPD58srn2H zwSQ7N_DycA5S2^cbT_X7rnYBKwB`z}hHdm`xa>NXlPyAAzDKIr9*hq#q*@g3EHQ^Z zX1WwX{1nHc{2?Ju4#(I~PJ>38jmCX!r+E{(A`irhN*g-JXL9%N<*`JB)pEE_S-<(~ z82Iah0q1^5?>FKKOjn%dY2jS5k5VdH@P;J(X+*n`70i(3Ax%57d!20xDj8v+rgr95 zUl9rNDf1CZHtoefDr1zyQY3gZwTgb1CqC-rd4LBMdn_cO%u9JTYc&mCmelsD z$u~~?-zp}y9IG;GHQx)#i3BHtZaq8$=1Hf|!Pp&0kWnZ~BtbbPtr77tzv5Q3Flxjz zLSa|6kwo|3?|yMA7yZB`dYn8Hl*y??irD#eQCv>#dtA)BjfA!sYxt&y)CZja4qrl> zG~b{f|0Q{12bX0fQ+|27N7k)Q$AIT7G--i`g*zbE7lhJ;Z!Q(%SS+g{GN8ZqId`Nf zl<6nbCNYGcE1tc^5HIs0i3nJ%IvA1J8htteK`gb2>Y<1|nIK|m4^0V?K^g*H4h-=H zU7Q!U6L5$CY7byNI(`Kb`{F@*^ybONZ$^kS)wGh|)030(SuEcWBET&DP}rj)oKOcj zqR-R{ zwkSn%{=BX@6-~ZUqZ+?&$pom_Jfi-HIR2JY&)*wiVZy1!g}BHqti~|?P&A1E4}+c2 z`1Gc8ADHbO_Ty0Xzq9uW#uKAzqY!M?>a?llD#w}msE75~xn)lFaTHkbz6o?-b&9Fm z8g;_elVNbm3t!495^qA$fk7m(-hQFJsGFvm6>ArF+!2#9Uh=b!zH@*Nm9(_DY>H4X`SUM> zSVfLsLvPF+5N4H|Kd(opUXf62NV+P<&620VH*kU?<8U1Z=%-UP=d;V30de-J#-Anz zu)UY!tUE-}CEiVv5xgA#Sfljj<9lC4uV`d_EUb=6tq^4rPfk%^EE8Ep9o8y^!}~nr zW8n#}bRU`}kWbYcXxh*0ug4oYMB<5A^0{wMnM+)&S5-Xm$Ful=aKz_oYz24V5DeRl z0eEWvZZ-T>iH<}>w~Hi|a^|k99(cMFiw$@8&`g1DLR04_o!6k5YLJn5Xz*bBA3b>pu 
zAiABxG4Q@R$so0UKuTJ_$THb79nvdQaCrSMQ_wtFdFl!8=Kp5yj(hOT}=g)rShYgQ^I`Z5%Rh9p0Of zI+qtaqHh#mrWCg_)PNn9|En|wbg*8vl4YK~L)Dc++#fN8$CY!B#Z*B|Iqi#=vMjKp z7MNE!Atc->7a%T01;}^eC84~(E8pjF3*uaEP8$f=-xU`8@{>pNF!Q`15<KRFh=H zK%7s*hs~Dn*8%#}s5HVwW+1QN#7FChYb>9x5%gyx{!4lPm*^1~mDRtIvo60Q(<|l0 z%(sy5UKS;3tehxnGfC+&x&)HB^bLNLBtQ@LWW4QKvp-|aLE9a<+-;1StREY>miJE% z%J}sGSD&^QR6Ho)_%42aM}a`-+hT4dw0N@8$La_44^f?6KqUHq2+aXKYhPDm52b-s zcgHW6i4f7GSv|9qoGmkG3G9y!q>V(EWrb;GR>cH50{Ez*kmq;q5adE3`d(>vOvM;U zyLU)48+pb%D(bR=omKO-p>Z^FEN0A9y;~ttQ;k&|ZacrReThWn){Lga5VKZJ4E#@gv6}^S|TtXajltvLF9*Dg}T0?-EJus|}u*}5_HID2*H^2d! zCC-w0NBv*l*#_u4!MW(Z|2ertFhF8Mdx!!b5?B4MaOlG(FNza+QB(0|jzARA0sjuu z80LT=HiT;t>avO&ya5egi&fK;gTqAS7f3@XYQ2OXkj zg_N|#fps^Z2H*UVjQ&g0{>K(j<{qF+H)OPRVSJ_0tosoYTYtY$Dg?cEkZ%x^Ba#U@ z=X1Jn=(b*A{DF%nlbnrMW!17EgevBTm-hGhXXxVmwP) z!e5)9_c?oyQ`s=#tq|37=uI;1u!6b0Fsa3Ff<(kYcH9!{GmgKUGcVVn|2~Ym^tSp` z8_LB*TB&UIS;-?Ja<=x^#fgnj$R`b0WseAD)e9g{Nt+|971$6-5TT-Qh@~;coKoh= z^q>>n7|oQhTL%!Z(q$8vHli=Y8K=D$b3I|ElB;s7Qp0}j;{Io7axUmu~ zBrF!*j0Bv{M^}XWo>^8?BL3XP=_&CRvT-efp!{t69Q{m<5?HnviMTxfa3bP{yj{@y zr)p!TN<>FZQ9~ZtjrRX^A>Ce0n^2jSnOR`nB|MW|z^w6!QRo{x`U~+U;Zw)SN{zzv z_g~^b3q(7^r$WIF@*Si}J{`W9IMU0=##{V`}DEMa~O>K$cW+Y2! z#ukofx30{DiQq63frgz9GpX}0KRIf0G^kJBU^>FTOGlt}rCUv8r1}mNRz?9Yq@aUb zHBs*nt$nSl(&X>wRvoMARG63Wqg1zs20)dmt8cc$MW?Wm56{b(@#)Q_2BL9wBs0mz zz=bs-$N5HxPfWja5|(<3$+wdWpwwtG?$)9heTgaYWWDFE9p`~`y*pw$PVPS z!7xgl1_#r*N%krDLYC}HDIQ1pM!yN&oyIB_B-OY!$g*mCW@QPg(jkez2otJghqIpm%Y-(%(s(uw^`86^ZHv;VP^P=XDjM~c!9-2{J8C{0H!#_)H) zksw6B4k{i%8G##`8T%6QIfi?2AQOQZ%4C6FBqSx&8%aF8u|#b=G%?5rQ!~Z(0O2Dn zF7r0~YZ(%82=@YNa9*im7kWwp6^e!)nchV40`Kwn-ry3^>Oum%g|%CttAhT|ebzCG zZq9x&hykOCAbi5RW-_(CdBQK$)Y$MS(1Yss$^7hObM4d zkT`r9f>KOae&hroh5x%S`BMu42BjbKwg&UzxE~h$_Ef~ybOH&p=w&5pn1q}RR6oqH zcXefipt#GvpVCW9ucogGeG4XVcJPTC>PbF-GJ7x7wZ*6J)+Hjg;^q{~s3tZ`6Hp+y zfgGcb32O!&UV^3=kCP`|_SrxLvE#)xWvQtWDeJ2a`+wgmfE^Z>)makWpD$N#{mQ^d z6om^xbTLoYesLA5yJz7?9(5{N#Fxox321dO)K?B(bp*E)JvZa_kcBb!jIbl+#l$0% zNU4{at&+pfypqg|CcI)==0D4d-`);#spcyB&kY)!nvs9~$JB@QuQD__l!49^kBAOY zq6!u8x*x-6U{D=V!QE_7v3!ee-Q-2=SvtsqOvT9cq2Y(!A|t`$5ift6($TA_kMf>f z=^c~JI+-J%X=&y`c1Z8E)&b&IH+maMyBe~SBh>d#bE;F(Tx^EXgzAGFWB~o;-`>E# zZ5fVQ?mdmmi!2dlF#}!cFouk>3eM=ztP;7{km2RsCr6DsX57`KQK3mldbU?STl(>A zx>hM#B_GCG$M6l}8%b4u93QG!m3}tCTnQ|=QQl=!u6FT{PGq%H#BmXxB0t)X%Op^f zz{dK=*YuYc4M+?J|33M}26tIsHYq#dC8;?`6pP|H2!%ya2Sx92h)FEU7)$Z{1I3qY zQAE@z%xD|Iq|YOA8HR@TT+R6()0D_FRcFE}I$vT0iA%?F1j@-0tZ26dQPu2i&TKYq zM(%&n?3~Cz;LyLHbCoz@kfYi^($JUxY1s}+`lk^V84*3q;l#j*%jg`Tc?B0>$lf!S zrAqG_`pNZ2HA1xVQkWM;tT&$(?4)^{dJ@P4ksf*ebf2C zI&{8CKiWfIeOXLpaGf&7g-K%1@8NmQMp9*`n2saSWa@H}j?n4kB28n}LIG&hDA@cI z6Fm0zv?f#1Z6`HJDnbTO%v%;nSB@1nkbc%HBseV3j-#=9%r%J=$&DsjSx=czc& zt{AnA+$Qp$RQrrLAq+AT;wl!ab_`k6?z+e%iCD?*n^BIir8kI3v;9;OUI1MUDzZ9pv}@E3%b1#j3tc$Q9}8jeTw(m}>)L*DoK zLoom{gS-Co!~2I|_Md*gY5nsxhX4FKlLX1ni9iF`1~&q5IChOM`BP-cdux$FvhRw= zkFs|f#npGP3GqHX^eEiy%{;EQebiQ1ZGRNav0(P?2V5@wC| zz!JW#c^3qMWvEF+YM2B1`BktXXf!(Y+|&W8(StY0I9 z*}xu}r#C+$ZeezrqLis>+6KRDsAIx#Aod$?ys7wAoGT~p$gai<_)@uZ73!A83wL0& z+bhOjnxg=tEz0^3jBefbEk$|ubq=+RILKvrhk-uUBo|5uj;@T*=l6KeD<#JB_o~dV z!gmkXnUn8<+e2K-31~;Fedx4+igc=@jT)2cD!J~l1kL&34Jv{tM>k|nBxwO52 z^vGS3w^y5?3h5rMR*CZtxKZY47mKJFE?wCc0O+4N#_e%+&!B zw;l_Cpc@Vgz;loQAt2u!!r~MTMcPjF*}hBtcmly0v3i)YOpbBFr-OFKF;HO=*{t2^ zFk#cwb_w*-cJBnN%B2yQ^Jdg)JI2;c-?(J>$Op>H0>p=zu=+n4y0Oa8RBxyud6*vp zxZJMW-OfEVIXo8f8!FqLiYrx@?gAf36|D;aZKMm|JPqEDD2mAzK4rsdmEetW65vUi z50X>7E#Tp1O&lK!W7H+Ed8t`hJt+*gmbzvgj=1M3nDmH>=Ccs42{AG4QOvv<^btTe zyjC?8N0(F#nNY=wo&vD0P>E514qo?IdII!v3d`xG3)})L-rAo}lS!hf&_>x=i(C0& zpHnaSW!YU)*pBoyHOxQv2;CY+m9Xu)b7f 
z^*xtf<-^jbI}VYJnEf1!R?BZA9z5aAxWi;opF72Jxq~+cf#xu@DLP|F4Rx=6-<_>< zS}QVR=BKVQ#bTZ%L{!NEL`#%1CD4JpvbQYGL2TxFQZ)>64Cq!2>#k5jFJ6r41gr~g zj5KtzlwUJ~uEe_*ZH4aw_8-N7h|Y?^O-Z40De4oW(W}5T3NC>e}J=s zAKEp1$}qyEv8!_&pjCd5vDE=A@vuxT{6w}2FIE#naV zHIADN{FHn{y#(ahL(zFCKQ9aJkZjM?buBe3?V!wRO3Ng}FKZ_p%tLYb=pD`$2eYGY zK^J}y7W?%??PHUEW{@~M`UA_?E57 zv>Z*VR#F^>-tl4(k%Q9cYf)g8XTT}QN`B*o3Sv?0(&1&WbrbWE-TuLHR{f#~U1hg` z|IVGgs}QIE)a21R^45tJ>|yWPy32Wscv*P$XJ`6Y86k^TVOIm+vR-Syrb0jkK?28|L=0I3H;k`FnxMD%TE>2OC9n z3jb4Je$JB={Dd>8qJ^!Mah!4DZ59TMYKbS8ZejGFyZnWYzJdAAN+|J+5pXW-<%Llw zbyVgvM#~H`6*nyCtC5)qCab4&l*`-21Z*lAjjBEeEq(!eEE8C=cP&_Q?!-(6jc`XB zvUl~njm9_+eQ7f%8QDH^9o95A2=H*(36;nlj(t#g`0XALb-2=&FalGF)+P*Jn#5u` zX^^so+3?Xuuwzf0`k4E7Ioy4itF5NuueYv6p&FQ&(Yer2CESM`)7*s?YdMTkFr4c! z&dW+DD0742FuN*Ij(68r(VYJ0TRb#^|Wx{;uv$#My9bMJ&HT+^_q=(KTkbI&5w(<`f`> zSz}+tDA=+jKR|S;p0(OR0xo)u(hRqm7IsMywc$52fj2CH{`DC)%nky;1j?lIe4PYA z&cPLTr)3oZp3Z=_J-P40!>pCsgM(THOYZLY(kJW3iH|msxBEEO-JvTUZJyo;#(k9` z_u5|W@Hp#-taE31o1MP^)8Q=%`jS0Pws+JSY#^Jaf?s5zE3c;`qYRH3!6% zQB5aN8K5J7q^~ z%Qj5n@sRkC;VRH!AX!jD!#d~)L`n&N5V6U9?EIz%AV2fgfrE2Ckuf-tGL>jbeuv~} zR!28q*D;aOm7RR#U?iz zr{NK21F8p^lAdgAAlKPI<0Y)u&8uz@-m4Zic(GpDTS-6!Lzw?agFopKf#pX^h>dS5 z#JxnEZ{Z*_TB&G1Cytr!>&Hz*k)Qp$$F`)E+J&ya__ds7x&bT(XNK%nLrP-#_E_EmI4IerLel0%j;_6jkMfHsp+6lYVxv62T53FHY z1Qv)bpt%VR2&E6#S51-?Q&Z-$#6xRbOdjUfjqxlP>G=?kyvu@M_u}QhWjCC!j*d;Z zlC@X^dvskPh^QhO^gAjF;E|TrR&htMb&Wd0cw9Q#UuHbFcaa|4#wJ~!PhfIx+cko^ z3r9C2t76)=oxgE2f=8x+@-u)1r>>Zx>pP;In6b?Vv$FkVB)s^a51kA3yc1`i`Jnl! zu!tI%uESn%1m5P8p?;G;oT;V|D1$F{pmdT^XAY3ZcW}gi{M{UBGkiL#^dQQMLm%vr z)AqooxIHt1wcWzhwBFiorccx0DjR=-WQe9WmCJamV9u#Vp-gXJ2tbc6@z%kJD_H9j zB?d+TaFrYbF&aZ=a_?p2@)ELx<8db&tMO2v6T{FrK;hO7q{L3xUg$bGaumlM=VVNR zx~bAZZYx7vv%ZX7{ZebGOCor+noZvu0VT2Yl(rdio}H6xi#7+X+%=Wyj>nU)7LY8j zy||*;9y(O&u`0A`yAGrbFS|$Cyyj1ECIcY1yPqsL_4y24l22$Bax(|1dUcCn=&21O zUT8+94V_-&l^@uXm__jipPK8Dd-H=r8`x6k(%yy2rPLv2;h-TO7?P9X+8-t5g%myO zCf>?LdA-mA2%`jkN`9#~>7zEu`!I<-;PFJFgALMWd`_*JzU>Kz@5n#;LEQ168;Ky! zZ~e}HmJIk1PuB!qFG4_99LckxkB9JM1ujpfQn7!bmYitF+E|n0Tm4a z+e*$AlYx^OBwDR-%dWlrj!-E>3aE)#JzvK_;-AF7z#UTW#;#i0S$5iV4`-*Qx@aR; z!s~sQOJ2jpjvrk~M5*luz2rokO=V8N0I;%6w}wh$;7+gF-I}M+P*tW>R%EUh8h&MF zsKIEErHpcbS=Le#)q=`|5Z@&i%m&bK;x{X0q4#)L%~)ixnit)Kjx-z!V0~NLMN}(H zUghM1e$CiHY{;$YwFf6Qw@icSW=KwNP7bwR!rz`yU!()F;!;)!pjaA#*}a9ywdZD5 z|5_8|wsm(ili1*Gbn|?ON@-gWL6>_%eusyZ#$?q=SSdt2a%Br;#sEzn>MBifOhA(q zB<`OkNdgCci^7JJ&9K$-d&I?QUe2+4>Rx#N?>ppcbA(McLPttsVm+q}H&v~RGKn^# z=2se_c7q^Vk3^Z3Bin%OL*-R4P9gxn^%loR{0_<(mww5ddp}v@?Tqdibp}6(J!dVX z@gr$?dz)U_MX1?vBxA243p02tJjbFd4K;=`d50V2&5KxcD6}B9lc1SEzGUytpqRcG zmO&h%0m4g|?rR{}Vw3Srq3+`A1C(}@2?n_c+iRT%vlP27_jL{ME&yvHVY8I*;I1Da zIV|(se7LUUUTf{1*-5QbX?O@hLSkF>N_}Zvm>BXdn6+-mT%{?oSBYDDLTm0H(=%S? z3K)W^PQw}x+AK*c6Px)AZDW8VJeCLjjfMC2HAk5H!ms>DfLTRrvS(QmxFhN-2g&P| zqfz-N8m}5t#AzbqUxvSh~%PNKv=jJ9uUu-WXEnqmCAh_r@Bv z?I88~RgYG0{IvvW!h@>iew9tj+pn{kY~jS4tLlaUA@%OmwHQaGdob3zrB~(sTq%C$>hjl4^3~-PP42CcwMc@^ z%X$W;gGA?_We+@EZTD`@-s7j-K6#Js98+Z6$1vV@=u6Q*yZTN*dv`*}-XLD50=-jv? zVS5VG!>x6-xK)M!_Ky9kfX`xZi^Y7F?-cZNZ{GDyA%jnP;4=m?VE?_Ah?2kH;vfBx zUC=uJTp{OLlMjS2`#w}0<86pX%8@I25#O3~DWde~%V<>T(X3Mh>@{PRa^R=>E&&@_~bJ&7fr{ zb-~QXExM5}Jb*F7La6U$_k>uku`!q&mzOwh3`6_z(kteO)B`U4; zvYGmt{^cjBjv4q#&i1Kjm3NcwvtWz2A!nGs)xrm)VpPASkb$xK=*!dbC>A zm8bIY`aZ-MOCA|evLUcx(2 z! 
zaFA+`wxfMWi%qUONWQ5Kf7)IPQ?*wW$Q&z7EXr9BqDRh)kYHrgZ!YxS&LY*^;?H=B z!$K*3;EaZ+xBSbjIr)I?HNdxnB5C@<8-pak8WLu>)WcClQTBSCz*>arad+$w@%+0r zkfqAfM=wcoH~qL)O0EiG$ic0AAmhjwha%!=p?Ovq&C)CQd!bOc|Bm)g|C&(U6g(Dc zdd+Kb1h{CLGCBd-M1a?jO)MV-TCcIFSzPdeK(u>9+RR>_r9!}^E zQg9Z&RljWvLTb}eC#`~Ma;ko6d~&0CL~l5zR3_G5qOhRYKvnSv+R-Z zjv3`5a5R{<50sqYOUoUN&Opj|1EvCA@sI+c(L3rnEvI=Ng1O9GGwcnvi%D9hBtlc=g$=BUVIc8{Y4wG4Gx7h zeAQL|33(r94-#~aWqKuO%E+SavbUFj1P|TBJz-Vl{#d=+&v+FG8V7tdKdDWOd4qGi zyyt)$H)MWJSmclK#@TsYniOY2QMUL%3)6-#G9Vt&+BR{3(W|IMdCyh~h_54Utlt0??qKcr^Jy#H+Et?wYiO2C&u|QZi@+-Qtn1LPw92i>Mc+$2 z(w+!KDRKO5(OO$uyEi5Un};Ose)=w2c$UYupZGK$dqd3EdUFL*^F_2hnm@52L( zRI&Ty@Rfz=5J$S9z$YGVL%fenfi3GP@RYR06%1iY4>Q6iClB5(t*;{yHe2eho2$89 z%#rN&_+bK=$+SJ>pw?>(>yyRA7oi9G#-ULrZf{Hvi)q07%cBHfD z_p6_rvhR>=uZ~Qf|MI_Ae`h>fq-0rJI%{2f@#15Lmwe=Kz8ZU|%shO^nED5BwY;5g z8A#u-)uHWt)c0P%%Ikytrt(?0MhGd4q=2f&%xd%b&l3H3f0&OY@f<~PKQekdOq{4V zQoJq|4n?pN8J#5s1w}yg5-vH(wT{q;q-&&~V+Keak!nVj+?CUadWacT29r8JVS$-T zRNpnv^*%0zTBMKM+`G3`faEd*-%uB-bHLr&zO|3jx2XPg;czT8v<*q4;Zo<-@TCQn zaC0U(CQe^BJ@YDax7e?8hu~_V>x%=Hy#6pzvzq?;^{R@lS9r_`ez4!;kLc_p5=kF!fTB9*XWqFGvyY86KvP z5?Ec_EH42Fd33HRTlauf5`{`p(1d7Ro8DTdn8N+r*y%EE|75(tVx>gUAgnVtcV=@n z4DDa21`9P?ZJqJwIuX&mutNR3SRzhGC^jN;NVaBRvCD^@(yD@^1^0V{?}$C=^eN`? zMb$PBk*&cn7DRWeUIsdvfCV3%WMZ$K-wXBtl1tyF(OX`FiUlv{EH)+9og?q#n|c0M z3*hh$b2Vq7(oG$ElS=cI@ zx%aP^^-T4ThklP_U|-4M?gPGmi+=q4&Csi9V$yV?NC1*nG$`KrGk-Jpu}>XtY7BTj z(vn2v^Gg4WyO(CJcos_l$-y0$<}KaMv-oC0SR)_PAUxO|RxpU@oj~q{=?ZjCv^>|* zB*krDKAX}`6jx?2_)o7>Gt=CN!73L_ruE|tLkS6-1{A@Dxt-%~x{ep1ifbY2?Q6Cz zB&Z<8oxz^S_AgD)FO{=ltnA|}W7mt6ZsN_!qay;XoZ~-zkXdW*w0)+tur~8=9I&|{ zvWthI?$z9dza0~$lO^a5p5l+JOk625lUOpOQ}c&w4CaxZr;%RorL}TnxaU)A>@dC) z*Ck=23LBA>V#p>=Eq!7lo>GJ@OLd_@CAEMg75V6xgMA<|X+^Pcdqe^UoH84)0ZoC4 zZ`10G8b?Vl7v@QigQg3u+ctJ1=Vgt4u@1vKhSqb0i_TKuGXkl3tM_{OumE4b8#4F-)v|IS|6-T=R903=*4uZO(@uPlxR|G^9ui zpTY$)tGuhN=Hk($4gl6_kjn?PpghrMjqg#-#`h<}8Cw`zpHVG}^}?1ynzci$yvRb7 zxc%nGe-1#T<0ZNrJ}|$mU7SZt3__gp>>2dFmPRpI#d4@T%su2(bSK)qV|Pn>xCmTa zp|4yZeT@H{ZbRjMJD;xph$(Ope!8=WwRZA3=D>TooAl5-()RnTZ3^a1k+AlCgZQMK zr*^Jxlw~$a2cSc?r4Gs6RmAXlNriuAP$~ZE{U+IvoM|RqwWoSxQP{IY{uAyx*Cd~q ziCCUi^mM@hVc(O|eDRy%2I+VPg3kvD5pn0N>LmCoss2nSEFd)}V zD4@?zXz7!wRaW?nFQwO56q7T5Cm_}SOV?O-nd54>{!@Eal!KL&Exp{w=rix}ffM+} zxnx_;Z$W@7wt>wzKoAq>c>337I`AY!gs$K{=a%MsW9bzv9QhH2ZIo_1D&B1A| zgLoJyPgwX)lJ^tlkl~|PufosluVr69AUjg`AAv3@ z?~9VtsLLJWjlb=!)4UkF{C!yvfx@SLybrj;yTXv|jpZ$G3sRH78N*)Pi%bo61z{l} z#-{-5dBV#y91+WZg-{dV>>D+2RWh(M48&vc-o;vB{ikWXWmX~!__vU$lC6|6_jmnW zI47&I_8>*1DfL+2;*4=jEJk!CZ+5A(n>2f#zKg~q)b#!I(M4k4a8{e$!UQZBEem{Jb!l?xCgN1`s%%&(S~ zF(DxhX1Ysv$yEq$!Y-&%7&3IuYy&xAnv8H9duP)0*=HJv_@N8IywW3Rk|(`#dzm;z z3h|$yxV9MG%2;O&D>A*JzmB;}AG^fXbTQk$f|RXH@AV(s@kaa`0;Xy3CIZN%cdj6-qZTR4IL)A|Cz9PilGPa%Jd8SbG3SxH;)~fox zC1gYV2rqkhKHm4}+MA?jL>WtSoc8JxEieDrTv6Ujs^$3}=(4$l*;FMou*en!o%x0q z#`{@V%-UcXxZ3N`=4DhCezk|!p3}uJtV?6#=I`?a)5YWel+CZvqb{qP>e#jb9qjy4 zNOuVo_j{b{0V!ORs;O+1p1XoQd=5nQsOIIzb4)9|zNi%anyDGAmfyovaPw>kEP?sP zSN7lOETDf=wA$Wv+^w8_y51zQa%HFeFIHWDP`%mr9{eq zGbE1}jA&==1I=BG)>#6mLjTmoF_dx*+|lhsA9;Y$gh4b{K1)uNtLgEO(#(g{2u$!|((@$zyo2 z<`LBeP!N}h`mMKGWA{t!rnlwoj;fw&>+;)Mxcu%_%0pto9v((US284(@m%I9$@ZO( z)aO8xFE7O6ca*{qnAxUsM`zMeYBuOI9cqilxDVNpaPLron<-KP0b-q=NhR`Gv)ZRB0@nS zvM!9m;KpL@L-O+hHMItI)(em2-9A#DUNmGtCV6a2yr8t+DNfGd2OI%O;|nKmUt|Zl zgJ(Cvg>PG{&3rbi$WzADx|-lP@0uA$9R?9naAvZU`aa=0*P%(mOaBl~-ih#gtB*+~#ux&j?R$9i3@T zTEvfjzUX|5(nWm?l^?Zo)w6$kr=1fLDb04kJi`8pREkfftTS!jq4EI)2F3<)TF`6* zjQYQh2E4(4pk&=yT=SE8P*6e$&?j{#+1OBy2 zCmto@kM;uNivOYQU?n7=FHUql^Q^z)Oa~U)pNW*Ol=EM&zLo@zGtMYc^Qryuv!3p{ 
z4Tt(+<)ePz-8QF3W0s(f-O!Cl)|l;3J^N03%G|r~%gM*cRf+kPR5!&UzX(o%xDWw_ z>4#cXXjM1``ExBTHIuk_(Wi#wniE;D%xm=CjFbZ)+y1fvR~nvoO%Dy z7Z^7n*(a}E8Ho3luq8u-5wZfHfjR0caB@vFgzg&^dNZhZVBI9Y65wU2#VXkf+~DJljZyuT*z1D@%q5L# zXtpnS%lEj08U5GY!kX^ji?+NhL|z1e4fXfPA@nG~gi^I1a`uFu#g+bxZP*iw;J|(6 zVDq*R^KA{iUEj$m?~25LITMz(J8?5g45wDDu7S`*7*p~pe!BpeIpHbC{kf(mIup*c zi(-em&M#<>d!6$RtItbEASVJ10k-^Ff_=!H;)j0yMd@ zKPO>wqC35{S%7JsuFU}wiP3?iWF?+k%&Z&yi0B3k^Z2ne>eB7)ZuvBY`ee-VssFSP zrJ0NfxgnBPWKpr^2I55^ZpLEAHN-W*G^d<7;*(B%ZjIa{CbJh3(QkE&MHTl~(})Dk zc!!xAL_I>Mf0TAce)#pkWko&y^k-=uNt$iu`8t4>zgNJb&0d*)~+IMyk7gAN)Fa_it)y-q|>x#mNpM*AP`^ zS|0zOWc4$FWJ^x@iUY?>c>F1&Nppk^xt;a!s03KjKC&JQ=ww&>g`NUKf6*+c6_TE_ z?|Dl-62>fC%ch}JKBZg);|yHh_Tsi}%ml(dixODpwb7XgSEIFwu|=VE?|vy13_hDb z*Psl6X=01oz_(!TFE5xxe3xz&9kcoCAKGnWGlV+OP>W&tW%_7R2@}}GEPn0)_HIwI ztuo8H@2aE6*On6PykJ3GrZpF=+1SYQ{+#B|$TRK@{@9o-vw-9~GvhtFpw&kp-QvtJ z6U=>ThEebMf%7L>A%k)xc{xxP?FQ%6f}?-NZ@^FP%Tx3Qd%sj17ElPa#Td4*ZCbRP zheRef(a`BfIh|~uJa6hK%!#0_b|d1NaS7oqMbDfH>@&t{Z9g!1^|5fkjj<`~Sr5HQ z#@Z_oqMapU*ar5R`SStz2s0vVIZc5Og?M9`E##!TYk1cu3T#wo!Yn?9D@A~j z=Yy9t#?5%SVoWr`j{8kiyr2nVMS|aEA&;V~S@MZZrumiW!aK%X_Nkb6rLd#Sp6n%~ zt?p9cJ~zxEbHZFK%rR3rrrNUhkO-EeB%=xOK`{p(gM5^#%A^Hly$r@lVV(NG=71Hk zbR8>Q1M7t3!sNQO4b^yP1Vl(pJnd|Uz!rZbcY1ZUb_OXIGCrUBxSiUO@~R1)hA{_Q9Rcq%Yl7xyC#XLsHhY= zOrhOY#^FL_wS$LvLOObO2oUVk^aZoNSrdHj6AM1UMDrJ;N_xx=`9&I<@gr(@V=I|=- zm_slLe2-)$c&znT6k8U(gS2Cd5o>FgJYtQlHsOxfK$AonOo;hf5G07&`N_m49uKVy zNqHgnS6W4wIOZ+8s)5s$#41@$#eBA=Ao41jDF|8kPt`Xilo&MHiuZFTyG%O-wQRc1 z39Z@hlM=(snsDX|Vdo&r77A1sR9)AFjG0Mfn)f=IF-*~-5yw)1!Cp-c%*z(x_WQe; z|0L(y0x~)8h&|Qs{5LTaQRxHa(|02`fYmgJE zydO%V$Y3QM9?UJWPKwW&$^zP3;lOU;Rz-$wj(ssXR`1V5TWb~uX`Bi1SJ9cbr1A&B z%@s5M4i=KUH7Q8y+R3w{Z09KyCxagTB2}Ml!RO7+vohoBX7SyO+1D^E)VV(M*nk^8 zHO#l1xq@*HwD?P-jW)(*IR~30gRC0rR#eVsSK^JWB`15*{i(Ea!$t_-#I5W}`nd^K zK==K3wYPbfB!XiQVkVH2x`qc6Tz(A;it5Ir$~D8kBFBn6pp9Ahpx zpXwmb`g<))ZE-;z!xwrI>G|XmEg+Pn5MpWZSE(jCFLvaOrE!y0)*N%R;gdJI2XvxL z2a@fuUr<%kx^>6Gznzjxf2ZTxi?LjB*>SxOw=_SNZTQ|E(^q_7Nu*yEJbT2xI~ik1 zY`lyD*s?iuTk|3xb})~~jJ>NP>m(myCr{1k`xEq%lUfq7Ii!Dc%KpuwiJG&Lw`|x{~W!r3vdsUKyz*GzE&nY84obNd-mUuL3NS~-$x>4K} z^fjsG*X#S%$)vVyD!OdrG(cBj)~R5B!#3A@_GOdk$sNxj><4UKqGx%9@v1Bh=oO83 zwPYp=V0`w1*dty=X&b9U=U^NGfam7gqwdiS!4-h?1s!E)74uULTJcF?RfNY&y3UoP zACa6H^ZHq6!-fb0oSr5KL5BfXTK^0B&J_BYW%-|O`aYHJ?$M71Mo??n*ME6D$9@9> z)TK-wp(ZRQILqu~2@!zJ)Sm*=z!3Bm2TeS%oc&!m23KFd2KXGhW?p1Xa<)(<+}8 z8c5D(Nlj_Vf}r!NCBDk#CTZk&_IX#2*ojP+9BJcNU)^_F4$$0-C0)}QAIfTC&nd?g zspw)v;2B>ce+yK#Y19$b^eH)+v z5Iu1;y$m`Oz=~oWOjCwY(P2s{I5wNup=Hx9^~dfLQlpDrTc*Sh-3R z#Y|GzDvDvlw7=qo>xtjgpwvUmxS%l$|IpSZbBqDf9ZQ);97r?xMcw1W@lBZlc?X8nP@6B)}ZU zF6i*Z0EQlG1?Q|ta;{N71H&r+0%GYFj~@w^{St3KoExRQi&Q7l!w`l9$_r#SHPD^b zGr0?je@_hhaA3yAd7FQZ`VJ*BOM_rA+b1w{MmHuf8H`8jwT3h)9ZVuq8UvdkZt|nk zQcJb*Ml?1)AD5|BtFD>sPd)VyThrw|$Nnq9q0U%R{QYD%rVHYW-LZFRmy$S8->8di;H-=(YI30RdSN3j(Hsn z-U#qXdPYadqIIqZp%{VIxUetv75zjXbOKSnXUd4QD;CQ&t>uL@7BIEegId^T#&^hf6X<;TiG zrLU$LN;4LrmH%XQ3nj z*S9!6oVzVnEB(Kip{eE440}!qH&&^Yzco967mC&@p=JqV*JP$+on00vGFGAa9h+qN z+nfYKOV%3&OWKgmVBizhrbYKG+X_4+Zw^EN-0uTqtYPTA>sYart4>U&?G6$sW|=1K zwY-{wLMNxX?76})hljoT1HjnmnHx2)j2l+!Ov)v==wlbnUbVdvOB~!@xos_TO_+z` zmO~TQl(z{D^8>kd#+Buv*U%Y;A{OfFm}gZKoQf2g}kWzu1=k7BA@(g@sERyepc)ctt%H~!dTUgZ|k+D1B|e?$a-`cVm1Fx0h#3uC~nB^IGa!r zfm0;LAIj$Vk+W!?^{x<$kddClHcL z(1wp1BW1^pSxsM+zxU3R!cvs%7UlLVU|>Wzt{GHQDU<%ysTQdff2<_d;!T4KF(`(t zK6&@G^+!Y&_*Y+EkS%5i)0@Mw0QlM4R?nM~Zh<7lpB{rUMHZX|#Btac*2XyxDhH$S zlzt&P!h5aUXNoJ?pX}{%(B0%y928Z3Q)%dKq;=jirysTF13^bwe(LKIiWQ3$ujlzJ zR1!pM8=NI;bnoHBmJKDyvL^MDqP$Yjdoi)7cl=o^ODd3b^&4(<=1QmAA(ZWEF8Tz} 
zgBOJV96$25(%89Q?o-=6%-`4it0L*H@^seP1`l!u?1TtvWHS(lzsx_aF&0JcnRo9xEHpO~EH6 zuKX)za8o;v{*dIIW@uRACo(Upm7RrwDEEfhb(}Fa3xcyls&i4^RE1QrC23r#gmQ-J z&=snA_{^m125k3C!#KKu$#*SUO&L|pirtW1|Hv1Gm`h{QzGnWt)go+cIA?I1B$pkN zsaUGaYNja56!YdmC>=+HGnsy!0SK_-1-v0(R&hRIVv!Llr{M>RlON3)(eR)p8NT$O zPd&%;OUIk(i3N{0Rl~se&OGOxb{}ymW!iW<)lIB3?6l7eR02&kZL;~_-w}(Ef5Qcp(W&OWmeE7s0a@#gDuv*<-IY0ST z^(Agz<8ars`mAR3L>zGv?(A`e)8V2ac1%odc4D^9@#IdAAbdlayhxS}6)c6*mhH!! z9RX{gA$O#7Azmwv5|5;Z^%~P;SEl@jHYFrBh+x83~+K}!DP4T8QrXw9P?3L!7QMAAzmLm95 zMM6`mtdpsW6PPc`a#}4Uf2ZM9fnjo5k`LdsouGnKTHAehaSO^(J8q!!eakn zT$3K`s9nn+4XLz4)|duJ*Hz70tihe#$;Y z^U|TK{dP=YA-@G{+FBPo*D`cCap~OIS>#y1C6uiI+{O!OuB0Lr$w=@CaE5r3g(|Gm z{;Bl~@a|>KB(D!9%)%6>$vQXc;&Xp@`CV=kM+Gb60d=o(qB*M)o=itk=Z1J%HyxKJ z0$fCW7ZVa`B>1DmV!3Yi!RvIonfLi~r@66z^AG)clR2sLpUvY9I z-AQCZQuFVGC~OzDcq_9d|8A!L<$@Hh-~7C_R5imkN-KTS^F`KVwKc9lts73Hf*9B2D=TPCgJO8C zPdNJ~iZRJpt>)`sa-S-=u`^Lg)oB%|Wz^>IJ5nUTsrZY|q!j(%F&23PdAn+gBw<1f z8q)-x%WQI{_882MFK1^rS?$DZ)VEiNd1gnf$+ zgtM&W9`@b6(4t(dF527FW=B6P!u%Hn}(FqMN>a+?T8_UO#{ zK|Bfhk|qid?>BGizaq|B@}%Nl_e;aI+9WvhyXbBX=9(b&JZ?-K_$P*j0u;N2d11WO z)|4ARDPa`+Fe< zu*(ufUsD5USYcwFHC_fiEB-^=cJx$PNL?vWh#@o>HPVEa7_&9A*J-mo^5y74~S04cSgPu%9>@kGh$5L^Fv_YIs zl401j`NyOybIMgwD?ijpbZsuW9E$Et$5IQN5!&2Ls# zf9MXys0^a{%zPPQIc~?POxZ!u^uq`BP@ zWnj}Z9!pjxA4~oiPL*Dc6gjQC^Gz?~-A226)UW~H?zSOUU!J;dHVVxeXF-@l9fc+^ zZvw8>kj8>m4=VsXVg8lTHJNJLX7@P%7YuqAsXmn?Pg1I13TdED)-UpcpgmeeC-zDV zls7IF0y$ITNgie2k-@KGSs;liA3Eu?=A%!cOZR8#U_q7|zPP%j7{^4WmH7wC29b=f zIBDeH;gy|Dsi5?Mq{3RH6rz$0S-Pv0T%(?1l@>oieVoNzd8BK@8GoOqZ=TA*E}@s9 zpSN55(y}KHTiPUXVj!{VUyb-kc9{$HwRD}qQ|Dwa)&-x%?ad%#Ylx(;#nvKeLCT4> z$Xvz0dcob**?@OVelVDoO$I})VyG9H#H|ET3zfC!@xX;}?TaWzOiI)$l4Cn@n&{TJ z6?Ckgg*s-}R7M&DaL{N3%zpv6m3_RNe>_H9FbWQN3cmf~@z+1|{qc3PC{WV3Xbhbp zJyYS(a)v&wkzGN75L1F=vL_+_WoDZH?$4$Rt~P=v_nc_&W%xEK+}af3&AxNaHeeCd z7aAyaDVB?kt(?=d6T@_YqX!22MH4`XRNS>=Y z^avsg$J41i9@eYJ@ym@700$I=F!!R}oHq_Wtu$cAa4&Yz=&w$EiS50bS}NKFVRq}{ zgdhfVkl!bB#&ngMd_+%seU&h}GBu+wip3BVrsDE}+q~Dr6J0qF0eA^;AO#@`HXQ(z z4cAC-5psm1+JkN!z}r}!c`o!IA4f`Fmx8kA&I0bfv2_n0lW>TwDbx%?@Esk54SgE2 zTA&QX-IK}%iw7q;u{iBfM%F8%V+K(#jO;yF_EC!)^jY6tR{=?EFxdK5r!f-=Rcpdi z*H^zQ5U7fo>P6i2?C1>yoPG-MkQ*S{LichD)RuDbIpw)1GjtEiVaE1mbOBSum12YF zD8CeqFZZ_N`D9DiyCHR%H!5AhXwUE4iPpYVnpG^e9)hFzYO43g=;3BC( zK-pa%dOUl1V8|&VSl?$%3jelgad;- zjog|m3nKy6dVlh4B6aXKx;WFTJv*5ex*JP5B(N~#_q@-g>I)x@CVyKFpm%cAz_usy zYSLb-B`&yO0J7YK0%f?zavc>h);6Rpe%b*MVo{)Bp2Mu*D#^7`YS%OXf=H+sE(W(< z1R-ySUur`&?5u6{erffvo$XekshN3F{Tq=D!Nu?CwP~Pt5_aEd5~fTit(gFFw(bS~zRqDS zF)&Ak@4U4)*(J6!LJ6{dFmG&9Ug>%QR6VT&kMkUmd=D853;9up_F>PRWVIfu79mTq zyxv7?X~b`sf*Wsg;KRxO&u=stWn|2-)E;Q0RRJVanxRSSQo;xuH_cm+ZLUYBWxsD( zpjZhfI7)<|7YlCgDzXe(!rUKj8o#&>hviJr>1u5$PCfnwU zBqAVjxzsHu?8S5(6DKCJra8IUB!x*;2wxccUBQY2>O+DrxlQb0S}EZccHi{Jg~?~hg7@;dy^+#SJII+!$tJ^oQjFj1 zj~2&;34z*H5D#w5{1WVGa*glG8!J1u54 zxCL`PS*_W$R z^nRHFY(MS4;a5jd?b7bxo7>-hNUqh*ly!Ywwh+bwEQB)FVe0RzDFNk@~wWV9&DV{Yw9xOC@9RaFIOkVI7k zyn*K*Ah%Y9h;lQ@{*i z-XiMisw}rmT2HIIf>%&Dib4TVEo|umMcu?Jt-b7$9?X}nj?@i7TvI4HxScJRL=~6i zW$%?U4!$nnG1J$W&TjMCHBVDEycOPV0e-Qq8>a|>q|qvG zlQ#9Mv^&nFkh*%ZC1LxD672A!b&=)>$X;$!Y*Ip)*qW{7kc1ze+4CvW9By%qfofj|bLqusU0b4=9Yo!%=5(d7-^mE|Mtb3t5xUQTJP{WRQSAOEcw>VSKpwqArc z;ES>vnQ|du>o%?Lsf^N156kd--8W9_iM}|7Rx0tvNw_DPI~eJ+>iJGRJ93QkV+r)D z&rsi$HBNsHq-KS%ppFE$b+UPckoTr{>^ZJm!o|sJ_taXJST!fa-Lz%$dv@NJ^0|m3 z!=)hB=MPowtJeBK6tm-_lM~A=GSH2lLZh5n7FKt$xA(&%pMMOcNVR@vTZ?IG>`dVi z_mNN8&R6I=ub;eq`%Zq=RkQw$HGh=uJB_nvX5aQ_wZnXL(m;Lj8WLhhk%m7A*S%UY zWMIEL7=Mrc+M~V_Kbl6_8cG-4c|f%|r+UmYddypTxnTM;H!-2y3(G^b5{hJx{#ZQ2 zh30+%EDb~=-hr$jm8oA^x5lUIfdrmJ;?<^8#g+=fBO;-;mHzyyPkXw?v`&pQ~s2V=u-fD*(@SD6`|n6 
z$L}IH$tht-)#YrUzIm%wofaA0?%!5gv$LvitLV>n;g&cXX0$Az@$VF&-V7eb(6vd7 z>MOOVP0Tsyk}`4ZOFDGa%;GJRT@>&IY{iV#pN*<0Y3aYID{kwm^%!$74{)l4s44g6 ztJzBNlB|MuD09K*2#rU#Ow^nxn{a1Gq-4QGF;W%GSjyKFP@dI@U#n?j$SCO}H5yY~ z*xRXG#hpdbmz6_!oQ7f3K?j_nnN&Z8xcc}zN8C2+BE7I3w@%=z<>{)4?9PFxLnx=X zgH29ud3zKl2V*G<#bE$->{o8@oA{}Bi{q*YVfz_T24CxjXFsd(FaUG@s?ib)&V&Fh z&bP0qEHJx|7i24FIGH2+`ZR&ya?#&cr0RIzD#m{cy9TdN?MxUcOP7|(T~XlR{$}3m z33s8yi|`C{Ns5rFz}5MH8OartGrv>cmg}$K(FoG}Q^l}AH5y~Y0q+QCbFaPV%%-uC zRh3ClqK?gDOCQy-`a-cqq7TT6l3m`h>3{$mvkrGqpYFNIk&fGep1v9ir-K$_?8D3N zwr)-S3O(2yl8LatYpd&H{r%B`GHvw>oCyCvq`?4g%OQGHwS>y(5xwIqG7qhu;E?8U z%OVLNiRB-Bsfr-M!@E(jd@UL+tH7qoW3a9#J9KxQqNC-&@IaQ?SOCaM4j%G>s3b`u#I# z8lu6;D9M7qW5G0+pY;`!A4m2Z^{78tY8pY*hN$BGih5wc$4@kJFW;9LiUJuZ34y>aZ}>x5HDK2p*VQ=60I2( z6`Ba#b6K!z|Adij>qw_9b@BV#*cMTl=KDX!E9Bp^{Cy}aS+VK`VVAqiNfIQ)FdLEv z+G1Rnc)Tnb=k|$pqPL7AwC^$fGya(lp|4$cg1UMF)F7Wt7UZeCu*=KT^8b7WF|gYe z%-b zbx0J`aQEtJGuMMlKiqXNE2MVCJP%S9&?(%KjRB80z9yvii0Nf$m70{^yx2akN z=wQG-k|Z_Nq-&eBZwQA{vG*ye1mi#&5Wacw|NTDLKz}E2=#}>F+B%^vPb#U37}p;u zOkhlq%s6D^d@!IrW0^YT4kg=6lAj&Td*f_DtrH7`|BuV)r<`_2yanr(TQW}xb8YvXmiduSJuDr zThp}9X$y~57{HieS+xJ$z$fSe{OwI1B6rabiglgk?xNdMbRq-xoO}qhDCwaog1o0A zvWH6Y(VoGgaHmf8bKJJtu8RepEJR zq_-58YVhe)&G-y$%Wk4|avfa?@7-5r@4hHeZI(cttXvP|>yYRRir>g|(5!o7o~553 zugssm__}`f$M$kO?Epr~5vtj+GTey-;{cdye(ZNSP(C?RzYl`8FoG->r{8YsFNg}A z8Z4*yPZ0hc#{d*M*7h=_1y4yR>GIw}t}+zmG6F&a5AWrf+U} zSEmJA`PMkr=VPrdv|1^co{EcrRG|;BB!u*pS8LRTZWRc#Z&oS4E2?~=7bbI$1PqL# zBHW4Ca2NL-(3m3WTOd>KlEv7O?Hgo;1{QQW|2yCrcL3|)>=mI85C4{e-5VO1_A^T0 z!M9jlz%mv$-h~GMRLf%j*IL1S`xwB9e`6HzAhhBc@M8GD2r2;GXaM&L=}2n_I)nZ$ zNk$Z7XOvZ4k{^@30hkYRXxi8>KszKr(fxlr^uHtY|FaE^QbCUXmMOiO@a1>O?U7ME z*I#FV$=<0+g5Q6&#(K5?-+UO4dCpCT7*0%-#H(_o7~>d!_~_o`v;gM%y*DSH%3#{Q z%YJKHUxBeD;tQqt6x8dFMlkrrWZ>V^%e4Qv@=*Im3Hko_NJ3m@=MA(uTw}0CE7Z?T zN1d+Hk)jfSuH0T56@GcR{`ckZ^P?EGJ2q+~R7|i&9vuH^&VGp+5F0^2fINIY{ zW15n3(pRii4CTZhCBP>x`iP4`tt(>;66p7+lF83{L;<3 zZU=#y0}5X7XsZKPF=6Sm#(+Q~ynCEeGN8Ewu7_Q}*4|m;Xws|g9-smFaV7|ll7@@o zL*`Z_P>uiFm7f^D&f{AvZ-DKGf$(;T+d)LsWxTKzXhoHO>*>wrr_a2Js|k_tDl;uH zpdud|?r6bfSt1#xBXKqz-*F_g!QjTQ58O6>#BsvLoi_|<&sc7+Xo7JpOUY3M0|VNf z1On!?wK+IJSql&~0~^Q`=mq{eLbMScQWwLfTp=+TK!y;AU6-T%``vC9n44x4BYE9-14u)2vwhsL-769jSuZE%5C@D8- ztfz<-79kzq>!5HcCkXw-$aLj8747~6cXoO#iP@d*dw@c|^A>2pn0w7MH@eE!iXS<0EG>Vf=ows8Gs}@OhG=_m_8j6gmv} zX_q12=m2}Tdo|bo|uRV60Y1$-_H(Ip#FEk$`Kh{o99NgOS{7M1fVwjL!lM_V96GgzvdvIC}i+B%!u zbBl_bFHFrYt1i7hY;6BmMY+hj0F!(Q8uL4$9SRWQ+n6}(DKvqcJ29b8y#M)@`Rxj| zSmV5LGx0F4!Rv_A@|EC;G)-l#*BN0)*PNiruD z;Tfl~ueNo*49S55b!E4@T{_BDGwfaju&ZoxpN(@|l^o*4edacj70nfxJ#y)OLn)?j^<3lO*#u~`>HNPSlp{xyFWeXRBi_5l+8 z&RdlgWEr5Jb@gzIV`8fpo7&@_8tN6z{CDLaueY0miD1gMJo5(M-1pd^41^AUR zX_UbS#k?fT0XjT5km&+ zIhigw4IkG@FX2tM0cI19W{ZWo-?ZB1Y-CfI=cE(^_MJq; zh=kePKHNovD-vYuud%^KBxaS+0Tjg!#WOoJH>rp{2&}^u%^}wEX&dD_h@>b!N!Jrt zoeQpZMKk5THpw^1KTR26kP1`?f|3v)w@Sb^NI z($<%qVhhn?2~c=^z62!3ssi|^CrGyp{xF7Z39k%I;0`j^CaPUZh?ZWcw^cA2;2XTi znSE!y9)CaenJEFA!v=w*q2_&8%&0ce8tn1 zf+!AYN=4VOfXF^l5)dgw+(69)5f$lUPRF#o1KoP$Prd%Ywl~Smfl8T?O8gfOYZl~g z&TT`{k7wzdX3WXEhQqoLT^o3+Q|=II#fJ4kHjtoee2<7$< zxhy#N2HPlaR-=e&BfkN-7_+H|n}E5XJ@c9-C8@(xJdr|-W6horkIUGZ*D^zUtD(>E zaa05tS|5;cET^XPr5w{?zPMmH&zlUs3oJi>@z*bd08x)EwOvwPs8Eb!mZ-WolP87Z zQwQ77MhPKNi`ZKpAi7RszlIw!ZnD{Q{&Ic`6TwEwwgOTap)~~CX)Q={Z1waRaGC%ydfy8FCVK1tb&Sh_@*447E}*HBQAH`B)Nha5YTqSpttha z81In=ci$knFrC@1(F{8r-$+BlrKVF>a>Gpv$Dbm(^ok1m==k0 z>&nipx@$}L)KssjGo@*R?&}Vc-_kk~7GEQnnU;al&4!6^>H8+`{l) zQU~x(_+G@0jbT0K{rQb3O-g@;q7PuCVS{I+1?Vi)v`jV9!$N%GE{j0xaepXp;-ccz zecuSbXcX0FI~cnN9AF#iS=3w6;zAP=Nq*>62y123$UR0xHxa#!ix{ 
zc2Q1;bgg?L%nXh8)IGm{QuY%{u@kO*_i$S!CilIF0o&2U!S2)HAGn!I3yZMl<%{nJ z!x?{%<&A!Ro~re^kGU=VZCCyVQ$L-za+AEdOxg?sN83VyhAYuPBBY0#Edy2(_NavX zni*HVrHdIsvFEfi$L}<2i?x zpb@CHQ}<);R4y)Csrj8T%^~>{_NghAv|kAzlrdA)wfa!vOxg7`!=J~e@7|B7ZrSUl zE1YZZ(xWmKKC3P3z}ahg&Hd2pVRPC#niOx>e5sCc2&b=L$}}kLi6SiSFluKjN4@5Ngv4{eu6?YpC)LeS-ow}4@8A0vSV?hR`5~)kXDR}t zPn(^&p9E#kNRO$%Qgs-Jysy^QYu2p+M9N*k<=efC?F}17PyO98{EvaZ>Wg^IS+uyR z>v5UH*HW+0&r z0=9yMOY?aVaL8nndwH!J)nE2*OncjV`kRL;>&p}utHYf(#bU;cs?mq_y6-njrla6xB2#!X>4-;dP9dAvNg67oUwhPlQ55$-+azu5esg0brLZEj?UMo=#RN}`3aDc_ zOP~8&E1nle9Q>!I5esS0n-(H3wi_IL4Cqd@u4Y3)Po4)u^{t;nc7B0R^A}A8>=`Z+Libuex-iP%(pOMSavf!xGc+fdKoGR2T&g<*9Oi+L?7YF*7`m# zir8#D9eQRNZXlV${_26CPz`#1r{9-0yKx97`A18{06iq7Lllzvx zy+XPe|E>K*?`eE%%=@6L%V+e#VA+ngVejfeHPaxQnyu!M@16XkhJ*Gnhionok@;7j z?6aP~w(N(M)`Cb4N*9<8s&FxSS&c4Lky{TmICOWNpPVjky$!f~?9_}T!BYHPe#;mR z+=X>#Q*N77Zl~oum{>cbF8vn6Hira~*L6o@#x%*WBZ9%#j!^V|;2C|WGtG0TqoH%0 zTnC5TT6_58!-@CgzyE|XIZV*h*^3wNgT$;K)ChfRb{*vpfrI!<{*mGN&7)gOe?grr7{0@-M-E>idS^)ykmSdkKqeuz&vs ziEQVFtN|`hcXFGchs9`9=PCYi3XTI;?3#Y3EDoh;^*5w{U@)^e>N1nKHay^YScb87 zhw@`737}Cd{Jx5#Ot60^_h` zE-nJzU(4cI_tVaHY&3Xn^|;wBTCIt(coYe%shSKHxqs+db#1=~^PoeTJV+J@LRUuk zYyuyRag8te9@0^o;F zC&Mo#E-*5!XM62nQK1-woZmIsN_8~9lL>#KKytZC@h0K0TK}L4M329X!cN~J#I_eQ zWE>rEc$I;~?6Ilvd+*h^<-1u+zu49IVa>bGtQRxO?JEALOB^2?E&x*t>ySw&wbC#5 zxJM5-MUHjCC0YGg+XI1FasxZZ)w?=lU;tp3S}Wlv6O5gh5$6`cJ)bd@Ve5}_nO3Y7 zuu>V$Oicy?(+0s$M>e|CrUxDO2jv#oO7J0ZXG3r+2ytk6ZZ2>COnvV%9E2&!wtRe? z@Owh{&ikIxk8d4p(RItW<(mBv>ok3p+VUt0WaFcE17+`-vEJ@nE=9O$e7>N1ETU>Y z67z40n|{1ss7+6$iVkPDIDe#jY!g_GMH#yt`5O>7Ta%!Tfg6@VP*sv|(K^Iw-DmuU zDouOe646GS_a>*Z(cFbY=d3(3>oEchWT4smUZYQM={~s*vg8=_5bt0i8u`={IRBt1 zFd{6`vAaR(|54%!iIA~)yp8ehmC@dg+jUYE$UAQ{Skce2iDU6icZ{**Z~J{iE7C5* zp9M%!O>_o$3%BvXF1%N!qO|)q!h^f4H^nh@GWJ_NY9}a!ceH*+AwlIY4{%O$688@M z0*C%JAc>^%2PymcNs=F7K?`jiB7Y5TbEAB7dFKR5X*c-YpOpUxTjYXrm>CP|6a8kc z-sN-X6~4_N3X_QX2S(L6pt?k|It}Tj79i`!Zs?y5ce+10jrR%O4R|tcF!?n<6$h~s z;pr^kMF*=qNJvWsa$7mT2W(0x-mn|trINN_1@LeMFbKo z8K=TM;AZGaP5}T=xrH^9XG7+xtG zMULP z0Vj6aP`^X0xadC}UmrBwCOq-Ns*2+L3IZa!!VQxbOt)eHFXn{yO~7uz=xEIqEna-7cf6^YOtpVCKM5 zJ5Abz%z%36!(th^r^*OuCH?E$EnDz$K z-uJ;&_fLSsWsMpiVrJgDF^Bp`H%uNP)tk%~;PyaS)`>4?h#Cq9AHn;`p5^)4 zKDERajGo9LaRljSJLMz6p4-2e`^L7{{|;5R-J%bmc4Nx##XbA+2CmV`q`l|ZGe(l) zua+ZPz9XA|Sta!KjKO@$sPn=Zz;Ox{5R;K*yIxg^3H>zjv*`Sq@4iM2_aII(U++Ec zpNk!U=S>DkToU5yDD;|N59ya$``+3&prQB4Bm4c=~LCsqct*1%XddO2JUCN{lUx? 
zg7_7`Gpa=x3s(V-Hdx8-d4+{6zlPW6q7ON$b`Qg?c)aH!%<)|D1v?nF3Kn9y>S+5W z`b(kSxC&y@z+A5lLxxYLIhxV*ddlaL9d*fRKwz1;KIb(FMciy2ewIP&L2QgVq-*xt$A2{%&hD8wl`L|EV>pCXmTLt z^va8GO#gKXbiAbeWX9nmgMD)sN8t$Mq!Bv1gwDS4$YXo%nI!}mmP_eq9tWqF)STVu!47R z5IMgMkPK$W3J8#K@R)jeN@}b@5n#VxW!EoN0x=hcCGqsR;f~q>52vAwV#|{!ZKzO%tfIc=Bh2!Sb%g#UC;71D6<6Vp<%-P`(>3INV*aTFRI43*3oly+E@Hc&2?2oqE> z9GwyPVbbNuvBOSLCosvEVJ|Ar52oOe3dr?mnxl<4pov@ya5;@IfQ2XQM7FGJk*iL0 z?{8wK5V`0A93h_rNIUdLAE6;H4nNY%Ureor*~(4YbGA$7f;XtDe}?tGtXOSQ*?kpj zXr75S^?gri;2U?jYFJ~vF7yD;hU@5SGuQM4fI|L3z^4^K_ew$CPQldlfKH+Ct2B%xmX%$;h&&tL&+utiV zm@K+t;O>`cg|3o;B2;3NKckL{)~c>SvFJ#l7j$tVM_9H^20_I}wz(xLz{SGS&o~N6 zKE1AXk}Ib*Zo|Jyc;qk6;Lsj6I4N&&2thivy?di_tMUsKPt@nYPEUR5*icejU5N;m z%cCO!b6&7-1|5HjxG{xx=qsLbn0H6vOQ|WX+ zhuO;=+r$Q|5u9Hamv$?*k&WyEM4s)UU z6Aka6SuruyYOfjWWmOS|=)6wEjRk(0jw65~g3hmO_UOk+`2j z3n^+1BR=Ob8KgF$v*;o2*8_$B{L1}^@*TTn>YYH=M7_yUFbk`DrdbmMc$eRAordK7 zz0hwHLlJJmd#N8u-Q4of@1KRmZAFLk%Z9hBzr9EI%=%m+LmE z6mx>jcul>mZz0+13tt>PX(`3KWbld7KK!V+V%cdp;{muE!*uEe#1b?*bjDi`UDSat+rXov?hDu6?6^M+M6=a}C4wdza zC01lK%11;C7Qc7exc~BPIsRdqn&Aj#X8)(;U@-H=&uyH)kK7YR+4Xi_Am;k7n=2zN zd+QrlOjtzo_o$Tn0~kIVeRb_(TZF;ZaQ5@UkcZ!c7uQX<;3sit-MVTxo%ia;B~d5S zmqgf`Btrm;fHUJ1-~VMxMgPnDOm^cQ$jgogLhGV~%IyREkBUjr;{#*L-%0lav9p(xWr?9_oYjIP>lk_^ z*EmgMl8%JSOd@lXz96#;Io)q>sh_QV|0AH57x7qs#Ix!zIEgU+IXp;5dM{AbcWJBj z`R-2JAg_OdR6ty)8Ev`dDzgHzUvOION?ESFDT zJC8)`t}k+79P4nz3b)E`(3f%rybJl7Ng-CP;Z6^ z9#*_PoD^?D92Gqjgal%;6akCHEb&ZP+I78WwRTvJtLk-W85AgngEQ`na|PC zilc@o>YdKCS)2L6^@cEUozL1QnW@!PvYxyV&tCA%MDBV?!Q^9Vz!Wy-1T?yo!1rUk zlPsCXuEixY(D0zLfyl-(S1|_5ZW}e1vETINtYIwO=d;wP&Q8SddRZlSaV1VQfjUtI ziHW9#U9erWZMp8QyFo}>jM`3a29u!@O;Y*-ynF>QpfMKnGwn-aRPA?3Xldyb`O5@^ zMTv=;xlRax4@IsmY6`P!dmyI`N|)YMZM8=ZIGE4Heo?AR7276ByMJ$g60Q%vs+bFh zPy4ey$!gs-5ni#<95CQ(J@M=Y)^^P@Qog)H@JYNF-)_6h_-l6asj2KaYiZ!-=H}!u z;6^L*g;8kz;QY*F;e|W?)DF9i2|$yluDR(sG_UCJPNo8?W;I8hcO;3d@%PH|_=Xih zsCKGyKg3LXM4u>Bfh6V@>InWUkOWsFR0XehSN$9ih5` z+~7d>N6t3sA0e&;FKC;h$mdXjlaxyA0v$&B%M`TvDr9c|5>j54lBaoIk z?IY_@Ko|F@e*zcXd-+-J)@x{zoj~C7ve}jx8}%)!!L{(ieai{%nqoeowgpbR+S2wy z#2?zG*%g$xoy-qU?mS60*O2xlyT3?R|tZ0blf+21--JTOFoU*cV*nQnbbbE zY^Dm;I9*#Yrt#;FF{=?vglU3eGU2Dgd4Er_in6DQGngpPsw<47`?aZl!d(OhiT_ow z#+EF|&@ExcppgoKIb0bv*BRv~1gY-Qf{zE@hAk2kRtI zetlDEJUt^{9CM`QP3O}KntvKXZyW$X6H?%!-xmP9pl%UjWm^#HS98oyK~y5|>OLZs zIad8&EC7`}!?bCs@n_X-8nK|xdf6lMVWgrr%!V0U{~TiDD~@oGLVg+}%R~+M$^ci3 zOPemE7V;VChuJCOGEA7MP~Ci){WLk1YOZ0c6zsu$2~7gvP}O3cv@s~~tS)?vxnEEu zCZxsME)gd}2tw?K=9E~u;*X0Bc$QEG6NOLCZKErzV8*c=cf{~H^Q~|hpVPtqw?(8y zU0KlSXV<}y=}&5*bbGFm=Mx1fUNqTd_@P>%L5%iExC#LOogXN4Yyb$v0k-rIwj|>( zB4vv~4lIth*GBtt!~DB%P&l~ecI7LMcn7y?uX=DR3?AnkY#~^rMuBjp@jUx1S;09; zp?EU&x(l+?7|Ljz2bJck;2(p3bUfkGunNZ+hukDX3zOBQz-32k(z$9;@KcQSos~2m zx^nwHqv!M}&1ZZT(QkUWuNgg;w88Uh%zN4U3MBNXFheyUXDm_^p~f)lbqVBc{Oz38 zS`>S-8$&~bt&tbWogb#hkwLgO%DQm0@rtH46^c}Z3`R~F~Jdr=dJBKB98txSAsOA_K;?aYnjy`M2`wzsR zev@hv2UBUpe&vC@S)4G|V)M&nY9xdA?_)PyTsSS6{UgcCL!-qcn@9@yakRIXq>$j? 
zPd0l4WnkiA;u`^l{r#Q6*r6nMweZ%>DRP0o#n6$Qo0#d*`5j63k)lz_a_%vgjMcLL zDY(~`NpFUZJGn^VL{*zuuc9fKwOBA;p{KI7WHKtips8CY(#yb(tDDe93B!*=v8;o` z))fYcAp;}R;9(hLM=s5^st^MLvcox8p(K&xj96-m#JuYYs^%T^16p+(gD|F;5A-OV zF9LaVA2H5KxdQ>r>yM7sginj#shSTkzv;EIb1^w<3{!kFR9myisW5Yf3XT!WbYaZ~ z6+}v9gi@@1`^2!NmORpD4-cHba%lN}N$D&xCB6DP`oOzRn?mji8~#z5q+%2%>#k^{ zO;aM)__Rdq`s?l|NY;^-{@S1ajA|~R)@OP(IE8BRNxJ_R<2BG&6iQ-(GXm>`?Cr-& zIT2z7YFiG>gOgeQa!^RqhXX7(8A%|ABqrhyh!HlP{nckbz^BgJ9d#H}wfZgs|7FWf zXFf;#_?Dp1ODMGTO?B|QO1s1*_#i;63c!gd?@?!PQ4%XtHV`toM)GLtpZ2!~A&gCe zKL=N+4IaoVajHW?qKhIEM!OYEXOchZr;KAs>`*LeFCX&6g20%6%*=S$ZLoN{ti=jHH}k8wQ;gK>YjTmms~+{E@k7JSgj6-ea@6 zJ-dh|?MqtY3Z73b?y1PS%TpM)7GBBOqOBx@0O<5rymsyjjh{K01`!!SO$)}1ZuDw4 zS!J~LhDQJ^kFaPwRnD#Yq<2Ke;@-iwEh>|5d5)txO9k zDwWvTF=LSqjmsQZlR>`5uP1)MzvR&tRx^8#6#ZSEo|w?Z(3+bK9%d}XPE%7-I|8c? z){CnClbLFK=8E?JxSdVK4Rwez?C|bEYb;_V8zjAd7ZJh8oJK)C`zwA9!)c(gj;o~* zZEC6IathLb!5#yfpD}G?SRDzOfRCA|nfbElJpm=0>lCa+s4-^&2;>!w2TZbJb88BDXy%Dhtz!rF5W}GewJLQ&>#Ybc!0p0jJOMQ$m9d1xuxs5S@{Z z8Q3+t%wDNM&!3PWX>F$}JmkPk5X#s1s*|7m+p6 z=2`p~We0%$w2dsAHU+C`m$>g8+$}A7FAC*of{C1%M|~U=Bs@oN?M@3#3Q1P)xPz`k zjbhEI1KX}~@ZsYI)}c65n4*!TyB%d8$v(YF_NB9Ux0<+nyVFrDhB807&Po?So^er4 z>6IN!Ngvowb}mX&Ff;K#_(%515-$AA87CRD-RW;yAP>OHdy9oc?Hn6;i>h7G=r6wY zal%8w5}V{HArc0wu8mZ_o+S+I49lQekR5T5pZC4ZGUJcKuU|=AjU&D(&O{I6D!Fk{ zomWrvz8IgUl8H5T6&@9(X|)2AsZ%1UTw~e(16r*8P8B_rsIO8z*Iqroq&pAk+nA@f zI84b#_awygq)3vt{>nV4ctQSlTzOPN z(l(dSJcn;c2`d!5Jv;5{Ds(?kmgO)0plO(aOmcp2X2D1p~diZj*J>Uk2I8XtC4HxM&%J*gW6A}lXleieI)0gRVmAPI9acYWVJF?U zR_Sn&-;+;rU}~%vVlPcqOoL4>hAq`oI-wQPPe7E{vKFOGO*8)opv5lhzvzGMwavdD zySFH;=tCH9hkWEj8+M+^a6{dpan0-dcI9m5&oiKvFE))}NSLIC(46Wvo&o*Uo*}nl zQiGk#81ygv^tZb(kBuchQkR23YHF$nd{)Il@~7(2c0UAM6Kc5>k0iH=+n&==?h{eL zW~UOFjMT_XFLE|@B||p=XTiVd)i--u3Qy)IR#l*A)A*DyunL`Ya)ZL+nLj-<@F^Hn zRy`P3Qm`~%If|%Dvabi08%`|9y1OIj0dTU`+0?GmV4SLuz!e{cG)3=d_Y|%#D~KB` z9XSFmDsX0v>l}<6dy16Ngp1m{g24l;giY6P6usWS*foZQiYxeM`xjTJQ4L$YC3Sb( z!@Vn#Nt}u^AAO5Tf!Rk8p$d)IMv+mnvLuP41mDAw=++U>dTv6g<$U5Q@?R+22`PoP zV#g{aBTK5BE{nFBB{zsOgElo8lXq^z*kbWbP!@5lRyRR#P@Gl^$>T>}ZdIyedKG$L zOA{P?k~AEQX2RD(+$}&(;*F|D*3JW81>0lzY za&A_V!>D0upCe6T!@3){8piud#>^IU|AVc~6Ckz8KKvqKu>y+IL^l5kXZ`^r^0`Jn zee{wQ8@bWg>4MQdvQkGzVkjCdimp`VyvWfC zt%m7fCFdltZX+LT`M#0i$r;B=rc`hb8Pq>cW+V;`wKw3+{3Ls16KnJw{J}1| zlwP0MMvO1nVkzA)Gn_xl)qKK1`9^DH9iwX}o;n;8wPV_pb~}CxG9mOIp!N%(-n~zw z;g_|YJBUwXt-}q(57uI|SJunfc7xKY4VH1<%XiLiJMmbb0qc+0K3D0U-;!4+e;h6z z5+(}Z*7``{3#s&;@%vwl9|ALBh;Jv2!#GJHRf3MOA8>VahNxA&X*t7+Ueuh6_mGt= zCOPtbxjOx6iTD`i&;gtK2e2y*=3Z=0Z2v=Aiex4i+)#p@f)o{L_m|Q7eQy{ILUx%e_Sl#Ypp?Q#p4zdcKOKUD@(zbA21VW1NO0b13$7o zR4`0={PIoOU6%%_e@k9{BeHzNgPB?hNQE2Jmzb4wVpt%ItCHjjd_Jb8;{kVC4diJdT-bWM^_4LTADb5kMUk6?T_%CZcq zhN4`3jy2;WevM8gb39)GJ2XI+=_q zgi%pGDwW7ER$-gZxhP{l{GqW{&wH2L4}fTs6^+PXZTH>G;hC`y+rsGK+w1MS@OTNd z3KWlN1Pg~*Pa%*Gmb(*Zz<-oekm47ti6%0b!zCU=_o<-LlEkXOt_h^P{x7uI>2a}K zFWj+JSsp2p=YwSf*i`VIVaK+L`PM!4JYn>GeW~0BPWg&H9?n%RcXRswVKehH(b!tW zr#GN_U=uPh5*?Srao=Ym^%h@az0;N-qp5%yuHfciwwah4jC*tyUGi*Kmyd?@k>nwH z06H-V9=>+KR;)9fQ2$C4$;*d;Dg*b`HeS(ufLLjLg07}>4j9zN-I>a#&^VQm1z)qE z1X7xnq(Gtk>+*68k$%U+GsJ<7OW^${VQIbsgP6oVN5&gSqQeVp_RROaoIHq32Kbp4 zS5_vbMyOz|QKjX*LO+N@K_`wZ1T!OMO)C=M?4J7Qo+-Je_;-$3GFFuk8qa-5WBxDK zz|Uq;TaG>c!+F1?%K>&psuV@dnACi;XzSEd=th3mOJHuP+~MS9dd6;GJd(C+xxms$ z!+tW{B8mxsIR7(F3QrW4o1q##WvNXZZK5wFMVc*~lSoeOOir-KeEO7)i&OSs6P@pc zr3BXzZ}#MbN9jS&dSu2c51cQ(Qan`BAK<@sXr(xt;Cw?4V_0`q3OUGQd-luYq*h0! 
zC4e-Px&<0qtBVUZ##7^Iaivk)Kxr$UqdahN%Zcwa(G#)J{7YgZD$D6k^^1zK zhvhgwNgiusKI7t<4((Ubo|)vd-k6nDS-?UJHniHAW6*{~(`>HzBsB$nC?AqX4J(x5 zloBV=Q%yd(Nx##3%NRy$w=E&E+Wp1%Om)0a){EmnJ=rEv?Ep3G(rF+;_^cVutBKeU zJmx_hqs>kn3sZ>Wr1(vW3e(gt)Kf8~dihQ&Lc;|y!X)SB3m+^B1eZrTic_WXCn3=_ z2@0aP9Cf;xJ)M`eCcsc|3LgFdq%U%MKDB+u0W2}te?~1M%+ZSEx2XC0x81}OSQ4yB zl~|*Bn##BM*sO38GUuW0=$(bNszHHRqz(#=ac#pgO_*f70<>5{q$VoN@D60T((ed} z#Y8cgD;8rjBopitXucv5lxTcBlbhZOc9OL)6DKRFftSH!qtP;w0gCefkXKALslabD zw1zk*W7!J~H0D-x%1CTLcEtCU&{+BU@i^h<`MFl53!50TiE_x zi+xA|SsOwG+l3b=aDX1$tT4EHQ4G2$@)^(UP&@I|~;ttIND zwl9an5>ep0_H(WT8s~k%DwYn|k{CNd&_E^#;hUFsih6!yu}h8KDtc=P60#WDP=6vy zcy-liG>KNO|Brllw4JvKx^Gz21Wyg@>Jq#tU}8&Qni2&Eu{bFfKc{e577CBhy&0T5o`bE8#j6c{)IrN0qk^{ zBx4|_Lq6Cb@r-}ltkf%A^vIn^z<^f43wms9$Y9lY(vKJ*t^Gso>Xtb^H8b+en4-w4 zV#ZIx+CcVS(h4OgC4@Z;X@0L@$%^EB>!$jF#yAA}1U{vIUABZ({}jd4ekBA|1m9M?NRAEa}uqfn8h6GxE{$dTnuk5Weaq8Ak};8L^MSL!K= z2&kIcmHcq#;edhBDQ|_!y6`6&{B~#+9M$ToCh6r$d-EDpiL99D#vBg1vUuZP?F8ouAyfyMQFOkB{*2R5z9^jl5$WrBaV z0I41PQ+y;aSrf4#2J_jf=48e3M4aHMKsF?GA|?hHdi$FgrfcuwHFM&U(D8B#+A#G& z-0CH~4ex7KxSszogXkv1XE!ChBU@qj5N(sDOSG%ooVJbdi|fYJX{%XKn$2$I%fVgo zvQ4q=*m{UhoUXH+vElgHJ&0C}J!<)WUnoIVR*hjS_viz(*@#Y@>FjQS+R^4RbrWDQ z7`~_|fpM=(w}sR{^kLlNdLLjk$FftLVVy~&AAZS25sIB1h0`}djqyi(JKlFwdfhU= z?q4Lq2268>AgL^|#%Q<<-AcVg^Ro|~XLD01zgw?nOEmH-Y~(j;`*E@Zn7d-uY!XIx z7qOSTS{L40qbI3&16wFBSJ2Aw*j%Cn-whA0z&*dBZFK=~;X<(hvHvpsVq#@={qnaH zyT$UHB~_jA>-2rlx8?+coB&xofyKfSLme9+l}wzrAC$9@gfHtqz*eMQ){?G9NyYk> z1XnLbXxDpzsE3Oma=@@XeB>w1q^*+Ay>!s<&1)-`dU=jogG4~J@9W~Zi>-|~{kT(k zYj_?!d>lLYZ?f?1K%`j%9~9Xq3^G&8(S}1l@m`_hr8P4A6%ywZ?G}D|CfVE`A~V^CMip08-&E3RXjM#n=ohEt7gv1h6Zh%^=xqY`KEC9f z5vWL=Cc!Y4d>&EDSOvqJ1Tf$BjipLpOANQ+RN5*dJ1gQOG7gxTLKoBTq;s>kOdToU zDX_z;CzTbxs(voqvj4jf8GiZn+5h>o^I)L$*)`j^PcFHDqosJW1q+TiQ6mx9)P8sW z2fpwsMv!|>ke|vw49U>F%8{a!^ql6PJClVb3!l*(P12_Dkx9FQ@uR}G6-Mmrh6alZ&wAK^L(uLpz)Um5IgDeCC}F8l*D;|f4)|CgK( zA2{|(Yd6OOXzl-U?9tz0z0%s-Q2|=}zcK>=tvwE)wI_T8XzkR-rhv=@z_TX?c=mgO z0MDMkdywTHo;?V7w~R*u@Zgt-kFTw5F>Maf&Y0N9r(JTm%!9-KL|-chjs z*T9_Q;r-sf|7^q6YnW%HdnTKcGi7;il=W(nfI18HI~OqBYK$B|=`_|k8lm;1kd`!&Yb<0R7MXRv9==8(vKm5O%har-l__=nZu zLmt}9^70JY`{iNlxK3gdkK@b7BOei?6=uHgZY&bRKlZ0}CJuf(*9a%QkDE9>*jTe) zzsoaOr~7^zh387ys<*Bq!6x!liZ`!8y8FX6HL2XOpm- z*Cj}WZkO4A7PHVf}^sImhV z*fRU`&u<8G7aThW{v54JpIjeS2%22qU(4r*b){R;qWy})YknU7{e7v4rc=Oj#Z3TW z8OP!dH)>p=1Fv)PPTDN}!wF7KwEeb%Wzu^z?)vo;p9#BPN}n5TVK=pJt}wWJ^nRku zoqxL_HXa{z2oa{P6b@1XSgl6NG)o9L4(b@|^Nb3cQkMK&U2<2a_G z1-MO!$hzrjzlG5x$Ei{pmED4P3F$VxKh9OqzI%!yhjW`QXg!AaJ1oY|{>%XD(KO!j zRLl(1W3JzV<6%@DZ!hbrs_|!#5*yVainy&0@TEl#7%A)C=RbE#XE3-ooj&n*b)PwC zW_-%~W#_79+y)*Hc`0=@G=Sbpn-u13+XBWk6PNKS>g61ZNCz878E+yP^E=z8CN86= z$)lGKt|v#%mM8{0Hk zQXg5&Gw1DIA*UJGol-%@@pDEHsw;fPX;bZcPrHv_a;wY=@S>gNT@g$W06VNbA_Gh$a0t}ai zo;l!i8%O00%%oA%f(+aA#RRmlXBIKNNxsIxmj#f%U-Q-E66vv09m#2};PbzK)Wp4n zqvfoVsiggWYxh{v(=%3jbYkWKt@J*R?bOmP7d{sFZURRImi1H8tv^57etQ$l+-Fa1 z|8**xT3!e2+Q_D|ipLGyUDXXVEzURM@Kepz!2EXN-9j(#|Ecb~znaXVb_G%5pg?43 z3PHdDl_nxpnzX1i8;Epp=$+65K_m(Y5*d^ZQ97atN)-Y`icukS34{`QKsuoWNWy&? 
z=dOF#_aA)u`Ca>*z0Y%Yd2-G^KWgHn7k~y}Xu?y@F>i!DVx6BblRchkJYN00L=aWH z@PI@k)SVaYz&&!`Bld|oFJ8m<0eWXPez(*vBE z%cgr4>vA+p@n6F(?k;x_$Iv*5ET%u8>a<;&3oi8KDb&cbSeH<^@~66{E9+5r6j(M& zU3r~6d(JZ;;GwwZK3}-h5X;9`k9e!tCHuF2wM6=^tY#x6JL9TUGoMx~Vp?3!4ZiS+yJ4*UOP z6PqIC7E?Bdi(eHPNWUp+Dn)+FynDkB3y`B?E``j)`r9`*4&lkAw z`u{jOfN2|{jt_q2yVIJ8UEE_YexC6|+KODHu_)EeaK072mzn~*mDGY}E45v5T z0-kVIR2DTZ8VOmSSIa^KH?}<}0dAQKoup{9Mtr(IGt{0H6OJ%x6&2}j>9yg}47H`F zF!50HXUz`6TE~0bSijEsxITTnHjEwh&X`I zA8)tWt6#YBp4ycM!57?$jb8)ZcG!)y+;_1x)_GD)MMlD5eEPvLVcQ^U?Hqbf80uYe z;lc^cB-h|Mo_QaSdXdQrH1X=Aj8l>=8Pvhdl2l6kAp(XNxCNN3X>+BFY;{{Gm-_SK z1M9BOooD~m!O*t%Irw?&KQaRgs#+uw1r>huZf@7?2jUNC&&C%KlAW+tT5Q5{1<5HOeU?t;feL**_?R1KO0>cr%Odn0V>JXiDJ05VJrK(BrEot)f1;QTJR9wsR-@gbYN3G{!VAC zu?X;2t;v{BxoK-sqd6>m)+;w$V;AWDFfOc>OtTs$=XJxtJ&!#0hKJkj&i)-q+7!xa z$|CGcJ_AoGUX2QQmS{so?Y_Rx^(`P9fI7q};Tm`xbU9JydD;$aRQU0ZOabADMmEBN z;@4)2NX+n0Z2Sh^W+rV~4~ZO)MDBokmuuSp8o!C=WY-(NvfibZFRxaKE(^_Js@_U| zvw-cG{mh9BZ}eT<%D;KGb19O)lm`B~P~fZM{o|DH$ehqz+IDJ5zu#w`??WRao3zN1 zy&GA{`+}s<<$y=Hceq3KVv}o3H1AwZD%?1mvDn%F9RM652tp^WtjI4ZDvw3SrKgHm z{s3^cD}k;aWev=K6k;jVhBZxHiyD}I+ewZ>%`-Di_qkAL3Q#*L;STIt7wy^1>xAX%HshZJ5+F0z`b5) z(fx9F*G6kSqpa2xo+7!VK~9ya9gdOQq)^C?xx8N*YCQ%s+&xHZFnK16}AC5JE8Lgw^6GaYPl3;0}^B}{9?ZY+Pmpy z9dACnt^8L;Vj~}-=2)=@R z(`o4X-um_U-FDyB?$In-!Du6MwDI3EBH`RpyV z*oj$&*Nk184-s@V25cx==qr{^8!H-Be-&N7;UV=1u_&(y`5r!WH&y|n_P#dfBCy?E zPT=?^Nl8F-Tn3A4UsVE9%kfNnLtmI26O?7fR~G8h3MM=618m>x-akO7Hw)`o2HF!~ z*o@`CEC_b{XRn}Nfbv0ddA#8w(>dEpLgTX`ay?@s9^^2VqW4C;7`XdvqJs;5Bf)Vyl zoQ*S7&IS^zlP~LA&B^Z_+WetHVpH>BQt6{fAUA?PBxDj1K>HT0<7e+n%{c1~93=~t zdi?FYHR8UhSmeXG`P*)4!Lr9&DMvolx{P@;u0M(}G>5c%AB81!Bx%0*j_L!|Mwxv_ zpPo`U;qwOY+(fsOHvY$Te(k6a`>`I`k@xcHr`__*i#~b)gKhVEYB$FdgeFa$^E1Nw zyYnCdwFs`f8#~6c393aT!FLA+1eAC)>4?j+3uW=;r%+O zIxD9zLfUO}+=4)~0+nv4L+N~5g+8AX)?Z0f!>H?-4c=2>c*$l9ENjzSuV@QUo3*uP zi@W6+(^A^HViki;tm{WL&cMI?R7BbKKLe)a;6GD1pS~ks3xunTT(YQwe=gY1X|H;Q z5&S0kQQ-GV`ssksW}^0+%UiUtQ#&MX^`7Ff&AOQlf5U5d)WtnQWM*ziKWAFQ3P^f< zlNQD~DURuG?7}YI8f(k;t$=GziSws_Smzkuw1KE7g}H93nJ}OCJ3sq)b*GM4oeYE* zN7wjH^|jU_>}rVZ-f8EvlpyyVD`&FNjvX|x*@Pnb{Prx=^cv22)<$7`HFUyGmQJT!LR->#pUx-D_ z(WXS*Uc)oG^|jS2Rc_6K$_s3!ytg|vC#O~zKP`Q^5SI)2%@l*5Ds(v8$ct?}sjG%f zQP=-WhczxVb@Q{@vYR#r$(46{gLzZah3Z<=3+%0S|Ie z*Bt(_hBCS@$6OJq2ePh^y7zx|FS+TGZa_1&rf3;K{S=kto2pEoYk6V)G5hjM(&Ir@ zkx;(Smz0&V)-;I1JlC zQ#lzz0JX!n$g2$fO+#@fJ{FmDTDSa<7P_p+F$0nw6!%R2P4$Qf0Mc5gK9K(-E1dx* z%Iv-VfXHuYYO;W|xOSWWOLw&aR8x*sT&enhB$E{&jVH0z^WTn#Kx0nvb5OO9+;?EV zrA<@-(jWqUKc#+8l}{gFubf5m=l&(aka#D6;nI{HfA?=AMS$CU228b0!+;1rdp2pP4|MD zGP+kA7}GHH;vcX6WKTV`Z=hZN@e?sX^GTM!mTI%hfj1?*C7pV#tfRfPfA*L76|*a= z)mP0!DN{}FrA4FhmCoqjEF@B@;#kxw)0^ZYYE1uum4af1rIw!{s5I7R)QZrZjInkV&=D-Z@Dnbv)O=cCvK| z7PF3d9~BGT<`5j_vLWSYme1^(3P+c?^_f!Vzq)-eudeLAq*$VKp^ca7PuA!<Ci@MiSa}YPb{Iq)|C@D#bz!`)XaZhj+~kz6pw*JIz{F zoZkA>aKbsS5Kbrcbc-;i^z9vT*s-@i)cF5cje=F>4Aqk62~=g zMEo*@%>Y>`t-1Kb=W~)@ULzlvEUHUML$oVl0OPxxu@b3 zF-r-C9`*%yA=Z|;Y$LYvBei;e{^H_}3mgC)Q=Dv6YB-e%{V#V$4qg1gpS&(%bRU#1mqp&j{Md)oZhroVLveL>>Qp(E`^KF}1w$z^(i<^JqP|s)Q^- zup2vZ*0WlUTDxoJGzwlC=dT*fnZEUFda32_wB;7}QNPh%GR)n9TjUqFS;|vH!2yEj!QMDAwC5Ee}ozng|To3keH|Qibc0)C0C749(Rq*ox32D;{YnY2R%I_!Lbc}Bhm7(J;)gDIG3x?(b=^z z*0OC^flDH#{A5uy_GsBRaEq+LbLYjUs6tN`E49P zkjil^DZQyTp~b-{yAwDo1E!0R8-S}4;4USJstbLwI-yoII}bR=v_i08UoWusJ5lo_ z7Tv>RpIF>(dhqSViSV{E!3#B$(&YPxo9UK$si>YK2fLg^mEV^_p2}J_3hPr6)AsIK z$Z8#$*wvx+=~Z_@{Xqm(Jbk-uo*zM5NKvpH<#s@a8v1EkX-vIuzr>-<38>lq#lre2 zzC;2zfN`heo#v$1Dkyc7IXTk73iX&KR*bA3I-}tT$l{tRCa^kVP3U zU6teb4(RPA-1s%{r1eYs5aN4JHcgM>;k(i4C)^a^QKhA*k(kb%Ex9E2;;p1DmP#?R z@6(%z1;E5`)GaatiLl2^YU!0LYLZ64lNPquUp)5TuCqN!*=v~SeuzhXezZK*QC;b2 
zo@5=fPnrYy0bLHum*mqh7yoB0!y6e0>6p<&421kd?Y_`JZk_ru3%AIza{d)ZhxeK*V3jVIiu32~ zM{yr)FN$Ag1I#VmbHeipuTUbMSS(q{M-~7VQxeNdIy(1xxa@bto~SE55Z%~!@FHfx+OgpQ9bcX+y1vJ&pxW{RHBe5em~M`x4~ z_1os)<+=bIL=WN6FX)*7{LK~PunoJBtIwsl?~@=efqTyNw!_QzmnH%F(Z|Lv7(=8K zTMfu0VAm4C;`aYECN9_41e+&m@INUfM~~&h;H=7RhP#b4wvabN0LU)-fALfsfp92% zza++yEL}r-h{f0#(8nAeKaR7ucNnRZOS8t|rE5;15M-{{jzxllj;r8~%%gAsl07B| z3z`3%)l~d-9~bJhvcl;pjDvl#465ZbPB^XSsBvhE5`}@&;uSpXJ2+R56(4nniR(nf ze)XDh0hTeBsa`aimbmz{d*w_i*ZTfds%m6-p0!^Ci1&^?t^9)k+&SnoBsnkl3m-~u1D-)s#x4Q5#w=)fES3{GDy6`xy%0QubRd9BgdL^?zCgGf6|s)f31^r#SvxB zBZkCDzJg(I#Jd6Qy|K@CWu}e|rpJ)~JPgGD&*gHzc--)%bj$mtQvO(cPWL_<m`B>GO?a)jk^Vkxp|mFm95-6KLsX%5U0Ph28pn-V@cb^40?#RmaJbhN-!14OJ_d}xJQC4E^;KFh!I28o~0Cq z*D~EgyLVQvBwPoX90lCC$7ZaxIEPdcey@CcAGlj*xO4rcX8&O-e1`eP^1ot9@Gwpq z;>)f7tMiT@B5t0uTJLwA*8~CtDX`oB{DbZA1N_y)NMoc^%Ky7kTQfYICV1_g self.score_threshold + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=scores.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(1) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(1) + + if self.score_threshold: + mask &= thresh_mask + + boxes3d = final_box_preds[mask] + scores = final_scores[mask] + labels = final_preds[mask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels + } + + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + return predictions_dict + + def decode(self, preds_dicts): + """Decode bboxes. + Args: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. + Returns: + list[dict]: Decoded boxes. 
+        """
+        task_num = len(preds_dicts)
+
+        pred_bbox_list, pred_logits_list, task_ids_list = [], [], []
+        for task_id in range(task_num):
+            task_pred_dict = preds_dicts[task_id][0]
+            task_pred_bbox = torch.cat(
+                (task_pred_dict['center'][-1], task_pred_dict['height'][-1],
+                 task_pred_dict['dim'][-1], task_pred_dict['rot'][-1],
+                 task_pred_dict['vel'][-1]),
+                dim=-1
+            )
+            task_pred_logits = task_pred_dict['cls_logits'][-1]
+            pred_bbox_list.append(task_pred_bbox)
+            pred_logits_list.append(task_pred_logits)
+
+            task_ids = task_pred_logits.new_ones(task_pred_logits.shape).int() * task_id
+            task_ids_list.append(task_ids)
+
+        all_pred_logits = torch.cat(pred_logits_list, dim=-1)  # bs * nq * 10
+        all_pred_bbox = torch.cat(pred_bbox_list, dim=1)  # bs * (task nq) * 10
+        all_task_ids = torch.cat(task_ids_list, dim=-1)  # bs * nq * 10
+
+        batch_size = all_pred_logits.shape[0]
+        predictions_list = []
+        for i in range(batch_size):
+            predictions_list.append(
+                self.decode_single(all_pred_logits[i], all_pred_bbox[i], all_task_ids[i]))
+        return predictions_list
\ No newline at end of file
diff --git a/projects/mmdet3d_plugin/core/bbox/match_costs/__init__.py b/projects/mmdet3d_plugin/core/bbox/match_costs/__init__.py
new file mode 100644
index 0000000..d6aa375
--- /dev/null
+++ b/projects/mmdet3d_plugin/core/bbox/match_costs/__init__.py
@@ -0,0 +1,4 @@
+from mmdet.core.bbox.match_costs import build_match_cost
+from .match_cost import BBox3DL1Cost, BBoxBEVL1Cost, IoU3DCost
+
+__all__ = ['build_match_cost', 'BBox3DL1Cost', 'BBoxBEVL1Cost', 'IoU3DCost']
diff --git a/projects/mmdet3d_plugin/core/bbox/match_costs/match_cost.py b/projects/mmdet3d_plugin/core/bbox/match_costs/match_cost.py
new file mode 100644
index 0000000..7c68199
--- /dev/null
+++ b/projects/mmdet3d_plugin/core/bbox/match_costs/match_cost.py
@@ -0,0 +1,52 @@
+import torch
+from mmdet.core.bbox.match_costs.builder import MATCH_COST
+
+
+@MATCH_COST.register_module()
+class BBox3DL1Cost(object):
+    """BBox3DL1Cost.
+
+    Args:
+        weight (int | float, optional): loss weight. Default: 1.0.
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+
+    def __call__(self, bbox_pred, gt_bboxes):
+        """
+        Args:
+            bbox_pred (Tensor): Predicted normalized 3D boxes.
+                Shape [num_query, code_size].
+            gt_bboxes (Tensor): Ground truth normalized 3D boxes.
+                Shape [num_gt, code_size].
+        Returns:
+            torch.Tensor: bbox_cost value with weight.
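+
+        Example:
+            >>> # Illustrative shape check; ``bbox_pred`` and ``gt_bboxes``
+            >>> # are assumed to be normalized boxes as described above.
+            >>> cost = BBox3DL1Cost(weight=0.25)(bbox_pred, gt_bboxes)
+            >>> # cost.shape == (num_query, num_gt)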
+        """
+        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+        return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class BBoxBEVL1Cost(object):
+    def __init__(self, weight):
+        self.weight = weight
+
+    def __call__(self, bboxes, gt_bboxes, pc_range):
+        pc_start = bboxes.new(pc_range[0:2])
+        pc_range = bboxes.new(pc_range[3:5]) - bboxes.new(pc_range[0:2])
+        # normalize the box center to [0, 1]
+        normalized_bboxes_xy = (bboxes[:, :2] - pc_start) / pc_range
+        normalized_gt_bboxes_xy = (gt_bboxes[:, :2] - pc_start) / pc_range
+        reg_cost = torch.cdist(normalized_bboxes_xy, normalized_gt_bboxes_xy, p=1)
+        return reg_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class IoU3DCost(object):
+    def __init__(self, weight):
+        self.weight = weight
+
+    def __call__(self, iou):
+        iou_cost = -iou
+        return iou_cost * self.weight
\ No newline at end of file
diff --git a/projects/mmdet3d_plugin/core/bbox/util.py b/projects/mmdet3d_plugin/core/bbox/util.py
new file mode 100644
index 0000000..f28cd9d
--- /dev/null
+++ b/projects/mmdet3d_plugin/core/bbox/util.py
@@ -0,0 +1,82 @@
+import torch
+
+import numpy as np
+import mmdet3d
+from mmdet3d.core import limit_period
+
+
+def normalize_bbox(bboxes, pc_range=None):
+    cx = bboxes[..., 0:1]
+    cy = bboxes[..., 1:2]
+    cz = bboxes[..., 2:3]
+    w = bboxes[..., 3:4].log()
+    l = bboxes[..., 4:5].log()
+    h = bboxes[..., 5:6].log()
+
+    rot = bboxes[..., 6:7]
+    if bboxes.size(-1) > 7:
+        vx = bboxes[..., 7:8]
+        vy = bboxes[..., 8:9]
+        normalized_bboxes = torch.cat(
+            (cx, cy, w, l, cz, h, rot.sin(), rot.cos(), vx, vy), dim=-1
+        )
+    else:
+        normalized_bboxes = torch.cat(
+            (cx, cy, w, l, cz, h, rot.sin(), rot.cos()), dim=-1
+        )
+    return normalized_bboxes
+
+
+def denormalize_bbox(normalized_bboxes, pc_range=None):
+    # rotation
+    rot_sine = normalized_bboxes[..., 6:7]
+    rot_cosine = normalized_bboxes[..., 7:8]
+    rot = torch.atan2(rot_sine, rot_cosine)
+
+    # center in the bev
+    cx = normalized_bboxes[..., 0:1]
+    cy = normalized_bboxes[..., 1:2]
+    cz = normalized_bboxes[..., 4:5]
+
+    # size
+    w = normalized_bboxes[..., 2:3]
+    l = normalized_bboxes[..., 3:4]
+    h = normalized_bboxes[..., 5:6]
+
+    w = w.exp()
+    l = l.exp()
+    h = h.exp()
+
+    if normalized_bboxes.size(-1) > 8:
+        # velocity
+        vx = normalized_bboxes[..., 8:9]
+        vy = normalized_bboxes[..., 9:10]
+        denormalized_bboxes = torch.cat(
+            [cx, cy, cz, w, l, h, rot, vx, vy], dim=-1)
+    else:
+        denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot], dim=-1)
+    return denormalized_bboxes
+
+
+def bbox3d_mapping_back(bboxes, rot_degree, scale_factor, flip_horizontal,
+                        flip_vertical):
+    """Map bboxes from the test-time augmented frame back to the original
+    frame.
+
+    Args:
+        bboxes (:obj:`BaseInstance3DBoxes`): Boxes to be mapped back.
+        rot_degree (float): Rotation applied during test-time augmentation,
+            undone here via ``rotate(-rot_degree)``.
+        scale_factor (float): Scale factor.
+        flip_horizontal (bool): Whether to flip horizontally.
+        flip_vertical (bool): Whether to flip vertically.
+
+    Returns:
+        :obj:`BaseInstance3DBoxes`: Boxes mapped back.
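+
+    Example:
+        >>> # Illustrative sketch; ``aug_boxes`` is assumed to be a boxes
+        >>> # object produced with a horizontal flip and 0.5 scaling.
+        >>> restored = bbox3d_mapping_back(
+        ...     aug_boxes, rot_degree=0.0, scale_factor=0.5,
+        ...     flip_horizontal=True, flip_vertical=False)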
+    """
+    new_bboxes = bboxes.clone()
+    if flip_horizontal:
+        new_bboxes.flip('horizontal')
+    if flip_vertical:
+        new_bboxes.flip('vertical')
+    new_bboxes.scale(1 / scale_factor)
+    new_bboxes.rotate(-rot_degree)
+
+    return new_bboxes
\ No newline at end of file
diff --git a/projects/mmdet3d_plugin/datasets/__init__.py b/projects/mmdet3d_plugin/datasets/__init__.py
new file mode 100644
index 0000000..997b48b
--- /dev/null
+++ b/projects/mmdet3d_plugin/datasets/__init__.py
@@ -0,0 +1,2 @@
+from .custom_nuscenes_dataset import CustomNuScenesDataset
+from .pipelines import *
\ No newline at end of file
diff --git a/projects/mmdet3d_plugin/datasets/custom_nuscenes_dataset.py b/projects/mmdet3d_plugin/datasets/custom_nuscenes_dataset.py
new file mode 100644
index 0000000..2986c23
--- /dev/null
+++ b/projects/mmdet3d_plugin/datasets/custom_nuscenes_dataset.py
@@ -0,0 +1,91 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2023 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from DETR3D (https://github.com/WangYueFt/detr3d)
+# Copyright (c) 2021 Wang, Yue
+# ------------------------------------------------------------------------
+# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
+# Copyright (c) OpenMMLab. All rights reserved.
+# ------------------------------------------------------------------------
+
+import numpy as np
+from mmdet.datasets import DATASETS
+from mmdet3d.datasets import NuScenesDataset
+
+
+@DATASETS.register_module()
+class CustomNuScenesDataset(NuScenesDataset):
+    r"""NuScenes Dataset.
+
+    This dataset only adds camera intrinsics and extrinsics to the results.
+    """
+
+    def get_data_info(self, index):
+        """Get data info according to the given index.
+
+        Args:
+            index (int): Index of the sample data to get.
+
+        Returns:
+            dict: Data information that will be passed to the data \
+                preprocessing pipelines. It includes the following keys:
+
+                - sample_idx (str): Sample index.
+                - pts_filename (str): Filename of point clouds.
+                - sweeps (list[dict]): Infos of sweeps.
+                - timestamp (float): Sample timestamp.
+                - img_filename (str, optional): Image filename.
+                - lidar2img (list[np.ndarray], optional): Transformations \
+                    from lidar to different cameras.
+                - ann_info (dict): Annotation info.
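+
+        Example:
+            >>> # Illustrative use of the returned matrices; ``dataset`` is
+            >>> # an instance of this class and ``x, y, z`` are assumed
+            >>> # lidar coordinates of a point.
+            >>> info = dataset.get_data_info(0)
+            >>> u, v, d = (info['lidar2img'][0] @ np.array([x, y, z, 1.0]))[:3]
+            >>> u, v = u / d, v / d  # pixel coordinates after depth division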
+        """
+        info = self.data_infos[index]
+        # standard protocol modified from SECOND.Pytorch
+        input_dict = dict(
+            sample_idx=info['token'],
+            pts_filename=info['lidar_path'],
+            sweeps=info['sweeps'],
+            timestamp=info['timestamp'] / 1e6,
+            img_sweeps=None if 'img_sweeps' not in info else info['img_sweeps'],
+            radar_info=None if 'radars' not in info else info['radars']
+        )
+
+        if self.modality['use_camera']:
+            image_paths = []
+            lidar2img_rts = []
+            lidar2cam_rts = []
+            cam_intrinsics = []
+            img_timestamp = []
+            for cam_type, cam_info in info['cams'].items():
+                img_timestamp.append(cam_info['timestamp'] / 1e6)
+                image_paths.append(cam_info['data_path'])
+                # obtain lidar to image transformation matrix
+                lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
+                lidar2cam_t = cam_info[
+                    'sensor2lidar_translation'] @ lidar2cam_r.T
+                lidar2cam_rt = np.eye(4)
+                lidar2cam_rt[:3, :3] = lidar2cam_r.T
+                lidar2cam_rt[3, :3] = -lidar2cam_t
+                intrinsic = cam_info['cam_intrinsic']
+                viewpad = np.eye(4)
+                viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
+                lidar2img_rt = (viewpad @ lidar2cam_rt.T)
+                lidar2img_rts.append(lidar2img_rt)
+
+                cam_intrinsics.append(viewpad)
+                lidar2cam_rts.append(lidar2cam_rt.T)
+
+            input_dict.update(
+                dict(
+                    img_timestamp=img_timestamp,
+                    img_filename=image_paths,
+                    lidar2img=lidar2img_rts,
+                    cam_intrinsic=cam_intrinsics,
+                    lidar2cam=lidar2cam_rts,
+                ))
+
+        if not self.test_mode:
+            annos = self.get_ann_info(index)
+            input_dict['ann_info'] = annos
+
+        return input_dict
diff --git a/projects/mmdet3d_plugin/datasets/pipelines/__init__.py b/projects/mmdet3d_plugin/datasets/pipelines/__init__.py
new file mode 100644
index 0000000..b28b880
--- /dev/null
+++ b/projects/mmdet3d_plugin/datasets/pipelines/__init__.py
@@ -0,0 +1,2 @@
+from .dbsampler import UnifiedDataBaseSampler
+from .transform_3d import *
\ No newline at end of file
diff --git a/projects/mmdet3d_plugin/datasets/pipelines/dbsampler.py b/projects/mmdet3d_plugin/datasets/pipelines/dbsampler.py
new file mode 100644
index 0000000..a4d5839
--- /dev/null
+++ b/projects/mmdet3d_plugin/datasets/pipelines/dbsampler.py
@@ -0,0 +1,268 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import mmcv
+import numpy as np
+import os
+
+from mmdet3d.core.bbox import box_np_ops
+from mmdet3d.datasets.pipelines import data_augment_utils
+from mmdet3d.datasets import PIPELINES
+from mmdet3d.datasets.builder import OBJECTSAMPLERS
+from mmdet3d.datasets.pipelines.dbsampler import BatchSampler
+
+
+@OBJECTSAMPLERS.register_module()
+class UnifiedDataBaseSampler(object):
+    """Class for sampling data from the ground truth database.
+
+    Args:
+        info_path (str): Path of groundtruth database info.
+        data_root (str): Path of groundtruth database.
+        rate (float): Rate of actual sampled over maximum sampled number.
+        prepare (dict): Name of preparation functions and the input value.
+        sample_groups (dict): Sampled classes and numbers.
+        classes (list[str]): List of classes. Default: None.
+        points_loader (dict): Config of points loader.
+            Default: dict(type='LoadPointsFromFile', coord_type='LIDAR',
+            load_dim=4, use_dim=[0, 1, 2, 3]).
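+
+    Example:
+        >>> # Illustrative config sketch; the info path and the numbers
+        >>> # below are placeholders, not values shipped with this patch.
+        >>> db_sampler = dict(
+        ...     type='UnifiedDataBaseSampler',
+        ...     data_root='data/nuscenes/',
+        ...     info_path='data/nuscenes/unified_dbinfos_train.pkl',
+        ...     rate=1.0,
+        ...     prepare=dict(filter_by_min_points=dict(car=5)),
+        ...     classes=['car'],
+        ...     sample_groups=dict(car=2))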
Default: dict( + type='LoadPointsFromFile', load_dim=4, use_dim=[0,1,2,3]) + """ + + def __init__(self, + info_path, + data_root, + rate, + prepare, + sample_groups, + classes=None, + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=[0, 1, 2, 3])): + super().__init__() + self.data_root = data_root + self.info_path = info_path + self.rate = rate + self.prepare = prepare + self.classes = classes + self.cat2label = {name: i for i, name in enumerate(classes)} + self.label2cat = {i: name for i, name in enumerate(classes)} + self.points_loader = mmcv.build_from_cfg(points_loader, PIPELINES) + + db_infos = mmcv.load(info_path) + + # filter database infos + from mmdet3d.utils import get_root_logger + logger = get_root_logger() + for k, v in db_infos.items(): + logger.info(f'load {len(v)} {k} database infos') + for prep_func, val in prepare.items(): + db_infos = getattr(self, prep_func)(db_infos, val) + logger.info('After filter database:') + for k, v in db_infos.items(): + logger.info(f'load {len(v)} {k} database infos') + + self.db_infos = db_infos + + # load sample groups + # TODO: more elegant way to load sample groups + self.sample_groups = [] + for name, num in sample_groups.items(): + self.sample_groups.append({name: int(num)}) + + self.group_db_infos = self.db_infos # just use db_infos + self.sample_classes = [] + self.sample_max_nums = [] + for group_info in self.sample_groups: + self.sample_classes += list(group_info.keys()) + self.sample_max_nums += list(group_info.values()) + + self.sampler_dict = {} + for k, v in self.group_db_infos.items(): + self.sampler_dict[k] = BatchSampler(v, k, shuffle=True) + # TODO: No group_sampling currently + + @staticmethod + def filter_by_difficulty(db_infos, removed_difficulty): + """Filter ground truths by difficulties. + + Args: + db_infos (dict): Info of groundtruth database. + removed_difficulty (list): Difficulties that are not qualified. + + Returns: + dict: Info of database after filtering. + """ + new_db_infos = {} + for key, dinfos in db_infos.items(): + new_db_infos[key] = [ + info for info in dinfos + if info['difficulty'] not in removed_difficulty + ] + return new_db_infos + + @staticmethod + def filter_by_min_points(db_infos, min_gt_points_dict): + """Filter ground truths by number of points in the bbox. + + Args: + db_infos (dict): Info of groundtruth database. + min_gt_points_dict (dict): Different number of minimum points + needed for different categories of ground truths. + + Returns: + dict: Info of database after filtering. + """ + for name, min_num in min_gt_points_dict.items(): + min_num = int(min_num) + if min_num > 0: + filtered_infos = [] + for info in db_infos[name]: + if info['num_points_in_gt'] >= min_num: + filtered_infos.append(info) + db_infos[name] = filtered_infos + return db_infos + + def sample_all(self, gt_bboxes, gt_labels, with_img=False): + """Sampling all categories of bboxes. + + Args: + gt_bboxes (np.ndarray): Ground truth bounding boxes. + gt_labels (np.ndarray): Ground truth labels of boxes. + + Returns: + dict: Dict of sampled 'pseudo ground truths'. + + - gt_labels_3d (np.ndarray): ground truths labels \ + of sampled objects. 
+            - gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): \
+                sampled ground truth 3D bounding boxes
+            - points (np.ndarray): sampled points
+            - group_ids (np.ndarray): ids of sampled ground truths
+        """
+        sampled_num_dict = {}
+        sample_num_per_class = []
+
+        for class_name, max_sample_num in zip(self.sample_classes,
+                                              self.sample_max_nums):
+            class_label = self.cat2label[class_name]
+            # sampled_num = int(max_sample_num -
+            #                   np.sum([n == class_name for n in gt_names]))
+            sampled_num = int(max_sample_num -
+                              np.sum([n == class_label for n in gt_labels]))
+            sampled_num = np.round(self.rate * sampled_num).astype(np.int64)
+            sampled_num_dict[class_name] = sampled_num
+            sample_num_per_class.append(sampled_num)
+
+        sampled = []
+        sampled_gt_bboxes = []
+        avoid_coll_boxes = gt_bboxes
+
+        for class_name, sampled_num in zip(self.sample_classes,
+                                           sample_num_per_class):
+            if sampled_num > 0:
+                sampled_cls = self.sample_class_v2(class_name, sampled_num,
+                                                   avoid_coll_boxes)
+
+                sampled += sampled_cls
+                if len(sampled_cls) > 0:
+                    if len(sampled_cls) == 1:
+                        sampled_gt_box = sampled_cls[0]['box3d_lidar'][
+                            np.newaxis, ...]
+                    else:
+                        sampled_gt_box = np.stack(
+                            [s['box3d_lidar'] for s in sampled_cls], axis=0)
+
+                    sampled_gt_bboxes += [sampled_gt_box]
+                    avoid_coll_boxes = np.concatenate(
+                        [avoid_coll_boxes, sampled_gt_box], axis=0)
+
+        ret = None
+        if len(sampled) > 0:
+            sampled_gt_bboxes = np.concatenate(sampled_gt_bboxes, axis=0)
+            # center = sampled_gt_bboxes[:, 0:3]
+            s_points_list = []
+            s_idx_list = []
+            s_imgs_list = []
+            count = 0
+            for info in sampled:
+                file_path = os.path.join(
+                    self.data_root,
+                    info['path']) if self.data_root else info['path']
+                results = dict(pts_filename=file_path)
+                if 'nori_id' in info:
+                    results['pts_nori_path'] = info['nori_id']
+                s_points = self.points_loader(results)['points']
+                s_points.translate(info['box3d_lidar'][:3])
+                # np.int was removed from recent NumPy; use np.int64
+                idx_points = count * np.ones(len(s_points), dtype=np.int64)
+                s_points_list.append(s_points)
+                s_idx_list.append(idx_points)
+                count += 1
+
+                if with_img:
+                    if len(info['image_path']) > 0:
+                        img_path = os.path.join(
+                            self.data_root,
+                            info['image_path']) if self.data_root else info['image_path']
+                        s_img = mmcv.imread(img_path, 'unchanged')
+                    else:
+                        s_img = []
+                    s_imgs_list.append(s_img)
+
+            # np.long was removed from recent NumPy; use np.int64
+            gt_labels = np.array([self.cat2label[s['name']] for s in sampled],
+                                 dtype=np.int64)
+            ret = {
+                'gt_labels_3d':
+                gt_labels,
+                'gt_bboxes_3d':
+                sampled_gt_bboxes,
+                'points':
+                s_points_list[0].cat(s_points_list),
+                "points_idx":
+                np.concatenate(s_idx_list, axis=0),
+                'images':
+                s_imgs_list,
+                'group_ids':
+                np.arange(gt_bboxes.shape[0],
+                          gt_bboxes.shape[0] + len(sampled))
+            }
+
+        return ret
+
+    def sample_class_v2(self, name, num, gt_bboxes):
+        """Sampling specific categories of bounding boxes.
+
+        Args:
+            name (str): Class of objects to be sampled.
+            num (int): Number of sampled bboxes.
+            gt_bboxes (np.ndarray): Ground truth boxes.
+
+        Returns:
+            list[dict]: Valid samples after collision test.
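+
+        Example (a minimal sketch of the BEV collision test this method
+        relies on; the two boxes are made up):
+
+        .. code-block:: python
+
+            import numpy as np
+            from mmdet3d.core.bbox import box_np_ops
+            # (x, y, z, dx, dy, dz, yaw) boxes; the second overlaps the first
+            boxes = np.array([[0., 0., 0., 4., 2., 1.5, 0.],
+                              [1., 0., 0., 4., 2., 1.5, 0.]])
+            bev = box_np_ops.center_to_corner_box2d(
+                boxes[:, 0:2], boxes[:, 3:5], boxes[:, 6])
+            # data_augment_utils.box_collision_test(bev, bev) flags this
+            # overlapping pair, so the sampled box would be rejected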
+ """ + sampled = self.sampler_dict[name].sample(num) + sampled = copy.deepcopy(sampled) + num_gt = gt_bboxes.shape[0] + num_sampled = len(sampled) + gt_bboxes_bv = box_np_ops.center_to_corner_box2d( + gt_bboxes[:, 0:2], gt_bboxes[:, 3:5], gt_bboxes[:, 6]) + + sp_boxes = np.stack([i['box3d_lidar'] for i in sampled], axis=0) + boxes = np.concatenate([gt_bboxes, sp_boxes], axis=0).copy() + + sp_boxes_new = boxes[gt_bboxes.shape[0]:] + sp_boxes_bv = box_np_ops.center_to_corner_box2d( + sp_boxes_new[:, 0:2], sp_boxes_new[:, 3:5], sp_boxes_new[:, 6]) + + total_bv = np.concatenate([gt_bboxes_bv, sp_boxes_bv], axis=0) + coll_mat = data_augment_utils.box_collision_test(total_bv, total_bv) + diag = np.arange(total_bv.shape[0]) + coll_mat[diag, diag] = False + + valid_samples = [] + for i in range(num_gt, num_gt + num_sampled): + if coll_mat[i].any(): + coll_mat[i] = False + coll_mat[:, i] = False + else: + valid_samples.append(sampled[i - num_gt]) + return valid_samples diff --git a/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py b/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py new file mode 100644 index 0000000..f3ed073 --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py @@ -0,0 +1,969 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2023 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ + +import numpy as np +from numpy import random +import torch +import mmcv +import cv2 + +from mmcv.utils import build_from_cfg +from mmdet.datasets.builder import PIPELINES +from mmdet.datasets.pipelines import RandomFlip +from mmdet3d.core.bbox import box_np_ops +from mmdet3d.datasets.builder import OBJECTSAMPLERS + + +@PIPELINES.register_module() +class PadMultiViewImage(object): + """Pad the multi-view image. + There are two padding modes: (1) pad to a fixed size and (2) pad to the + minimum size that is divisible by some number. + Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", + Args: + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (float, optional): Padding value, 0 by default. + """ + + def __init__(self, size=None, size_divisor=None, pad_val=0): + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + # only one of size and size_divisor should be valid + assert size is not None or size_divisor is not None + assert size is None or size_divisor is None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + if self.size is not None: + padded_img = [mmcv.impad( + img, shape=self.size, pad_val=self.pad_val) for img in results['img']] + elif self.size_divisor is not None: + padded_img = [mmcv.impad_to_multiple( + img, self.size_divisor, pad_val=self.pad_val) for img in results['img']] + results['img'] = padded_img + results['img_shape'] = [img.shape for img in padded_img] + results['pad_shape'] = [img.shape for img in padded_img] + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. 
+ """ + self._pad_img(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'size_divisor={self.size_divisor}, ' + repr_str += f'pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class NormalizeMultiviewImage(object): + """Normalize the image. + Added key is "img_norm_cfg". + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + """Call function to normalize images. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ + results['img'] = [mmcv.imnormalize( + img, self.mean, self.std, self.to_rgb) for img in results['img']] + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class UnifiedObjectSample(object): + """Sample GT objects to the data. + + Args: + db_sampler (dict): Config dict of the database sampler. + sample_2d (bool): Whether to also paste 2D image patch to the images + This should be true when applying multi-modality cut-and-paste. + Defaults to False. + """ + + def __init__(self, db_sampler, sample_2d=False, sample_method='depth', modify_points=False, mixup_rate=-1): + self.sampler_cfg = db_sampler + self.sample_2d = sample_2d + self.sample_method = sample_method + self.modify_points = modify_points + self.mixup_rate = mixup_rate + if 'type' not in db_sampler.keys(): + db_sampler['type'] = 'DataBaseSampler' + self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS) + + @staticmethod + def remove_points_in_boxes(points, boxes): + """Remove the points in the sampled bounding boxes. + + Args: + points (:obj:`BasePoints`): Input point cloud array. + boxes (np.ndarray): Sampled ground truth boxes. + + Returns: + np.ndarray: Points with those in the boxes removed. + """ + masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes) + points = points[np.logical_not(masks.any(-1))] + return points + + def __call__(self, input_dict): + """Call function to sample ground truth objects to the data. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after object sampling augmentation, \ + 'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated \ + in the result dict. 
+ """ + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + + # change to float for blending operation + points = input_dict['points'] + if self.sample_2d: + # Assume for now 3D & 2D bboxes are the same + sampled_dict = self.db_sampler.sample_all( + gt_bboxes_3d.tensor.numpy(), + gt_labels_3d, + with_img=True) + else: + sampled_dict = self.db_sampler.sample_all( + gt_bboxes_3d.tensor.numpy(), gt_labels_3d, with_img=False) + + if sampled_dict is not None: + sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d'] + sampled_points = sampled_dict['points'] + sampled_points_idx = sampled_dict["points_idx"] + sampled_gt_labels = sampled_dict['gt_labels_3d'] + + gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels], + axis=0) + gt_bboxes_3d = gt_bboxes_3d.new_box( + np.concatenate( + [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d])) + + points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d) + points_idx = -1 * np.ones(len(points), dtype=np.int) + # check the points dimension + # points = points.cat([sampled_points, points]) + points = points.cat([points, sampled_points]) + points_idx = np.concatenate([points_idx, sampled_points_idx], axis=0) + + if self.sample_2d: + imgs = input_dict['img'] + lidar2img = input_dict['lidar2img'] + sampled_img = sampled_dict['images'] + sampled_num = len(sampled_gt_bboxes_3d) + imgs, points_keep = self.unified_sample(imgs, lidar2img, + points.tensor.numpy(), + points_idx, gt_bboxes_3d.corners.numpy(), + sampled_img, sampled_num) + + input_dict['img'] = imgs + + if self.modify_points: + points = points[points_keep] + + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.long) + input_dict['points'] = points + + return input_dict + + def unified_sample(self, imgs, lidar2img, points, points_idx, bboxes_3d, sampled_img, sampled_num): + # for boxes + bboxes_3d = np.concatenate([bboxes_3d, np.ones_like(bboxes_3d[..., :1])], -1) + is_raw = np.ones(len(bboxes_3d)) + is_raw[-sampled_num:] = 0 + is_raw = is_raw.astype(bool) + raw_num = len(is_raw)-sampled_num + # for point cloud + points_3d = points[:,:4].copy() + points_3d[:,-1] = 1 + points_keep = np.ones(len(points_3d)).astype(np.bool) + new_imgs = imgs + + assert len(imgs)==len(lidar2img) and len(sampled_img)==sampled_num + for _idx, (_img, _lidar2img) in enumerate(zip(imgs, lidar2img)): + coord_img = bboxes_3d @ _lidar2img.T + coord_img[...,:2] /= coord_img[...,2,None] + depth = coord_img[...,2] + img_mask = (depth > 0).all(axis=-1) + img_count = img_mask.nonzero()[0] + if img_mask.sum() == 0: + continue + depth = depth.mean(1)[img_mask] + coord_img = coord_img[...,:2][img_mask] + minxy = np.min(coord_img, axis=-2) + maxxy = np.max(coord_img, axis=-2) + bbox = np.concatenate([minxy, maxxy], axis=-1).astype(int) + bbox[:,0::2] = np.clip(bbox[:,0::2], a_min=0, a_max=_img.shape[1]-1) + bbox[:,1::2] = np.clip(bbox[:,1::2], a_min=0, a_max=_img.shape[0]-1) + img_mask = ((bbox[:,2:]-bbox[:,:2]) > 1).all(axis=-1) + if img_mask.sum() == 0: + continue + depth = depth[img_mask] + if 'depth' in self.sample_method: + paste_order = depth.argsort() + paste_order = paste_order[::-1] + else: + paste_order = np.arange(len(depth), dtype=np.int64) + img_count = img_count[img_mask][paste_order] + bbox = bbox[img_mask][paste_order] + + paste_mask = -255 * np.ones(_img.shape[:2], dtype=np.int) + fg_mask = np.zeros(_img.shape[:2], dtype=np.int) + # first crop image from raw image + raw_img = [] + for _count, _box in zip(img_count, bbox): + if 
is_raw[_count]: + raw_img.append(_img[_box[1]:_box[3],_box[0]:_box[2]]) + + # then stitch the crops to raw image + for _count, _box in zip(img_count, bbox): + if is_raw[_count]: + if self.mixup_rate < 0: + _img[_box[1]:_box[3],_box[0]:_box[2]] = raw_img.pop(0) + else: + _img[_box[1]:_box[3],_box[0]:_box[2]] = \ + _img[_box[1]:_box[3],_box[0]:_box[2]] * (1 - self.mixup_rate) + raw_img.pop(0) * self.mixup_rate + fg_mask[_box[1]:_box[3],_box[0]:_box[2]] = 1 + else: + img_crop = sampled_img[_count-raw_num] + if len(img_crop)==0: continue + img_crop = cv2.resize(img_crop, tuple(_box[[2,3]]-_box[[0,1]])) + if self.mixup_rate < 0: + _img[_box[1]:_box[3],_box[0]:_box[2]] = img_crop + else: + _img[_box[1]:_box[3],_box[0]:_box[2]] = \ + _img[_box[1]:_box[3],_box[0]:_box[2]] * (1 - self.mixup_rate) + img_crop * self.mixup_rate + + paste_mask[_box[1]:_box[3],_box[0]:_box[2]] = _count + + new_imgs[_idx] = _img + + # calculate modify mask + if self.modify_points: + points_img = points_3d @ _lidar2img.T + points_img[:,:2] /= points_img[:,2,None] + depth = points_img[:,2] + img_mask = depth > 0 + if img_mask.sum() == 0: + continue + img_mask = (points_img[:,0] > 0) & (points_img[:,0] < _img.shape[1]) & \ + (points_img[:,1] > 0) & (points_img[:,1] < _img.shape[0]) & img_mask + points_img = points_img[img_mask].astype(int) + new_mask = paste_mask[points_img[:,1], points_img[:,0]]==(points_idx[img_mask]+raw_num) + raw_fg = (fg_mask == 1) & (paste_mask >= 0) & (paste_mask < raw_num) + raw_bg = (fg_mask == 0) & (paste_mask < 0) + raw_mask = raw_fg[points_img[:,1], points_img[:,0]] | raw_bg[points_img[:,1], points_img[:,0]] + keep_mask = new_mask | raw_mask + points_keep[img_mask] = points_keep[img_mask] & keep_mask + + return new_imgs, points_keep + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f' sample_2d={self.sample_2d},' + repr_str += f' data_root={self.sampler_cfg.data_root},' + repr_str += f' info_path={self.sampler_cfg.info_path},' + repr_str += f' rate={self.sampler_cfg.rate},' + repr_str += f' prepare={self.sampler_cfg.prepare},' + repr_str += f' classes={self.sampler_cfg.classes},' + repr_str += f' sample_groups={self.sampler_cfg.sample_groups}' + return repr_str + + +@PIPELINES.register_module() +class ResizeCropFlipImage(object): + """Random resize, Crop and flip the image + Args: + size (tuple, optional): Fixed padding size. + """ + + def __init__(self, data_aug_conf=None, training=True, pic_wise=False): + self.data_aug_conf = data_aug_conf + self.training = training + self.pic_wise = pic_wise + + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. 
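+
+        Example (a plausible data_aug_conf; the numbers are illustrative
+        and not necessarily the released training setting):
+
+        .. code-block:: python
+
+            data_aug_conf = dict(
+                H=900, W=1600,                  # raw image size
+                final_dim=(320, 800),           # network input size
+                resize_lim=(0.47, 0.625),
+                bot_pct_lim=(0.0, 0.0),
+                rot_lim=(0.0, 0.0),
+                rand_flip=True)
+            transform = ResizeCropFlipImage(data_aug_conf=data_aug_conf,
+                                            training=True)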
+ """ + + imgs = results["img"] + N = len(imgs) + new_imgs = [] + new_depths = [] + resize, resize_dims, crop, flip, rotate = self._sample_augmentation() + for i in range(N): + post_rot = torch.eye(2) + post_tran = torch.zeros(2) + img = imgs[i] + + # augmentation (resize, crop, horizontal flip, rotate) + if self.pic_wise: + resize, resize_dims, crop, flip, rotate = self._sample_augmentation() + img, post_rot2, post_tran2 = self._img_transform( + img, + post_rot, + post_tran, + resize=resize, + resize_dims=resize_dims, + crop=crop, + flip=flip, + rotate=rotate, + ) + if "depths" in results.keys(): + depth = results['depths'][i] + depth = self._depth_transform( + depth, + resize=resize, + resize_dims=self.data_aug_conf["final_dim"], + crop=crop, + flip=flip, + rotate=rotate, + ) + new_depths.append(depth.astype(np.float32)) + + new_imgs.append(img) + results['cam_intrinsic'][i][:2, :3] = post_rot2 @ results['cam_intrinsic'][i][:2, :3] + results['cam_intrinsic'][i][:2, 2] = post_tran2 + results['cam_intrinsic'][i][:2, 2] + + results["img"] = new_imgs + results["depths"] = new_depths + results['lidar2img'] = [results['cam_intrinsic'][i] @ results['lidar2cam'][i] for i in range(len(results['lidar2cam']))] + + return results + + def _get_rot(self, h): + + return torch.Tensor( + [ + [np.cos(h), np.sin(h)], + [-np.sin(h), np.cos(h)], + ] + ) + + def _img_transform(self, img, post_rot, post_tran, resize, resize_dims, crop, flip, rotate): + # adjust image + resized_img = cv2.resize(img, resize_dims) + img = np.zeros((crop[3] - crop[1], crop[2] - crop[0], 3)) + + hsize, wsize = crop[3] - crop[1], crop[2] - crop[0] + dh, dw, sh, sw = crop[1], crop[0], 0, 0 + + if dh < 0: + sh = -dh + hsize += dh + dh = 0 + if dh + hsize > resized_img.shape[0]: + hsize = resized_img.shape[0] - dh + if dw < 0: + sw = -dw + wsize += dw + dw = 0 + if dw + wsize > resized_img.shape[1]: + wsize = resized_img.shape[1] - dw + img[sh : sh + hsize, sw : sw + wsize] = resized_img[dh: dh + hsize, dw: dw + wsize] + + (h, w) = img.shape[:2] + center = (w / 2, h / 2) + if flip: + img = cv2.flip(img, 1) + M = cv2.getRotationMatrix2D(center, rotate, scale=1.0) + img = cv2.warpAffine(img, M, (w, h)) + # post-homography transformation + post_rot *= resize + post_tran -= torch.Tensor(crop[:2]) + if flip: + A = torch.Tensor([[-1, 0], [0, 1]]) + b = torch.Tensor([crop[2] - crop[0], 0]) + post_rot = A.matmul(post_rot) + post_tran = A.matmul(post_tran) + b + A = self._get_rot(rotate / 180 * np.pi) + b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2 + b = A.matmul(-b) + b + post_rot = A.matmul(post_rot) + post_tran = A.matmul(post_tran) + b + + return img, post_rot, post_tran + + def _sample_augmentation(self): + H, W = self.data_aug_conf["H"], self.data_aug_conf["W"] + fH, fW = self.data_aug_conf["final_dim"] + if self.training: + resize = np.random.uniform(*self.data_aug_conf["resize_lim"]) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.random.uniform(*self.data_aug_conf["bot_pct_lim"])) * newH) - fH + crop_w = int(np.random.uniform(0, max(0, newW - fW))) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + if self.data_aug_conf["rand_flip"] and np.random.choice([0, 1]): + flip = True + rotate = np.random.uniform(*self.data_aug_conf["rot_lim"]) + else: + resize = max(fH / H, fW / W) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = int((1 - np.mean(self.data_aug_conf["bot_pct_lim"])) * newH) - fH + crop_w = int(max(0, 
newW - fW) / 2) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + flip = False + rotate = 0 + return resize, resize_dims, crop, flip, rotate + + def _depth_transform(self, cam_depth, resize, resize_dims, crop, flip, rotate): + """ + Input: + cam_depth: Nx3, 3: x,y,d + resize: a float value + resize_dims: self.ida_aug_conf["final_dim"] -> [H, W] + crop: x1, y1, x2, y2 + flip: bool value + rotate: an angle + Output: + cam_depth: [h/down_ratio, w/down_ratio, d] + """ + + H, W = resize_dims + cam_depth[:, :2] = cam_depth[:, :2] * resize + cam_depth[:, 0] -= crop[0] + cam_depth[:, 1] -= crop[1] + if flip: + cam_depth[:, 0] = resize_dims[1] - cam_depth[:, 0] + + cam_depth[:, 0] -= W / 2.0 + cam_depth[:, 1] -= H / 2.0 + + h = rotate / 180 * np.pi + rot_matrix = [ + [np.cos(h), np.sin(h)], + [-np.sin(h), np.cos(h)], + ] + cam_depth[:, :2] = np.matmul(rot_matrix, cam_depth[:, :2].T).T + + cam_depth[:, 0] += W / 2.0 + cam_depth[:, 1] += H / 2.0 + + depth_coords = cam_depth[:, :2].astype(np.int16) + + depth_map = np.zeros((H, W, 3)) + valid_mask = ( + (depth_coords[:, 1] < resize_dims[0]) + & (depth_coords[:, 0] < resize_dims[1]) + & (depth_coords[:, 1] >= 0) + & (depth_coords[:, 0] >= 0) + ) + depth_map[depth_coords[valid_mask, 1], depth_coords[valid_mask, 0], :] = cam_depth[valid_mask, :] + + return depth_map + + +@PIPELINES.register_module() +class GlobalRotScaleTransAll(object): + """Apply global rotation, scaling and translation to a 3D scene. + + Args: + rot_range (list[float]): Range of rotation angle. + Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]). + scale_ratio_range (list[float]): Range of scale ratio. + Defaults to [0.95, 1.05]. + translation_std (list[float]): The standard deviation of translation + noise. This applies random translation to a scene by a noise, which + is sampled from a gaussian distribution whose standard deviation + is set by ``translation_std``. Defaults to [0, 0, 0] + shift_height (bool): Whether to shift height. + (the fourth dimension of indoor points) when scaling. + Defaults to False. + """ + + def __init__(self, + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + shift_height=False): + seq_types = (list, tuple, np.ndarray) + if not isinstance(rot_range, seq_types): + assert isinstance(rot_range, (int, float)), \ + f'unsupported rot_range type {type(rot_range)}' + rot_range = [-rot_range, rot_range] + self.rot_range = rot_range + + assert isinstance(scale_ratio_range, seq_types), \ + f'unsupported scale_ratio_range type {type(scale_ratio_range)}' + self.scale_ratio_range = scale_ratio_range + + if not isinstance(translation_std, seq_types): + assert isinstance(translation_std, (int, float)), \ + f'unsupported translation_std type {type(translation_std)}' + translation_std = [ + translation_std, translation_std, translation_std + ] + assert all([std >= 0 for std in translation_std]), \ + 'translation_std should be positive' + self.translation_std = translation_std + self.shift_height = shift_height + + def _trans_bbox_points(self, input_dict): + """Private function to translate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after translation, 'points', 'pcd_trans' \ + and keys in input_dict['bbox3d_fields'] are updated \ + in the result dict. 
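+
+            Example (sketch of the matrix bookkeeping done below; the
+            translation vector is made up):
+
+            .. code-block:: python
+
+                import numpy as np
+                trans = np.array([0.2, -0.1, 0.0])
+                trans_mat = np.eye(4)
+                trans_mat[:3, -1] = trans
+                # lidar2img absorbs the inverse, so a translated lidar
+                # point still projects to its original pixel:
+                # lidar2img_new = lidar2img_old @ np.linalg.inv(trans_mat)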
+ """ + translation_std = np.array(self.translation_std, dtype=np.float32) + trans_factor = np.random.normal(scale=translation_std, size=3).T + + input_dict['points'].translate(trans_factor) + if 'radar' in input_dict: + input_dict['radar'].translate(trans_factor) + input_dict['pcd_trans'] = trans_factor + for key in input_dict['bbox3d_fields']: + input_dict[key].translate(trans_factor) + + trans_mat = np.eye(4) + trans_mat[:3, -1] = trans_factor + trans_mat_inv = np.linalg.inv(trans_mat) + for view in range(len(input_dict["lidar2img"])): + input_dict["lidar2img"][view] = input_dict["lidar2img"][view] @ trans_mat_inv + input_dict["lidar2cam"][view] = input_dict["lidar2cam"][view] @ trans_mat_inv + + def _rot_bbox_points(self, input_dict): + """Private function to rotate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after rotation, 'points', 'pcd_rotation' \ + and keys in input_dict['bbox3d_fields'] are updated \ + in the result dict. + """ + if 'rot_degree' not in input_dict: + rotation = self.rot_range + noise_rotation = np.random.uniform(rotation[0], rotation[1]) + else: + noise_rotation = input_dict['rot_degree'] + + # if no bbox in input_dict, only rotate points + if len(input_dict['bbox3d_fields']) == 0: + if 'rot_degree' not in input_dict: + rot_mat_T = input_dict['points'].rotate(noise_rotation) + if 'radar' in input_dict: + input_dict['radar'].rotate(noise_rotation) + else: + rot_mat_T = input_dict['points'].rotate(-noise_rotation) + if 'radar' in input_dict: + input_dict['radar'].rotate(-noise_rotation) + input_dict['pcd_rotation'] = rot_mat_T + + rot_mat = torch.eye(4) + rot_mat[:3, :3].copy_(rot_mat_T) + rot_mat[0, 1], rot_mat[1, 0] = -rot_mat[0, 1], -rot_mat[1, 0] + rot_mat_inv = torch.inverse(rot_mat) + for view in range(len(input_dict["lidar2img"])): + input_dict["lidar2img"][view] = (torch.tensor(input_dict["lidar2img"][view]).float() @ rot_mat_inv).numpy() + input_dict["lidar2cam"][view] = (torch.tensor(input_dict["lidar2cam"][view]).float() @ rot_mat_inv).numpy() + return + + # rotate points with bboxes + for key in input_dict['bbox3d_fields']: + if len(input_dict[key].tensor) != 0: + points, rot_mat_T = input_dict[key].rotate( + noise_rotation, input_dict['points']) + input_dict['points'] = points + input_dict['pcd_rotation'] = rot_mat_T + if 'radar' in input_dict: + input_dict['radar'].rotate(-noise_rotation) + + rot_mat = torch.eye(4) + rot_mat[:3, :3].copy_(rot_mat_T) + rot_mat[0, 1], rot_mat[1, 0] = -rot_mat[0, 1], -rot_mat[1, 0] + rot_mat_inv = torch.inverse(rot_mat) + for view in range(len(input_dict["lidar2img"])): + input_dict["lidar2img"][view] = (torch.tensor(input_dict["lidar2img"][view]).float() @ rot_mat_inv).numpy() + input_dict["lidar2cam"][view] = (torch.tensor(input_dict["lidar2cam"][view]).float() @ rot_mat_inv).numpy() + + + def _scale_bbox_points(self, input_dict): + """Private function to scale bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'points'and keys in \ + input_dict['bbox3d_fields'] are updated in the result dict. 
+ """ + scale = input_dict['pcd_scale_factor'] + points = input_dict['points'] + points.scale(scale) + if self.shift_height: + assert 'height' in points.attribute_dims.keys(), \ + 'setting shift_height=True but points have no height attribute' + points.tensor[:, points.attribute_dims['height']] *= scale + input_dict['points'] = points + + if 'radar' in input_dict: + input_dict['radar'].scale(scale) + + for key in input_dict['bbox3d_fields']: + input_dict[key].scale(scale) + + scale_mat = torch.tensor( + [ + [scale, 0, 0, 0], + [0, scale, 0, 0], + [0, 0, scale, 0], + [0, 0, 0, 1], + ] + ) + scale_mat_inv = torch.inverse(scale_mat) + for view in range(len(input_dict["lidar2img"])): + input_dict["lidar2img"][view] = (torch.tensor(input_dict["lidar2img"][view]).float() @ scale_mat_inv).numpy() + input_dict["lidar2cam"][view] = (torch.tensor(input_dict["lidar2cam"][view]).float() @ scale_mat_inv).numpy() + + def _random_scale(self, input_dict): + """Private function to randomly set the scale factor. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'pcd_scale_factor' are updated \ + in the result dict. + """ + scale_factor = np.random.uniform(self.scale_ratio_range[0], + self.scale_ratio_range[1]) + input_dict['pcd_scale_factor'] = scale_factor + + def __call__(self, input_dict): + """Private function to rotate, scale and translate bounding boxes and \ + points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'points', 'pcd_rotation', + 'pcd_scale_factor', 'pcd_trans' and keys in \ + input_dict['bbox3d_fields'] are updated in the result dict. + """ + if 'transformation_3d_flow' not in input_dict: + input_dict['transformation_3d_flow'] = [] + + self._rot_bbox_points(input_dict) + + if 'pcd_scale_factor' not in input_dict: + self._random_scale(input_dict) + self._scale_bbox_points(input_dict) + + self._trans_bbox_points(input_dict) + + input_dict['transformation_3d_flow'].extend(['R', 'S', 'T']) + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(rot_range={self.rot_range},' + repr_str += f' scale_ratio_range={self.scale_ratio_range},' + repr_str += f' translation_std={self.translation_std},' + repr_str += f' shift_height={self.shift_height})' + return repr_str + + +@PIPELINES.register_module() +class CustomRandomFlip3D(object): + """Flip the points & bbox. + + If the input dict contains the key "flip", then the flag will be used, + otherwise it will be randomly decided by a ratio specified in the init + method. + + Args: + sync_2d (bool, optional): Whether to apply flip according to the 2D + images. If True, it will apply the same flip as that to 2D images. + If False, it will decide whether to flip randomly and independently + to that of 2D images. Defaults to True. + flip_ratio_bev_horizontal (float, optional): The flipping probability + in horizontal direction. Defaults to 0.0. + flip_ratio_bev_vertical (float, optional): The flipping probability + in vertical direction. Defaults to 0.0. 
+ """ + + def __init__(self, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0, + **kwargs): + super(CustomRandomFlip3D, self).__init__() + self.flip_ratio_bev_horizontal = flip_ratio_bev_horizontal + self.flip_ratio_bev_vertical = flip_ratio_bev_vertical + if flip_ratio_bev_horizontal is not None: + assert isinstance( + flip_ratio_bev_horizontal, + (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1 + if flip_ratio_bev_vertical is not None: + assert isinstance( + flip_ratio_bev_vertical, + (int, float)) and 0 <= flip_ratio_bev_vertical <= 1 + + def random_flip_data_3d(self, input_dict, direction='horizontal'): + """Flip 3D data randomly. + + Args: + input_dict (dict): Result dict from loading pipeline. + direction (str): Flip direction. Default: horizontal. + + Returns: + dict: Flipped results, 'points', 'bbox3d_fields' keys are \ + updated in the result dict. + """ + assert direction in ['horizontal', 'vertical'] + if len(input_dict['bbox3d_fields']) == 0: # test mode + input_dict['bbox3d_fields'].append('empty_box3d') + input_dict['empty_box3d'] = input_dict['box_type_3d']( + np.array([], dtype=np.float32)) + assert len(input_dict['bbox3d_fields']) == 1 + for key in input_dict['bbox3d_fields']: + if 'points' in input_dict: + input_dict['points'] = input_dict[key].flip( + direction, points=input_dict['points']) + else: + input_dict[key].flip(direction) + if 'radar' in input_dict: + input_dict['radar'].flip(direction) + + def __call__(self, input_dict): + """Call function to flip points, values in the ``bbox3d_fields`` and \ + also flip 2D image and its annotations. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction', \ + 'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added \ + into result dict. + """ + if 'pcd_horizontal_flip' not in input_dict: + flip_horizontal = True if np.random.rand( + ) < self.flip_ratio_bev_horizontal else False + input_dict['pcd_horizontal_flip'] = flip_horizontal + if 'pcd_vertical_flip' not in input_dict: + flip_vertical = True if np.random.rand( + ) < self.flip_ratio_bev_vertical else False + input_dict['pcd_vertical_flip'] = flip_vertical + + if 'transformation_3d_flow' not in input_dict: + input_dict['transformation_3d_flow'] = [] + + flip_mat = np.eye(4) + if input_dict['pcd_horizontal_flip']: + self.random_flip_data_3d(input_dict, 'horizontal') + input_dict['transformation_3d_flow'].extend(['HF']) + flip_mat[1, 1] = -1 + if input_dict['pcd_vertical_flip']: + self.random_flip_data_3d(input_dict, 'vertical') + input_dict['transformation_3d_flow'].extend(['VF']) + flip_mat[0, 0] = -1 + for view in range(len(input_dict["lidar2img"])): + input_dict["lidar2img"][view] = input_dict["lidar2img"][view] @ flip_mat + input_dict["lidar2cam"][view] = input_dict["lidar2cam"][view] @ flip_mat + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(sync_2d={self.sync_2d},' + repr_str += f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})' + return repr_str + + +@PIPELINES.register_module() +class ModalMask3D(object): + + def __init__(self, mode='test', mask_modal='image', **kwargs): + super(ModalMask3D, self).__init__() + self.mode = mode + self.mask_modal = mask_modal + + def __call__(self, input_dict): + if self.mode == 'test': + if self.mask_modal == 'image': + input_dict['img'] = [0. 
* item for item in input_dict['img']] + if self.mask_modal == 'points': + input_dict['points'].tensor = input_dict['points'].tensor * 0.0 + else: + seed = np.random.rand() + if seed > 0.75: + input_dict['img'] = [0. * item for item in input_dict['img']] + elif seed > 0.5: + input_dict['points'].tensor = input_dict['points'].tensor * 0.0 + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + return repr_str + + +@PIPELINES.register_module() +class GlobalRotScaleTransImage(object): + """Random resize, Crop and flip the image + Args: + size (tuple, optional): Fixed padding size. + """ + + def __init__( + self, + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=False, + training=True, + flip_dx_ratio=0.0, + flip_dy_ratio=0.0 + ): + + self.rot_range = rot_range + self.scale_ratio_range = scale_ratio_range + self.translation_std = translation_std + + self.reverse_angle = reverse_angle + self.training = training + + self.flip_dx_ratio = flip_dx_ratio + self.flip_dy_ratio = flip_dy_ratio + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. + """ + # random rotate + rot_angle = np.random.uniform(*self.rot_range) + + self.rotate_bev_along_z(results, rot_angle) + if self.reverse_angle: + rot_angle *= -1 + results["gt_bboxes_3d"].rotate( + np.array(rot_angle) + ) # mmdet LiDARInstance3DBoxes存的角度方向是反的(rotate函数实现的是绕着z轴由y向x转) + + # random scale + scale_ratio = np.random.uniform(*self.scale_ratio_range) + self.scale_xyz(results, scale_ratio) + results["gt_bboxes_3d"].scale(scale_ratio) + + # TODO: support translation + + self.flip_xy(results) + + return results + + def rotate_bev_along_z(self, results, angle): + rot_cos = torch.cos(torch.tensor(angle)) + rot_sin = torch.sin(torch.tensor(angle)) + + rot_mat = torch.tensor([[rot_cos, -rot_sin, 0, 0], [rot_sin, rot_cos, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) + rot_mat_inv = torch.inverse(rot_mat) + + num_view = len(results["lidar2img"]) + for view in range(num_view): + results["lidar2img"][view] = (torch.tensor(results["lidar2img"][view]).float() @ rot_mat_inv).numpy() + results["lidar2cam"][view] = (torch.tensor(results["lidar2cam"][view]).float() @ rot_mat_inv).numpy() + + return + + def scale_xyz(self, results, scale_ratio): + rot_mat = torch.tensor( + [ + [scale_ratio, 0, 0, 0], + [0, scale_ratio, 0, 0], + [0, 0, scale_ratio, 0], + [0, 0, 0, 1], + ] + ) + + rot_mat_inv = torch.inverse(rot_mat) + + num_view = len(results["lidar2img"]) + for view in range(num_view): + results["lidar2img"][view] = (torch.tensor(results["lidar2img"][view]).float() @ rot_mat_inv).numpy() + results["lidar2cam"][view] = (torch.tensor(results["lidar2cam"][view]).float() @ rot_mat_inv).numpy() + return + + def flip_xy(self, results): + mat = torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ] + ) + if np.random.rand() < self.flip_dx_ratio: + mat[0][0] = -1 + results["gt_bboxes_3d"].flip(bev_direction='vertical') + if np.random.rand() < self.flip_dy_ratio: + mat[1][1] = -1 + results["gt_bboxes_3d"].flip(bev_direction='horizontal') + + num_view = len(results['lidar2img']) + for view in range(num_view): + results["lidar2img"][view] = (torch.tensor(results["lidar2img"][view]).float() @ mat.float()).numpy() + results["lidar2cam"][view] = 
(torch.tensor(results["lidar2cam"][view]).float() @ mat.float()).numpy() + return \ No newline at end of file diff --git a/projects/mmdet3d_plugin/mmcv_custom/__init__.py b/projects/mmdet3d_plugin/mmcv_custom/__init__.py new file mode 100644 index 0000000..fadd57b --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/__init__.py @@ -0,0 +1,2 @@ +from .runner import * +from .ops import * \ No newline at end of file diff --git a/projects/mmdet3d_plugin/mmcv_custom/ops/__init__.py b/projects/mmdet3d_plugin/mmcv_custom/ops/__init__.py new file mode 100644 index 0000000..1bd9149 --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/ops/__init__.py @@ -0,0 +1 @@ +from .voxel import * \ No newline at end of file diff --git a/projects/mmdet3d_plugin/mmcv_custom/ops/voxel/__init__.py b/projects/mmdet3d_plugin/mmcv_custom/ops/voxel/__init__.py new file mode 100644 index 0000000..24958c4 --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/ops/voxel/__init__.py @@ -0,0 +1 @@ +from .spconv_voxelize import SPConvVoxelization \ No newline at end of file diff --git a/projects/mmdet3d_plugin/mmcv_custom/ops/voxel/spconv_voxelize.py b/projects/mmdet3d_plugin/mmcv_custom/ops/voxel/spconv_voxelize.py new file mode 100644 index 0000000..0477d1f --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/ops/voxel/spconv_voxelize.py @@ -0,0 +1,71 @@ +# Copyright (c) 2023 megvii-model. All Rights Reserved. + +import numpy as np +from torch import nn +from spconv.pytorch.utils import PointToVoxel # spconv-cu111 2.1.21 +import torch +import torch.nn.functional as F +from torch.nn.modules.utils import _pair + + +class SPConvVoxelization(nn.Module): + def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels, num_point_features, device=torch.device("cuda")): + super().__init__() + assert len(voxel_size) == 3 + assert len(point_cloud_range) == 6 + self.voxel_size = np.array(voxel_size) + self.point_cloud_range = np.array(point_cloud_range) + self.max_num_points = max_num_points + self.num_point_features = num_point_features + self.device = device + if isinstance(max_voxels, tuple): + self.max_voxels = max_voxels + else: + self.max_voxels = _pair(max_voxels) + self.voxel_generator = PointToVoxel( + vsize_xyz=voxel_size, + coors_range_xyz=point_cloud_range, + max_num_points_per_voxel=max_num_points, + max_num_voxels=self.max_voxels[0], + num_point_features=num_point_features, + device=device, + ) + grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(voxel_size) + self.grid_size = np.round(grid_size).astype(np.int64) + + def train(self, mode: bool = True): + if mode: + self.voxel_generator = PointToVoxel( + vsize_xyz=self.voxel_size.tolist(), + coors_range_xyz=self.point_cloud_range.tolist(), + max_num_points_per_voxel=self.max_num_points, + max_num_voxels=self.max_voxels[0], + num_point_features=self.num_point_features, + device=self.device, + ) + else: + self.voxel_generator = PointToVoxel( + vsize_xyz=self.voxel_size.tolist(), + coors_range_xyz=self.point_cloud_range.tolist(), + max_num_points_per_voxel=self.max_num_points, + max_num_voxels=self.max_voxels[1], + num_point_features=self.num_point_features, + device=self.device, + ) + + return super().train(mode) + + def forward(self, points): + voxel_output = self.voxel_generator(points) + voxels, coordinates, num_points = voxel_output + return torch.clone(voxels), torch.clone(coordinates), torch.clone(num_points) + + def __repr__(self): + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'voxel_size=' + 
str(self.voxel_size) + tmpstr += ', point_cloud_range=' + str(self.point_cloud_range) + tmpstr += ', max_num_points=' + str(self.max_num_points) + tmpstr += ', max_voxels=' + str(self.max_voxels) + tmpstr += ', num_point_features=' + str(self.num_point_features) + tmpstr += ')' + return tmpstr diff --git a/projects/mmdet3d_plugin/mmcv_custom/runner/__init__.py b/projects/mmdet3d_plugin/mmcv_custom/runner/__init__.py new file mode 100644 index 0000000..c3c024c --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/runner/__init__.py @@ -0,0 +1 @@ +from .hooks import * \ No newline at end of file diff --git a/projects/mmdet3d_plugin/mmcv_custom/runner/hooks/__init__.py b/projects/mmdet3d_plugin/mmcv_custom/runner/hooks/__init__.py new file mode 100644 index 0000000..52c9ce1 --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/runner/hooks/__init__.py @@ -0,0 +1 @@ +from .optimizer import CustomFp16OptimizerHook \ No newline at end of file diff --git a/projects/mmdet3d_plugin/mmcv_custom/runner/hooks/optimizer.py b/projects/mmdet3d_plugin/mmcv_custom/runner/hooks/optimizer.py new file mode 100644 index 0000000..22111fe --- /dev/null +++ b/projects/mmdet3d_plugin/mmcv_custom/runner/hooks/optimizer.py @@ -0,0 +1,23 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv.runner.hooks.optimizer import Fp16OptimizerHook +from mmcv.runner.hooks import HOOKS + + +@HOOKS.register_module() +class CustomFp16OptimizerHook(Fp16OptimizerHook): + + def __init__(self, + custom_fp16={}, + *args, + **kwargs): + super(CustomFp16OptimizerHook, self).__init__(*args, **kwargs) + self.custom_fp16 = custom_fp16 + + def before_run(self, runner) -> None: + super().before_run(runner) + for module_name, v in self.custom_fp16.items(): + runner.model.module._modules[module_name].fp16_enabled = v + \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/__init__.py b/projects/mmdet3d_plugin/models/__init__.py new file mode 100644 index 0000000..980210a --- /dev/null +++ b/projects/mmdet3d_plugin/models/__init__.py @@ -0,0 +1,5 @@ +from .backbones import * +from .detectors import * +from .dense_heads import * +from .necks import * + diff --git a/projects/mmdet3d_plugin/models/backbones/__init__.py b/projects/mmdet3d_plugin/models/backbones/__init__.py new file mode 100644 index 0000000..dafe8e3 --- /dev/null +++ b/projects/mmdet3d_plugin/models/backbones/__init__.py @@ -0,0 +1,3 @@ +from .vovnet import VoVNet + +__all__ = ['VoVNet', ] \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/backbones/vovnet.py b/projects/mmdet3d_plugin/models/backbones/vovnet.py new file mode 100644 index 0000000..33c0b40 --- /dev/null +++ b/projects/mmdet3d_plugin/models/backbones/vovnet.py @@ -0,0 +1,390 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2023 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Copyright (c) Youngwan Lee (ETRI) All Rights Reserved. +# Copyright 2021 Toyota Research Institute. All rights reserved. 
+# ------------------------------------------------------------------------ + + +from collections import OrderedDict +from mmcv.runner import BaseModule +from mmdet.models.builder import BACKBONES +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.modules.batchnorm import _BatchNorm + + +VoVNet19_slim_dw_eSE = { + 'stem': [64, 64, 64], + 'stage_conv_ch': [64, 80, 96, 112], + 'stage_out_ch': [112, 256, 384, 512], + "layer_per_block": 3, + "block_per_stage": [1, 1, 1, 1], + "eSE": True, + "dw": True +} + +VoVNet19_dw_eSE = { + 'stem': [64, 64, 64], + "stage_conv_ch": [128, 160, 192, 224], + "stage_out_ch": [256, 512, 768, 1024], + "layer_per_block": 3, + "block_per_stage": [1, 1, 1, 1], + "eSE": True, + "dw": True +} + +VoVNet19_slim_eSE = { + 'stem': [64, 64, 128], + 'stage_conv_ch': [64, 80, 96, 112], + 'stage_out_ch': [112, 256, 384, 512], + 'layer_per_block': 3, + 'block_per_stage': [1, 1, 1, 1], + 'eSE': True, + "dw": False +} + +VoVNet19_eSE = { + 'stem': [64, 64, 128], + "stage_conv_ch": [128, 160, 192, 224], + "stage_out_ch": [256, 512, 768, 1024], + "layer_per_block": 3, + "block_per_stage": [1, 1, 1, 1], + "eSE": True, + "dw": False +} + +VoVNet39_eSE = { + 'stem': [64, 64, 128], + "stage_conv_ch": [128, 160, 192, 224], + "stage_out_ch": [256, 512, 768, 1024], + "layer_per_block": 5, + "block_per_stage": [1, 1, 2, 2], + "eSE": True, + "dw": False +} + +VoVNet57_eSE = { + 'stem': [64, 64, 128], + "stage_conv_ch": [128, 160, 192, 224], + "stage_out_ch": [256, 512, 768, 1024], + "layer_per_block": 5, + "block_per_stage": [1, 1, 4, 3], + "eSE": True, + "dw": False +} + +VoVNet99_eSE = { + 'stem': [64, 64, 128], + "stage_conv_ch": [128, 160, 192, 224], + "stage_out_ch": [256, 512, 768, 1024], + "layer_per_block": 5, + "block_per_stage": [1, 3, 9, 3], + "eSE": True, + "dw": False +} + +_STAGE_SPECS = { + "V-19-slim-dw-eSE": VoVNet19_slim_dw_eSE, + "V-19-dw-eSE": VoVNet19_dw_eSE, + "V-19-slim-eSE": VoVNet19_slim_eSE, + "V-19-eSE": VoVNet19_eSE, + "V-39-eSE": VoVNet39_eSE, + "V-57-eSE": VoVNet57_eSE, + "V-99-eSE": VoVNet99_eSE, +} + + +def dw_conv3x3(in_channels, out_channels, module_name, postfix, stride=1, kernel_size=3, padding=1): + """3x3 convolution with padding""" + return [ + ( + '{}_{}/dw_conv3x3'.format(module_name, postfix), + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=out_channels, + bias=False + ) + ), + ( + '{}_{}/pw_conv1x1'.format(module_name, postfix), + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, bias=False) + ), + ('{}_{}/pw_norm'.format(module_name, postfix), nn.BatchNorm2d(out_channels)), + ('{}_{}/pw_relu'.format(module_name, postfix), nn.ReLU(inplace=True)), + ] + + +def conv3x3(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=3, padding=1): + """3x3 convolution with padding""" + return [ + ( + f"{module_name}_{postfix}/conv", + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f"{module_name}_{postfix}/norm", nn.BatchNorm2d(out_channels)), + (f"{module_name}_{postfix}/relu", nn.ReLU(inplace=True)), + ] + + +def conv1x1(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=1, padding=0): + """1x1 convolution with padding""" + return [ + ( + f"{module_name}_{postfix}/conv", + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + 
padding=padding, + groups=groups, + bias=False, + ), + ), + (f"{module_name}_{postfix}/norm", nn.BatchNorm2d(out_channels)), + (f"{module_name}_{postfix}/relu", nn.ReLU(inplace=True)), + ] + + +class Hsigmoid(nn.Module): + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 + + +class eSEModule(nn.Module): + def __init__(self, channel, reduction=4): + super(eSEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) + self.hsigmoid = Hsigmoid() + + def forward(self, x): + input = x + x = self.avg_pool(x) + x = self.fc(x) + x = self.hsigmoid(x) + return input * x + + +class _OSA_module(nn.Module): + def __init__( + self, in_ch, stage_ch, concat_ch, layer_per_block, module_name, SE=False, identity=False, depthwise=False + ): + + super(_OSA_module, self).__init__() + + self.identity = identity + self.depthwise = depthwise + self.isReduced = False + self.layers = nn.ModuleList() + in_channel = in_ch + if self.depthwise and in_channel != stage_ch: + self.isReduced = True + self.conv_reduction = nn.Sequential( + OrderedDict(conv1x1(in_channel, stage_ch, "{}_reduction".format(module_name), "0")) + ) + for i in range(layer_per_block): + if self.depthwise: + self.layers.append(nn.Sequential(OrderedDict(dw_conv3x3(stage_ch, stage_ch, module_name, i)))) + else: + self.layers.append(nn.Sequential(OrderedDict(conv3x3(in_channel, stage_ch, module_name, i)))) + in_channel = stage_ch + + # feature aggregation + in_channel = in_ch + layer_per_block * stage_ch + self.concat = nn.Sequential(OrderedDict(conv1x1(in_channel, concat_ch, module_name, "concat"))) + + self.ese = eSEModule(concat_ch) + + def forward(self, x): + + identity_feat = x + + output = [] + output.append(x) + if self.depthwise and self.isReduced: + x = self.conv_reduction(x) + for layer in self.layers: + x = layer(x) + output.append(x) + + x = torch.cat(output, dim=1) + xt = self.concat(x) + + xt = self.ese(xt) + + if self.identity: + xt = xt + identity_feat + + return xt + + +class _OSA_stage(nn.Sequential): + def __init__( + self, in_ch, stage_ch, concat_ch, block_per_stage, layer_per_block, stage_num, SE=False, depthwise=False + ): + + super(_OSA_stage, self).__init__() + + if not stage_num == 2: + self.add_module("Pooling", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)) + + if block_per_stage != 1: + SE = False + module_name = f"OSA{stage_num}_1" + self.add_module( + module_name, _OSA_module(in_ch, stage_ch, concat_ch, layer_per_block, module_name, SE, depthwise=depthwise) + ) + for i in range(block_per_stage - 1): + if i != block_per_stage - 2: # last block + SE = False + module_name = f"OSA{stage_num}_{i + 2}" + self.add_module( + module_name, + _OSA_module( + concat_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE, + identity=True, + depthwise=depthwise + ), + ) + + +@BACKBONES.register_module() +class VoVNet(BaseModule): + def __init__(self, spec_name, input_ch=3, out_features=None, + frozen_stages=-1, norm_eval=True, pretrained=None, init_cfg=None): + """ + Args: + input_ch(int) : the number of input channel + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "stage2" ... 
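+
+        Example (hedged config sketch in the mmdet style; the checkpoint
+        path is a placeholder):
+
+        .. code-block:: python
+
+            img_backbone = dict(
+                type='VoVNet',
+                spec_name='V-99-eSE',
+                out_features=['stage4', 'stage5'],
+                frozen_stages=-1,
+                norm_eval=True,
+                init_cfg=dict(type='Pretrained',
+                              checkpoint='ckpts/vovnet99.pth'))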
+ """ + super(VoVNet, self).__init__(init_cfg) + self.fp16_enabled = False + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + stage_specs = _STAGE_SPECS[spec_name] + + stem_ch = stage_specs["stem"] + config_stage_ch = stage_specs["stage_conv_ch"] + config_concat_ch = stage_specs["stage_out_ch"] + block_per_stage = stage_specs["block_per_stage"] + layer_per_block = stage_specs["layer_per_block"] + SE = stage_specs["eSE"] + depthwise = stage_specs["dw"] + + self._out_features = out_features + + # Stem module + conv_type = dw_conv3x3 if depthwise else conv3x3 + stem = conv3x3(input_ch, stem_ch[0], "stem", "1", 2) + stem += conv_type(stem_ch[0], stem_ch[1], "stem", "2", 1) + stem += conv_type(stem_ch[1], stem_ch[2], "stem", "3", 2) + self.add_module("stem", nn.Sequential((OrderedDict(stem)))) + current_stirde = 4 + self._out_feature_strides = {"stem": current_stirde, "stage2": current_stirde} + self._out_feature_channels = {"stem": stem_ch[2]} + + stem_out_ch = [stem_ch[2]] + in_ch_list = stem_out_ch + config_concat_ch[:-1] + # OSA stages + self.stage_names = [] + for i in range(4): # num_stages + name = "stage%d" % (i + 2) # stage 2 ... stage 5 + self.stage_names.append(name) + self.add_module( + name, + _OSA_stage( + in_ch_list[i], + config_stage_ch[i], + config_concat_ch[i], + block_per_stage[i], + layer_per_block, + i + 2, + SE, + depthwise, + ), + ) + + self._out_feature_channels[name] = config_concat_ch[i] + if not i == 0: + self._out_feature_strides[name] = current_stirde = int(current_stirde * 2) + + # initialize weights + # self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + + def init_weights(self): + super().init_weights() + self._freeze_stages() + + def forward(self, x): + outputs = {} + x = self.stem(x) + if "stem" in self._out_features: + outputs["stem"] = x + for name in self.stage_names: + x = getattr(self, name)(x) + if name in self._out_features: + outputs[name] = x + + return outputs + + def _freeze_stages(self): + if self.frozen_stages >= 0: + m = getattr(self, 'stem') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'stage{i+1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(VoVNet, self).train(mode) + # self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/dense_heads/__init__.py b/projects/mmdet3d_plugin/models/dense_heads/__init__.py new file mode 100644 index 0000000..047394e --- /dev/null +++ b/projects/mmdet3d_plugin/models/dense_heads/__init__.py @@ -0,0 +1,8 @@ +from .cmt_head import ( + SeparateTaskHead, + CmtHead, + CmtImageHead, + CmtLidarHead +) + +__all__ = ['SeparateTaskHead', 'CmtHead', 'CmtLidarHead', 'CmtImageHead'] \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/dense_heads/cmt_head.py b/projects/mmdet3d_plugin/models/dense_heads/cmt_head.py new file mode 100644 index 0000000..725956f --- 
+++ b/projects/mmdet3d_plugin/models/dense_heads/cmt_head.py
@@ -0,0 +1,1086 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2023 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d)
+# Copyright (c) OpenMMLab. All rights reserved.
+# ------------------------------------------------------------------------
+
+import math
+import copy
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, build_conv_layer
+from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding
+from mmcv.runner import BaseModule, force_fp32
+from mmcv.cnn import xavier_init, constant_init, kaiming_init
+from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
+                        build_assigner, build_sampler, multi_apply,
+                        reduce_mean, build_bbox_coder)
+from mmdet.models.utils import build_transformer
+from mmdet.models import HEADS, build_loss
+from mmdet.models.utils import NormedLinear
+from mmdet.models.dense_heads.anchor_free_head import AnchorFreeHead
+from mmdet.models.utils.transformer import inverse_sigmoid
+from mmdet3d.models.utils.clip_sigmoid import clip_sigmoid
+from mmdet3d.models import builder
+from mmdet3d.core import (circle_nms, draw_heatmap_gaussian, gaussian_radius,
+                          xywhr2xyxyr)
+from einops import rearrange
+import collections
+
+from functools import reduce
+from projects.mmdet3d_plugin.core.bbox.util import normalize_bbox
+
+
+def pos2embed(pos, num_pos_feats=128, temperature=10000):
+    scale = 2 * math.pi
+    pos = pos * scale
+    dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
+    dim_t = 2 * (dim_t // 2) / num_pos_feats + 1
+    pos_x = pos[..., 0, None] / dim_t
+    pos_y = pos[..., 1, None] / dim_t
+    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
+    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
+    posemb = torch.cat((pos_y, pos_x), dim=-1)
+    return posemb
+
+
+class LayerNormFunction(torch.autograd.Function):
+
+    @staticmethod
+    def forward(ctx, x, weight, bias, groups, eps):
+        ctx.groups = groups
+        ctx.eps = eps
+        N, C, L = x.size()
+        x = x.view(N, groups, C // groups, L)
+        mu = x.mean(2, keepdim=True)
+        var = (x - mu).pow(2).mean(2, keepdim=True)
+        y = (x - mu) / (var + eps).sqrt()
+        ctx.save_for_backward(y, var, weight)
+        y = weight.view(1, C, 1) * y.view(N, C, L) + bias.view(1, C, 1)
+        return y
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        groups = ctx.groups
+        eps = ctx.eps
+
+        N, C, L = grad_output.size()
+        # ctx.saved_variables is gone in modern PyTorch; use saved_tensors
+        y, var, weight = ctx.saved_tensors
+        g = grad_output * weight.view(1, C, 1)
+        g = g.view(N, groups, C // groups, L)
+        mean_g = g.mean(dim=2, keepdim=True)
+        mean_gy = (g * y).mean(dim=2, keepdim=True)
+        gx = 1. / torch.sqrt(var + eps) * (g - y * mean_gy - mean_g)
+        return gx.view(N, C, L), (grad_output * y.view(N, C, L)).sum(dim=2).sum(
+            dim=0), grad_output.sum(dim=2).sum(dim=0), None, None
+
+
+class GroupLayerNorm1d(nn.Module):
+
+    def __init__(self, channels, groups=1, eps=1e-6):
+        super(GroupLayerNorm1d, self).__init__()
+        self.register_parameter('weight', nn.Parameter(torch.ones(channels)))
+        self.register_parameter('bias', nn.Parameter(torch.zeros(channels)))
+        self.groups = groups
+        self.eps = eps
+
+    def forward(self, x):
+        return LayerNormFunction.apply(x, self.weight, self.bias, self.groups, self.eps)
+
+
+@HEADS.register_module()
+class SeparateTaskHead(BaseModule):
+    """Separate prediction head, adapted from CenterHead's SeparateHead.
+
+    Args:
+        in_channels (int): Input channels for conv_layer.
+        heads (dict): Conv information.
+        groups (int): Number of parallel head groups, typically the number
+            of decoder layers. Default: 1.
+        head_conv (int): Output channels. Default: 64.
+        final_kernel (int): Kernel size for the last conv layer.
+            Default: 1.
+        init_bias (float): Initial bias. Default: -2.19.
+    """
+
+    def __init__(self,
+                 in_channels,
+                 heads,
+                 groups=1,
+                 head_conv=64,
+                 final_kernel=1,
+                 init_bias=-2.19,
+                 init_cfg=None,
+                 **kwargs):
+        assert init_cfg is None, 'To prevent abnormal initialization ' \
+            'behavior, init_cfg is not allowed to be set'
+        super(SeparateTaskHead, self).__init__(init_cfg=init_cfg)
+        self.heads = heads
+        self.groups = groups
+        self.init_bias = init_bias
+        for head in self.heads:
+            classes, num_conv = self.heads[head]
+
+            conv_layers = []
+            c_in = in_channels
+            for i in range(num_conv - 1):
+                conv_layers.extend([
+                    nn.Conv1d(
+                        c_in * groups,
+                        head_conv * groups,
+                        kernel_size=final_kernel,
+                        stride=1,
+                        padding=final_kernel // 2,
+                        groups=groups,
+                        bias=False),
+                    GroupLayerNorm1d(head_conv * groups, groups=groups),
+                    nn.ReLU(inplace=True)
+                ])
+                c_in = head_conv
+
+            conv_layers.append(
+                nn.Conv1d(
+                    head_conv * groups,
+                    classes * groups,
+                    kernel_size=final_kernel,
+                    stride=1,
+                    padding=final_kernel // 2,
+                    groups=groups,
+                    bias=True))
+            conv_layers = nn.Sequential(*conv_layers)
+
+            self.__setattr__(head, conv_layers)
+
+        if init_cfg is None:
+            self.init_cfg = dict(type='Kaiming', layer='Conv1d')
+
+    def init_weights(self):
+        """Initialize weights."""
+        super().init_weights()
+        for head in self.heads:
+            if head == 'cls_logits':
+                self.__getattr__(head)[-1].bias.data.fill_(self.init_bias)
+
+    def forward(self, x):
+        """Forward function for SeparateTaskHead.
+
+        Args:
+            x (torch.Tensor): Input feature map with the shape of
+                [N, B, query, C].
+
+        Returns:
+            dict[str, torch.Tensor]: contains the following keys:
+
+                -reg (torch.Tensor): 2D regression value with the \
+                    shape of [N, B, query, 2].
+                -height (torch.Tensor): Height value with the \
+                    shape of [N, B, query, 1].
+                -dim (torch.Tensor): Size value with the shape \
+                    of [N, B, query, 3].
+                -rot (torch.Tensor): Rotation value with the \
+                    shape of [N, B, query, 2].
+                -vel (torch.Tensor): Velocity value with the \
+                    shape of [N, B, query, 2].
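+
+        Example (illustrative only; ``groups`` must equal N, the number of
+            stacked decoder layers in the first axis):
+            >>> head = SeparateTaskHead(64, dict(reg=(2, 2)), groups=6)
+            >>> x = torch.randn(6, 2, 100, 64)  # [N, B, query, C]
+            >>> head(x)['reg'].shape
+            torch.Size([6, 2, 100, 2])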
+ """ + N, B, query_num, c1 = x.shape + x = rearrange(x, "n b q c -> b (n c) q") + ret_dict = dict() + + for head in self.heads: + head_output = self.__getattr__(head)(x) + ret_dict[head] = rearrange(head_output, "b (n c) q -> n b q c", n=N) + + return ret_dict + + +@HEADS.register_module() +class CmtHead(BaseModule): + + def __init__(self, + in_channels, + num_query=900, + hidden_dim=128, + depth_num=64, + norm_bbox=True, + downsample_scale=8, + scalar=10, + noise_scale=1.0, + noise_trans=0.0, + dn_weight=1.0, + split=0.75, + train_cfg=None, + test_cfg=None, + common_heads=dict( + center=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2) + ), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + transformer=None, + bbox_coder=None, + loss_cls=dict( + type="FocalLoss", + use_sigmoid=True, + reduction="mean", + gamma=2, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict( + type="L1Loss", + reduction="mean", + loss_weight=0.25, + ), + loss_heatmap=dict( + type="GaussianFocalLoss", + reduction="mean" + ), + separate_head=dict( + type='SeparateMlpHead', init_bias=-2.19, final_kernel=3), + init_cfg=None, + **kwargs): + assert init_cfg is None + super(CmtHead, self).__init__(init_cfg=init_cfg) + self.num_classes = [len(t["class_names"]) for t in tasks] + self.class_names = [t["class_names"] for t in tasks] + self.hidden_dim = hidden_dim + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.num_query = num_query + self.in_channels = in_channels + self.depth_num = depth_num + self.norm_bbox = norm_bbox + self.downsample_scale = downsample_scale + self.scalar = scalar + self.bbox_noise_scale = noise_scale + self.bbox_noise_trans = noise_trans + self.dn_weight = dn_weight + self.split = split + + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_heatmap = build_loss(loss_heatmap) + self.bbox_coder = build_bbox_coder(bbox_coder) + self.pc_range = self.bbox_coder.pc_range + self.fp16_enabled = False + + self.shared_conv = ConvModule( + in_channels, + hidden_dim, + kernel_size=3, + padding=1, + conv_cfg=dict(type="Conv2d"), + norm_cfg=dict(type="BN2d") + ) + + # transformer + self.transformer = build_transformer(transformer) + self.reference_points = nn.Embedding(num_query, 3) + self.bev_embedding = nn.Sequential( + nn.Linear(hidden_dim * 2, hidden_dim), + nn.ReLU(inplace=True), + nn.Linear(hidden_dim, hidden_dim) + ) + self.rv_embedding = nn.Sequential( + nn.Linear(self.depth_num * 3, self.hidden_dim * 4), + nn.ReLU(inplace=True), + nn.Linear(self.hidden_dim * 4, self.hidden_dim) + ) + # task head + self.task_heads = nn.ModuleList() + for num_cls in self.num_classes: + heads = copy.deepcopy(common_heads) + heads.update(dict(cls_logits=(num_cls, 2))) + separate_head.update( + in_channels=hidden_dim, + heads=heads, num_cls=num_cls, + groups=transformer.decoder.num_layers + ) + self.task_heads.append(builder.build_head(separate_head)) + + # assigner + if train_cfg: + self.assigner = build_assigner(train_cfg["assigner"]) + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + def init_weights(self): + super(CmtHead, self).init_weights() + nn.init.uniform_(self.reference_points.weight.data, 0, 1) + + @property + 
def coords_bev(self): + cfg = self.train_cfg if self.train_cfg else self.test_cfg + x_size, y_size = ( + cfg['grid_size'][1] // self.downsample_scale, + cfg['grid_size'][0] // self.downsample_scale + ) + meshgrid = [[0, x_size - 1, x_size], [0, y_size - 1, y_size]] + batch_y, batch_x = torch.meshgrid(*[torch.linspace(it[0], it[1], it[2]) for it in meshgrid]) + batch_x = (batch_x + 0.5) / x_size + batch_y = (batch_y + 0.5) / y_size + coord_base = torch.cat([batch_x[None], batch_y[None]], dim=0) + coord_base = coord_base.view(2, -1).transpose(1, 0) # (H*W, 2) + return coord_base + + def prepare_for_dn(self, batch_size, reference_points, img_metas): + if self.training: + targets = [torch.cat((img_meta['gt_bboxes_3d']._data.gravity_center, img_meta['gt_bboxes_3d']._data.tensor[:, 3:]),dim=1) for img_meta in img_metas ] + labels = [img_meta['gt_labels_3d']._data for img_meta in img_metas ] + known = [(torch.ones_like(t)).cuda() for t in labels] + know_idx = known + unmask_bbox = unmask_label = torch.cat(known) + known_num = [t.size(0) for t in targets] + labels = torch.cat([t for t in labels]) + boxes = torch.cat([t for t in targets]) + batch_idx = torch.cat([torch.full((t.size(0), ), i) for i, t in enumerate(targets)]) + + known_indice = torch.nonzero(unmask_label + unmask_bbox) + known_indice = known_indice.view(-1) + # add noise + groups = min(self.scalar, self.num_query // max(known_num)) + known_indice = known_indice.repeat(groups, 1).view(-1) + known_labels = labels.repeat(groups, 1).view(-1).long().to(reference_points.device) + known_labels_raw = labels.repeat(groups, 1).view(-1).long().to(reference_points.device) + known_bid = batch_idx.repeat(groups, 1).view(-1) + known_bboxs = boxes.repeat(groups, 1).to(reference_points.device) + known_bbox_center = known_bboxs[:, :3].clone() + known_bbox_scale = known_bboxs[:, 3:6].clone() + + if self.bbox_noise_scale > 0: + diff = known_bbox_scale / 2 + self.bbox_noise_trans + rand_prob = torch.rand_like(known_bbox_center) * 2 - 1.0 + known_bbox_center += torch.mul(rand_prob, + diff) * self.bbox_noise_scale + known_bbox_center[..., 0:1] = (known_bbox_center[..., 0:1] - self.pc_range[0]) / (self.pc_range[3] - self.pc_range[0]) + known_bbox_center[..., 1:2] = (known_bbox_center[..., 1:2] - self.pc_range[1]) / (self.pc_range[4] - self.pc_range[1]) + known_bbox_center[..., 2:3] = (known_bbox_center[..., 2:3] - self.pc_range[2]) / (self.pc_range[5] - self.pc_range[2]) + known_bbox_center = known_bbox_center.clamp(min=0.0, max=1.0) + mask = torch.norm(rand_prob, 2, 1) > self.split + known_labels[mask] = sum(self.num_classes) + + single_pad = int(max(known_num)) + pad_size = int(single_pad * groups) + padding_bbox = torch.zeros(pad_size, 3).to(reference_points.device) + padded_reference_points = torch.cat([padding_bbox, reference_points], dim=0).unsqueeze(0).repeat(batch_size, 1, 1) + + if len(known_num): + map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3] + map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(groups)]).long() + if len(known_bid): + padded_reference_points[(known_bid.long(), map_known_indice)] = known_bbox_center.to(reference_points.device) + + tgt_size = pad_size + self.num_query + attn_mask = torch.ones(tgt_size, tgt_size).to(reference_points.device) < 0 + # match query cannot see the reconstruct + attn_mask[pad_size:, :pad_size] = True + # reconstruct cannot see each other + for i in range(groups): + if i == 0: + attn_mask[single_pad * i:single_pad * (i + 1), 
single_pad * (i + 1):pad_size] = True + if i == groups - 1: + attn_mask[single_pad * i:single_pad * (i + 1), :single_pad * i] = True + else: + attn_mask[single_pad * i:single_pad * (i + 1), single_pad * (i + 1):pad_size] = True + attn_mask[single_pad * i:single_pad * (i + 1), :single_pad * i] = True + + mask_dict = { + 'known_indice': torch.as_tensor(known_indice).long(), + 'batch_idx': torch.as_tensor(batch_idx).long(), + 'map_known_indice': torch.as_tensor(map_known_indice).long(), + 'known_lbs_bboxes': (known_labels, known_bboxs), + 'known_labels_raw': known_labels_raw, + 'know_idx': know_idx, + 'pad_size': pad_size + } + + else: + padded_reference_points = reference_points.unsqueeze(0).repeat(batch_size, 1, 1) + attn_mask = None + mask_dict = None + + return padded_reference_points, attn_mask, mask_dict + + def _rv_pe(self, img_feats, img_metas): + BN, C, H, W = img_feats.shape + pad_h, pad_w, _ = img_metas[0]['pad_shape'][0] + coords_h = torch.arange(H, device=img_feats[0].device).float() * pad_h / H + coords_w = torch.arange(W, device=img_feats[0].device).float() * pad_w / W + coords_d = 1 + torch.arange(self.depth_num, device=img_feats[0].device).float() * (self.pc_range[3] - 1) / self.depth_num + coords_h, coords_w, coords_d = torch.meshgrid([coords_h, coords_w, coords_d]) + + coords = torch.stack([coords_w, coords_h, coords_d, coords_h.new_ones(coords_h.shape)], dim=-1) + coords[..., :2] = coords[..., :2] * coords[..., 2:3] + + imgs2lidars = np.concatenate([np.linalg.inv(meta['lidar2img']) for meta in img_metas]) + imgs2lidars = torch.from_numpy(imgs2lidars).float().to(coords.device) + coords_3d = torch.einsum('hwdo, bco -> bhwdc', coords, imgs2lidars) + coords_3d = (coords_3d[..., :3] - coords_3d.new_tensor(self.pc_range[:3])[None, None, None, :] )\ + / (coords_3d.new_tensor(self.pc_range[3:]) - coords_3d.new_tensor(self.pc_range[:3]))[None, None, None, :] + return self.rv_embedding(coords_3d.reshape(*coords_3d.shape[:-2], -1)) + + def _bev_query_embed(self, ref_points, img_metas): + bev_embeds = self.bev_embedding(pos2embed(ref_points, num_pos_feats=self.hidden_dim)) + return bev_embeds + + def _rv_query_embed(self, ref_points, img_metas): + pad_h, pad_w, _ = img_metas[0]['pad_shape'][0] + lidars2imgs = np.stack([meta['lidar2img'] for meta in img_metas]) + lidars2imgs = torch.from_numpy(lidars2imgs).float().to(ref_points.device) + imgs2lidars = np.stack([np.linalg.inv(meta['lidar2img']) for meta in img_metas]) + imgs2lidars = torch.from_numpy(imgs2lidars).float().to(ref_points.device) + + ref_points = ref_points * (ref_points.new_tensor(self.pc_range[3:]) - ref_points.new_tensor(self.pc_range[:3])) + ref_points.new_tensor(self.pc_range[:3]) + proj_points = torch.einsum('bnd, bvcd -> bvnc', torch.cat([ref_points, ref_points.new_ones(*ref_points.shape[:-1], 1)], dim=-1), lidars2imgs) + + proj_points_clone = proj_points.clone() + z_mask = proj_points_clone[..., 2:3].detach() > 0 + proj_points_clone[..., :3] = proj_points[..., :3] / (proj_points[..., 2:3].detach() + z_mask * 1e-6 - (~z_mask) * 1e-6) + # proj_points_clone[..., 2] = proj_points.new_ones(proj_points[..., 2].shape) + + mask = (proj_points_clone[..., 0] < pad_w) & (proj_points_clone[..., 0] >= 0) & (proj_points_clone[..., 1] < pad_h) & (proj_points_clone[..., 1] >= 0) + mask &= z_mask.squeeze(-1) + + coords_d = 1 + torch.arange(self.depth_num, device=ref_points.device).float() * (self.pc_range[3] - 1) / self.depth_num + proj_points_clone = torch.einsum('bvnc, d -> bvndc', proj_points_clone, coords_d) + proj_points_clone 
= torch.cat([proj_points_clone[..., :3], proj_points_clone.new_ones(*proj_points_clone.shape[:-1], 1)], dim=-1) + projback_points = torch.einsum('bvndo, bvco -> bvndc', proj_points_clone, imgs2lidars) + + projback_points = (projback_points[..., :3] - projback_points.new_tensor(self.pc_range[:3])[None, None, None, :] )\ + / (projback_points.new_tensor(self.pc_range[3:]) - projback_points.new_tensor(self.pc_range[:3]))[None, None, None, :] + + rv_embeds = self.rv_embedding(projback_points.reshape(*projback_points.shape[:-2], -1)) + rv_embeds = (rv_embeds * mask.unsqueeze(-1)).sum(dim=1) + return rv_embeds + + def query_embed(self, ref_points, img_metas): + ref_points = inverse_sigmoid(ref_points.clone()).sigmoid() + bev_embeds = self._bev_query_embed(ref_points, img_metas) + rv_embeds = self._rv_query_embed(ref_points, img_metas) + return bev_embeds, rv_embeds + + def forward_single(self, x, x_img, img_metas): + """ + x: [bs c h w] + return List(dict(head_name: [num_dec x bs x num_query * head_dim]) ) x task_num + """ + ret_dicts = [] + x = self.shared_conv(x) + + reference_points = self.reference_points.weight + reference_points, attn_mask, mask_dict = self.prepare_for_dn(x.shape[0], reference_points, img_metas) + + mask = x.new_zeros(x.shape[0], x.shape[2], x.shape[3]) + + rv_pos_embeds = self._rv_pe(x_img, img_metas) + bev_pos_embeds = self.bev_embedding(pos2embed(self.coords_bev.to(x.device), num_pos_feats=self.hidden_dim)) + + bev_query_embeds, rv_query_embeds = self.query_embed(reference_points, img_metas) + query_embeds = bev_query_embeds + rv_query_embeds + + outs_dec, _ = self.transformer( + x, x_img, query_embeds, + bev_pos_embeds, rv_pos_embeds, + attn_masks=attn_mask + ) + outs_dec = torch.nan_to_num(outs_dec) + + reference = inverse_sigmoid(reference_points.clone()) + + flag = 0 + for task_id, task in enumerate(self.task_heads, 0): + outs = task(outs_dec) + center = (outs['center'] + reference[None, :, :, :2]).sigmoid() + height = (outs['height'] + reference[None, :, :, 2:3]).sigmoid() + _center, _height = center.new_zeros(center.shape), height.new_zeros(height.shape) + _center[..., 0:1] = center[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) + self.pc_range[0] + _center[..., 1:2] = center[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) + self.pc_range[1] + _height[..., 0:1] = height[..., 0:1] * (self.pc_range[5] - self.pc_range[2]) + self.pc_range[2] + outs['center'] = _center + outs['height'] = _height + + if mask_dict and mask_dict['pad_size'] > 0: + task_mask_dict = copy.deepcopy(mask_dict) + class_name = self.class_names[task_id] + + known_lbs_bboxes_label = task_mask_dict['known_lbs_bboxes'][0] + known_labels_raw = task_mask_dict['known_labels_raw'] + new_lbs_bboxes_label = known_lbs_bboxes_label.new_zeros(known_lbs_bboxes_label.shape) + new_lbs_bboxes_label[:] = len(class_name) + new_labels_raw = known_labels_raw.new_zeros(known_labels_raw.shape) + new_labels_raw[:] = len(class_name) + task_masks = [ + torch.where(known_lbs_bboxes_label == class_name.index(i) + flag) + for i in class_name + ] + task_masks_raw = [ + torch.where(known_labels_raw == class_name.index(i) + flag) + for i in class_name + ] + for cname, task_mask, task_mask_raw in zip(class_name, task_masks, task_masks_raw): + new_lbs_bboxes_label[task_mask] = class_name.index(cname) + new_labels_raw[task_mask_raw] = class_name.index(cname) + task_mask_dict['known_lbs_bboxes'] = (new_lbs_bboxes_label, task_mask_dict['known_lbs_bboxes'][1]) + task_mask_dict['known_labels_raw'] = new_labels_raw + flag += 
len(class_name)
+
+                for key in list(outs.keys()):
+                    outs['dn_' + key] = outs[key][:, :, :mask_dict['pad_size'], :]
+                    outs[key] = outs[key][:, :, mask_dict['pad_size']:, :]
+                outs['dn_mask_dict'] = task_mask_dict
+
+            ret_dicts.append(outs)
+
+        return ret_dicts
+
+    def forward(self, pts_feats, img_feats=None, img_metas=None):
+        """Forward pass over multi-level features; each element of
+        ``pts_feats``/``img_feats`` has shape [bs, c, h, w]."""
+        img_metas = [img_metas for _ in range(len(pts_feats))]
+        return multi_apply(self.forward_single, pts_feats, img_feats, img_metas)
+
+    def _get_targets_single(self, gt_bboxes_3d, gt_labels_3d, pred_bboxes, pred_logits):
+        """Compute regression and classification targets for one image.
+        Outputs from a single decoder layer of a single feature level are used.
+        Args:
+            gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth boxes
+                with shape (num_gts, 9).
+            gt_labels_3d (Tensor): Ground truth class indices (num_gts, )
+            pred_bboxes (list[Tensor]): num_tasks x (num_query, 10)
+            pred_logits (list[Tensor]): num_tasks x (num_query, task_classes)
+        Returns:
+            tuple[Tensor]: a tuple containing the following.
+                - labels_tasks (list[Tensor]): num_tasks x (num_query, ).
+                - label_weights_tasks (list[Tensor]): num_tasks x (num_query, ).
+                - bbox_targets_tasks (list[Tensor]): num_tasks x (num_query, 9).
+                - bbox_weights_tasks (list[Tensor]): num_tasks x (num_query, 10).
+                - pos_inds (list[Tensor]): num_tasks x Sampled positive indices.
+                - neg_inds (list[Tensor]): num_tasks x Sampled negative indices.
+        """
+        device = gt_labels_3d.device
+        gt_bboxes_3d = torch.cat(
+            (gt_bboxes_3d.gravity_center, gt_bboxes_3d.tensor[:, 3:]), dim=1
+        ).to(device)
+
+        task_masks = []
+        flag = 0
+        for class_name in self.class_names:
+            task_masks.append([
+                torch.where(gt_labels_3d == class_name.index(i) + flag)
+                for i in class_name
+            ])
+            flag += len(class_name)
+
+        task_boxes = []
+        task_classes = []
+        flag2 = 0
+        for idx, mask in enumerate(task_masks):
+            task_box = []
+            task_class = []
+            for m in mask:
+                task_box.append(gt_bboxes_3d[m])
+                task_class.append(gt_labels_3d[m] - flag2)
+            task_boxes.append(torch.cat(task_box, dim=0).to(device))
+            task_classes.append(torch.cat(task_class).long().to(device))
+            flag2 += len(mask)
+
+        def task_assign(bbox_pred, logits_pred, gt_bboxes, gt_labels, num_classes):
+            num_bboxes = bbox_pred.shape[0]
+            assign_results = self.assigner.assign(bbox_pred, logits_pred, gt_bboxes, gt_labels)
+            sampling_result = self.sampler.sample(assign_results, bbox_pred, gt_bboxes)
+            pos_inds, neg_inds = sampling_result.pos_inds, sampling_result.neg_inds
+            # label targets
+            labels = gt_bboxes.new_full((num_bboxes, ),
+                                        num_classes,
+                                        dtype=torch.long)
+            labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
+            label_weights = gt_bboxes.new_ones(num_bboxes)
+            # bbox_targets
+            code_size = gt_bboxes.shape[1]
+            bbox_targets = torch.zeros_like(bbox_pred)[..., :code_size]
+            bbox_weights = torch.zeros_like(bbox_pred)
+            bbox_weights[pos_inds] = 1.0
+
+            if len(sampling_result.pos_gt_bboxes) > 0:
+                bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes
+            return labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds
+
+        labels_tasks, labels_weights_tasks, bbox_targets_tasks, bbox_weights_tasks, pos_inds_tasks, neg_inds_tasks\
+            = multi_apply(task_assign, pred_bboxes, pred_logits, task_boxes, task_classes, self.num_classes)
+
+        return labels_tasks, labels_weights_tasks, bbox_targets_tasks, bbox_weights_tasks, pos_inds_tasks, neg_inds_tasks
+
+    def get_targets(self, gt_bboxes_3d, gt_labels_3d, preds_bboxes, preds_logits):
+        """Compute regression and classification targets for a batch of images.
+        Outputs from a single decoder layer of a single feature level are used.
+        Args:
+            gt_bboxes_3d (list[LiDARInstance3DBoxes]): batch_size * (num_gts, 9)
+            gt_labels_3d (list[Tensor]): Ground truth class indices. batch_size * (num_gts, )
+            pred_bboxes (list[list[Tensor]]): batch_size x num_task x [num_query, 10].
+            pred_logits (list[list[Tensor]]): batch_size x num_task x [num_query, task_classes]
+        Returns:
+            tuple: a tuple containing the following targets.
+                - task_labels_list (list(list[Tensor])): num_tasks x batch_size x (num_query, ).
+                - task_labels_weight_list (list(list[Tensor])): num_tasks x batch_size x (num_query, )
+                - task_bbox_targets_list (list(list[Tensor])): num_tasks x batch_size x (num_query, 9)
+                - task_bbox_weights_list (list(list[Tensor])): num_tasks x batch_size x (num_query, 10)
+                - num_total_pos_tasks (list[int]): num_tasks x Number of positive samples
+                - num_total_neg_tasks (list[int]): num_tasks x Number of negative samples.
+        """
+        (labels_list, labels_weight_list, bbox_targets_list,
+         bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(
+            self._get_targets_single, gt_bboxes_3d, gt_labels_3d, preds_bboxes, preds_logits
+        )
+        task_num = len(labels_list[0])
+        num_total_pos_tasks, num_total_neg_tasks = [], []
+        task_labels_list, task_labels_weight_list, task_bbox_targets_list, \
+            task_bbox_weights_list = [], [], [], []
+
+        for task_id in range(task_num):
+            num_total_pos_task = sum((inds[task_id].numel() for inds in pos_inds_list))
+            num_total_neg_task = sum((inds[task_id].numel() for inds in neg_inds_list))
+            num_total_pos_tasks.append(num_total_pos_task)
+            num_total_neg_tasks.append(num_total_neg_task)
+            task_labels_list.append([labels_list[batch_idx][task_id] for batch_idx in range(len(gt_bboxes_3d))])
+            task_labels_weight_list.append([labels_weight_list[batch_idx][task_id] for batch_idx in range(len(gt_bboxes_3d))])
+            task_bbox_targets_list.append([bbox_targets_list[batch_idx][task_id] for batch_idx in range(len(gt_bboxes_3d))])
+            task_bbox_weights_list.append([bbox_weights_list[batch_idx][task_id] for batch_idx in range(len(gt_bboxes_3d))])
+
+        return (task_labels_list, task_labels_weight_list, task_bbox_targets_list,
+                task_bbox_weights_list, num_total_pos_tasks, num_total_neg_tasks)
+
+    def _loss_single_task(self,
+                          pred_bboxes,
+                          pred_logits,
+                          labels_list,
+                          labels_weights_list,
+                          bbox_targets_list,
+                          bbox_weights_list,
+                          num_total_pos,
+                          num_total_neg):
+        """Compute the loss for a single task.
+        Outputs from a single decoder layer of a single feature level are used.
+        Args:
+            pred_bboxes (Tensor): (batch_size, num_query, 10)
+            pred_logits (Tensor): (batch_size, num_query, task_classes)
+            labels_list (list[Tensor]): batch_size x (num_query, )
+            labels_weights_list (list[Tensor]): batch_size x (num_query, )
+            bbox_targets_list (list[Tensor]): batch_size x (num_query, 9)
+            bbox_weights_list (list[Tensor]): batch_size x (num_query, 10)
+            num_total_pos: int
+            num_total_neg: int
+        Returns:
+            tuple[Tensor]: loss_cls and loss_bbox.
+        """
+        labels = torch.cat(labels_list, dim=0)
+        labels_weights = torch.cat(labels_weights_list, dim=0)
+        bbox_targets = torch.cat(bbox_targets_list, dim=0)
+        bbox_weights = torch.cat(bbox_weights_list, dim=0)
+
+        pred_bboxes_flatten = pred_bboxes.flatten(0, 1)
+        pred_logits_flatten = pred_logits.flatten(0, 1)
+
+        cls_avg_factor = num_total_pos * 1.0 + num_total_neg * 0.1
+        cls_avg_factor = max(cls_avg_factor, 1)
+        loss_cls = self.loss_cls(
+            pred_logits_flatten, labels, labels_weights, avg_factor=cls_avg_factor
+        )
+
+        normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range)
+        isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1)
+        bbox_weights = bbox_weights * bbox_weights.new_tensor(self.train_cfg.code_weights)[None, :]
+
+        loss_bbox = self.loss_bbox(
+            pred_bboxes_flatten[isnotnan, :10],
+            normalized_bbox_targets[isnotnan, :10],
+            bbox_weights[isnotnan, :10],
+            avg_factor=num_total_pos
+        )
+
+        loss_cls = torch.nan_to_num(loss_cls)
+        loss_bbox = torch.nan_to_num(loss_bbox)
+        return loss_cls, loss_bbox
+
+    def loss_single(self,
+                    pred_bboxes,
+                    pred_logits,
+                    gt_bboxes_3d,
+                    gt_labels_3d):
+        """Loss function for outputs from a single decoder layer of a single
+        feature level.
+        Args:
+            pred_bboxes (list[Tensor]): num_tasks x [bs, num_query, 10].
+            pred_logits (list[Tensor]): num_tasks x [bs, num_query, task_classes]
+            gt_bboxes_3d (list[LiDARInstance3DBoxes]): batch_size * (num_gts, 9)
+            gt_labels_3d (list[Tensor]): Ground truth class indices. batch_size * (num_gts, )
+        Returns:
+            tuple[Tensor]: Classification and regression losses, each summed
+                over all tasks for this decoder layer.
+ """ + batch_size = pred_bboxes[0].shape[0] + pred_bboxes_list, pred_logits_list = [], [] + for idx in range(batch_size): + pred_bboxes_list.append([task_pred_bbox[idx] for task_pred_bbox in pred_bboxes]) + pred_logits_list.append([task_pred_logits[idx] for task_pred_logits in pred_logits]) + cls_reg_targets = self.get_targets( + gt_bboxes_3d, gt_labels_3d, pred_bboxes_list, pred_logits_list + ) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + loss_cls_tasks, loss_bbox_tasks = multi_apply( + self._loss_single_task, + pred_bboxes, + pred_logits, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_pos, + num_total_neg + ) + + return sum(loss_cls_tasks), sum(loss_bbox_tasks) + + def _dn_loss_single_task(self, + pred_bboxes, + pred_logits, + mask_dict): + known_labels, known_bboxs = mask_dict['known_lbs_bboxes'] + map_known_indice = mask_dict['map_known_indice'].long() + known_indice = mask_dict['known_indice'].long() + batch_idx = mask_dict['batch_idx'].long() + bid = batch_idx[known_indice] + known_labels_raw = mask_dict['known_labels_raw'] + + pred_logits = pred_logits[(bid, map_known_indice)] + pred_bboxes = pred_bboxes[(bid, map_known_indice)] + num_tgt = known_indice.numel() + + # filter task bbox + task_mask = known_labels_raw != pred_logits.shape[-1] + task_mask_sum = task_mask.sum() + + if task_mask_sum > 0: + # pred_logits = pred_logits[task_mask] + # known_labels = known_labels[task_mask] + pred_bboxes = pred_bboxes[task_mask] + known_bboxs = known_bboxs[task_mask] + + # classification loss + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_tgt * 3.14159 / 6 * self.split * self.split * self.split + + label_weights = torch.ones_like(known_labels) + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_cls( + pred_logits, known_labels.long(), label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes accross all gpus, for + # normalization purposes + num_tgt = loss_cls.new_tensor([num_tgt]) + num_tgt = torch.clamp(reduce_mean(num_tgt), min=1).item() + + # regression L1 loss + normalized_bbox_targets = normalize_bbox(known_bboxs, self.pc_range) + isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) + bbox_weights = torch.ones_like(pred_bboxes) + bbox_weights = bbox_weights * bbox_weights.new_tensor(self.train_cfg.code_weights)[None, :] + # bbox_weights[:, 6:8] = 0 + loss_bbox = self.loss_bbox( + pred_bboxes[isnotnan, :10], normalized_bbox_targets[isnotnan, :10], bbox_weights[isnotnan, :10], avg_factor=num_tgt) + + loss_cls = torch.nan_to_num(loss_cls) + loss_bbox = torch.nan_to_num(loss_bbox) + + if task_mask_sum == 0: + # loss_cls = loss_cls * 0.0 + loss_bbox = loss_bbox * 0.0 + + return self.dn_weight * loss_cls, self.dn_weight * loss_bbox + + def dn_loss_single(self, + pred_bboxes, + pred_logits, + dn_mask_dict): + loss_cls_tasks, loss_bbox_tasks = multi_apply( + self._dn_loss_single_task, pred_bboxes, pred_logits, dn_mask_dict + ) + return sum(loss_cls_tasks), sum(loss_bbox_tasks) + + @force_fp32(apply_to=('preds_dicts')) + def loss(self, gt_bboxes_3d, gt_labels_3d, preds_dicts, **kwargs): + """"Loss function. + Args: + gt_bboxes_3d (list[LiDARInstance3DBoxes]): batch_size * (num_gts, 9) + gt_labels_3d (list[Tensor]): Ground truth class indices. 
batch_size * (num_gts, ) + preds_dicts(tuple[list[dict]]): nb_tasks x num_lvl + center: (num_dec, batch_size, num_query, 2) + height: (num_dec, batch_size, num_query, 1) + dim: (num_dec, batch_size, num_query, 3) + rot: (num_dec, batch_size, num_query, 2) + vel: (num_dec, batch_size, num_query, 2) + cls_logits: (num_dec, batch_size, num_query, task_classes) + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + num_decoder = preds_dicts[0][0]['center'].shape[0] + all_pred_bboxes, all_pred_logits = collections.defaultdict(list), collections.defaultdict(list) + + for task_id, preds_dict in enumerate(preds_dicts, 0): + for dec_id in range(num_decoder): + pred_bbox = torch.cat( + (preds_dict[0]['center'][dec_id], preds_dict[0]['height'][dec_id], + preds_dict[0]['dim'][dec_id], preds_dict[0]['rot'][dec_id], + preds_dict[0]['vel'][dec_id]), + dim=-1 + ) + all_pred_bboxes[dec_id].append(pred_bbox) + all_pred_logits[dec_id].append(preds_dict[0]['cls_logits'][dec_id]) + all_pred_bboxes = [all_pred_bboxes[idx] for idx in range(num_decoder)] + all_pred_logits = [all_pred_logits[idx] for idx in range(num_decoder)] + + loss_cls, loss_bbox = multi_apply( + self.loss_single, all_pred_bboxes, all_pred_logits, + [gt_bboxes_3d for _ in range(num_decoder)], + [gt_labels_3d for _ in range(num_decoder)], + ) + + loss_dict = dict() + loss_dict['loss_cls'] = loss_cls[-1] + loss_dict['loss_bbox'] = loss_bbox[-1] + + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(loss_cls[:-1], + loss_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + num_dec_layer += 1 + + dn_pred_bboxes, dn_pred_logits = collections.defaultdict(list), collections.defaultdict(list) + dn_mask_dicts = collections.defaultdict(list) + for task_id, preds_dict in enumerate(preds_dicts, 0): + for dec_id in range(num_decoder): + pred_bbox = torch.cat( + (preds_dict[0]['dn_center'][dec_id], preds_dict[0]['dn_height'][dec_id], + preds_dict[0]['dn_dim'][dec_id], preds_dict[0]['dn_rot'][dec_id], + preds_dict[0]['dn_vel'][dec_id]), + dim=-1 + ) + dn_pred_bboxes[dec_id].append(pred_bbox) + dn_pred_logits[dec_id].append(preds_dict[0]['dn_cls_logits'][dec_id]) + dn_mask_dicts[dec_id].append(preds_dict[0]['dn_mask_dict']) + dn_pred_bboxes = [dn_pred_bboxes[idx] for idx in range(num_decoder)] + dn_pred_logits = [dn_pred_logits[idx] for idx in range(num_decoder)] + dn_mask_dicts = [dn_mask_dicts[idx] for idx in range(num_decoder)] + dn_loss_cls, dn_loss_bbox = multi_apply( + self.dn_loss_single, dn_pred_bboxes, dn_pred_logits, dn_mask_dicts + ) + + loss_dict['dn_loss_cls'] = dn_loss_cls[-1] + loss_dict['dn_loss_bbox'] = dn_loss_bbox[-1] + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(dn_loss_cls[:-1], + dn_loss_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i + num_dec_layer += 1 + + return loss_dict + + @force_fp32(apply_to=('preds_dicts')) + def get_bboxes(self, preds_dicts, img_metas, img=None, rescale=False): + preds_dicts = self.bbox_coder.decode(preds_dicts) + num_samples = len(preds_dicts) + + ret_list = [] + for i in range(num_samples): + preds = preds_dicts[i] + bboxes = preds['bboxes'] + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + bboxes = img_metas[i]['box_type_3d'](bboxes, bboxes.size(-1)) + scores = preds['scores'] + labels = preds['labels'] + ret_list.append([bboxes, scores, labels]) + return ret_list + + +@HEADS.register_module() +class 
CmtImageHead(CmtHead): + + def __init__(self, *args, **kwargs): + super(CmtImageHead, self). __init__(*args, **kwargs) + self.shared_conv = None + + def forward_single(self, x, x_img, img_metas): + """ + x: [bs c h w] + return List(dict(head_name: [num_dec x bs x num_query * head_dim]) ) x task_num + """ + assert x is None + ret_dicts = [] + + reference_points = self.reference_points.weight + reference_points, attn_mask, mask_dict = self.prepare_for_dn(len(img_metas), reference_points, img_metas) + + rv_pos_embeds = self._rv_pe(x_img, img_metas) + + bev_query_embeds, rv_query_embeds = self.query_embed(reference_points, img_metas) + query_embeds = bev_query_embeds + rv_query_embeds + + outs_dec, _ = self.transformer( + x_img, query_embeds, + rv_pos_embeds, + attn_masks=attn_mask, + bs=len(img_metas) + ) + outs_dec = torch.nan_to_num(outs_dec) + + reference = inverse_sigmoid(reference_points.clone()) + + flag = 0 + for task_id, task in enumerate(self.task_heads, 0): + outs = task(outs_dec) + center = (outs['center'] + reference[None, :, :, :2]).sigmoid() + height = (outs['height'] + reference[None, :, :, 2:3]).sigmoid() + _center, _height = center.new_zeros(center.shape), height.new_zeros(height.shape) + _center[..., 0:1] = center[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) + self.pc_range[0] + _center[..., 1:2] = center[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) + self.pc_range[1] + _height[..., 0:1] = height[..., 0:1] * (self.pc_range[5] - self.pc_range[2]) + self.pc_range[2] + outs['center'] = _center + outs['height'] = _height + + if mask_dict and mask_dict['pad_size'] > 0: + task_mask_dict = copy.deepcopy(mask_dict) + class_name = self.class_names[task_id] + + known_lbs_bboxes_label = task_mask_dict['known_lbs_bboxes'][0] + known_labels_raw = task_mask_dict['known_labels_raw'] + new_lbs_bboxes_label = known_lbs_bboxes_label.new_zeros(known_lbs_bboxes_label.shape) + new_lbs_bboxes_label[:] = len(class_name) + new_labels_raw = known_labels_raw.new_zeros(known_labels_raw.shape) + new_labels_raw[:] = len(class_name) + task_masks = [ + torch.where(known_lbs_bboxes_label == class_name.index(i) + flag) + for i in class_name + ] + task_masks_raw = [ + torch.where(known_labels_raw == class_name.index(i) + flag) + for i in class_name + ] + for cname, task_mask, task_mask_raw in zip(class_name, task_masks, task_masks_raw): + new_lbs_bboxes_label[task_mask] = class_name.index(cname) + new_labels_raw[task_mask_raw] = class_name.index(cname) + task_mask_dict['known_lbs_bboxes'] = (new_lbs_bboxes_label, task_mask_dict['known_lbs_bboxes'][1]) + task_mask_dict['known_labels_raw'] = new_labels_raw + flag += len(class_name) + + for key in list(outs.keys()): + outs['dn_' + key] = outs[key][:, :, :mask_dict['pad_size'], :] + outs[key] = outs[key][:, :, mask_dict['pad_size']:, :] + outs['dn_mask_dict'] = task_mask_dict + + ret_dicts.append(outs) + + return ret_dicts + + +@HEADS.register_module() +class CmtLidarHead(CmtHead): + + def __init__(self, *args, **kwargs): + super(CmtLidarHead, self). 
__init__(*args, **kwargs) + self.rv_embedding = None + + def query_embed(self, ref_points, img_metas): + ref_points = inverse_sigmoid(ref_points.clone()).sigmoid() + bev_embeds = self._bev_query_embed(ref_points, img_metas) + return bev_embeds, None + + def forward_single(self, x, x_img, img_metas): + """ + x: [bs c h w] + return List(dict(head_name: [num_dec x bs x num_query * head_dim]) ) x task_num + """ + assert x_img is None + + ret_dicts = [] + x = self.shared_conv(x) + + reference_points = self.reference_points.weight + reference_points, attn_mask, mask_dict = self.prepare_for_dn(x.shape[0], reference_points, img_metas) + + mask = x.new_zeros(x.shape[0], x.shape[2], x.shape[3]) + + bev_pos_embeds = self.bev_embedding(pos2embed(self.coords_bev.to(x.device), num_pos_feats=self.hidden_dim)) + bev_query_embeds, _ = self.query_embed(reference_points, img_metas) + + query_embeds = bev_query_embeds + outs_dec, _ = self.transformer( + x, mask, query_embeds, + bev_pos_embeds, + attn_masks=attn_mask + ) + outs_dec = torch.nan_to_num(outs_dec) + + reference = inverse_sigmoid(reference_points.clone()) + + flag = 0 + for task_id, task in enumerate(self.task_heads, 0): + outs = task(outs_dec) + center = (outs['center'] + reference[None, :, :, :2]).sigmoid() + height = (outs['height'] + reference[None, :, :, 2:3]).sigmoid() + _center, _height = center.new_zeros(center.shape), height.new_zeros(height.shape) + _center[..., 0:1] = center[..., 0:1] * (self.pc_range[3] - self.pc_range[0]) + self.pc_range[0] + _center[..., 1:2] = center[..., 1:2] * (self.pc_range[4] - self.pc_range[1]) + self.pc_range[1] + _height[..., 0:1] = height[..., 0:1] * (self.pc_range[5] - self.pc_range[2]) + self.pc_range[2] + outs['center'] = _center + outs['height'] = _height + + if mask_dict and mask_dict['pad_size'] > 0: + task_mask_dict = copy.deepcopy(mask_dict) + class_name = self.class_names[task_id] + + known_lbs_bboxes_label = task_mask_dict['known_lbs_bboxes'][0] + known_labels_raw = task_mask_dict['known_labels_raw'] + new_lbs_bboxes_label = known_lbs_bboxes_label.new_zeros(known_lbs_bboxes_label.shape) + new_lbs_bboxes_label[:] = len(class_name) + new_labels_raw = known_labels_raw.new_zeros(known_labels_raw.shape) + new_labels_raw[:] = len(class_name) + task_masks = [ + torch.where(known_lbs_bboxes_label == class_name.index(i) + flag) + for i in class_name + ] + task_masks_raw = [ + torch.where(known_labels_raw == class_name.index(i) + flag) + for i in class_name + ] + for cname, task_mask, task_mask_raw in zip(class_name, task_masks, task_masks_raw): + new_lbs_bboxes_label[task_mask] = class_name.index(cname) + new_labels_raw[task_mask_raw] = class_name.index(cname) + task_mask_dict['known_lbs_bboxes'] = (new_lbs_bboxes_label, task_mask_dict['known_lbs_bboxes'][1]) + task_mask_dict['known_labels_raw'] = new_labels_raw + flag += len(class_name) + + for key in list(outs.keys()): + outs['dn_' + key] = outs[key][:, :, :mask_dict['pad_size'], :] + outs[key] = outs[key][:, :, mask_dict['pad_size']:, :] + outs['dn_mask_dict'] = task_mask_dict + + ret_dicts.append(outs) + + return ret_dicts \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/detectors/__init__.py b/projects/mmdet3d_plugin/models/detectors/__init__.py new file mode 100644 index 0000000..4b008ff --- /dev/null +++ b/projects/mmdet3d_plugin/models/detectors/__init__.py @@ -0,0 +1,3 @@ +from .cmt import CmtDetector + +__all__ = ['CmtDetector'] \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/detectors/cmt.py 
b/projects/mmdet3d_plugin/models/detectors/cmt.py new file mode 100644 index 0000000..0f10aee --- /dev/null +++ b/projects/mmdet3d_plugin/models/detectors/cmt.py @@ -0,0 +1,252 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ + +import mmcv +import copy +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np + +from mmcv.runner import force_fp32, auto_fp16 +from mmdet.core import multi_apply +from mmdet.models import DETECTORS +from mmdet.models.builder import build_backbone +from mmdet3d.core import (Box3DMode, Coord3DMode, bbox3d2result, + merge_aug_bboxes_3d, show_result) +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector + +from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask +from projects.mmdet3d_plugin import SPConvVoxelization + + +@DETECTORS.register_module() +class CmtDetector(MVXTwoStageDetector): + + def __init__(self, + use_grid_mask=False, + **kwargs): + pts_voxel_cfg = kwargs.get('pts_voxel_layer', None) + kwargs['pts_voxel_layer'] = None + super(CmtDetector, self).__init__(**kwargs) + + self.use_grid_mask = use_grid_mask + self.grid_mask = GridMask(True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + if pts_voxel_cfg: + self.pts_voxel_layer = SPConvVoxelization(**pts_voxel_cfg) + + def init_weights(self): + """Initialize model weights.""" + super(CmtDetector, self).init_weights() + + @auto_fp16(apply_to=('img'), out_fp32=True) + def extract_img_feat(self, img, img_metas): + """Extract features of images.""" + if self.with_img_backbone and img is not None: + input_shape = img.shape[-2:] + # update real input shape of each single img + for img_meta in img_metas: + img_meta.update(input_shape=input_shape) + + if img.dim() == 5 and img.size(0) == 1: + img.squeeze_(0) + elif img.dim() == 5 and img.size(0) > 1: + B, N, C, H, W = img.size() + img = img.view(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) + img_feats = self.img_backbone(img.float()) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + return img_feats + + @force_fp32(apply_to=('pts', 'img_feats')) + def extract_pts_feat(self, pts, img_feats, img_metas): + """Extract features of points.""" + if not self.with_pts_bbox: + return None + if pts is None: + return None + voxels, num_points, coors = self.voxelize(pts) + voxel_features = self.pts_voxel_encoder(voxels, num_points, coors, + ) + batch_size = coors[-1, 0] + 1 + x = self.pts_middle_encoder(voxel_features, coors, batch_size) + x = self.pts_backbone(x) + if self.with_pts_neck: + x = self.pts_neck(x) + return x + + @torch.no_grad() + @force_fp32() + def voxelize(self, points): + """Apply dynamic voxelization to points. + + Args: + points (list[torch.Tensor]): Points of each sample. + + Returns: + tuple[torch.Tensor]: Concatenated points, number of points + per voxel, and coordinates. 
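+
+        Example (illustrative shapes only; the voxel count M and the cap on
+            points per voxel come from the voxelizer config, and ``pts_a`` /
+            ``pts_b`` are hypothetical per-sample point tensors):
+            >>> voxels, num_points, coors = self.voxelize([pts_a, pts_b])
+            >>> voxels.shape      # (M, max_points_per_voxel, point_dim)
+            >>> num_points.shape  # (M,)
+            >>> coors.shape       # (M, 4), batch index prepended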
+ """ + voxels, coors, num_points = [], [], [] + for res in points: + res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res) + voxels.append(res_voxels) + coors.append(res_coors) + num_points.append(res_num_points) + voxels = torch.cat(voxels, dim=0) + num_points = torch.cat(num_points, dim=0) + coors_batch = [] + for i, coor in enumerate(coors): + coor_pad = F.pad(coor, (1, 0), mode='constant', value=i) + coors_batch.append(coor_pad) + coors_batch = torch.cat(coors_batch, dim=0) + return voxels, num_points, coors_batch + + def forward_train(self, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img=None, + proposals=None, + gt_bboxes_ignore=None): + """Forward training function. + + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + + Returns: + dict: Losses of different branches. + """ + + img_feats, pts_feats = self.extract_feat( + points, img=img, img_metas=img_metas) + losses = dict() + if pts_feats or img_feats: + losses_pts = self.forward_pts_train(pts_feats, img_feats, gt_bboxes_3d, + gt_labels_3d, img_metas, + gt_bboxes_ignore) + losses.update(losses_pts) + return losses + + @force_fp32(apply_to=('pts_feats', 'img_feats')) + def forward_pts_train(self, + pts_feats, + img_feats, + gt_bboxes_3d, + gt_labels_3d, + img_metas, + gt_bboxes_ignore=None): + """Forward function for point cloud branch. + + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + gt_labels_3d (list[torch.Tensor]): Ground truth labels for + boxes of each sampole + img_metas (list[dict]): Meta information of samples. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + + Returns: + dict: Losses of each branch. + """ + if pts_feats is None: + pts_feats = [None] + if img_feats is None: + img_feats = [None] + outs = self.pts_bbox_head(pts_feats, img_feats, img_metas) + loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs] + losses = self.pts_bbox_head.loss(*loss_inputs) + return losses + + def forward_test(self, + points=None, + img_metas=None, + img=None, **kwargs): + """ + Args: + points (list[torch.Tensor]): the outer list indicates test-time + augmentations and inner torch.Tensor should have a shape NxC, + which contains all points in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) 
+                images in a batch.
+            img (list[torch.Tensor], optional): the outer
+                list indicates test-time augmentations and inner
+                torch.Tensor should have a shape NxCxHxW, which contains
+                all images in the batch. Defaults to None.
+        """
+        if points is None:
+            points = [None]
+        if img is None:
+            img = [None]
+        for var, name in [(points, 'points'), (img, 'img'), (img_metas, 'img_metas')]:
+            if not isinstance(var, list):
+                raise TypeError('{} must be a list, but got {}'.format(
+                    name, type(var)))
+
+        return self.simple_test(points[0], img_metas[0], img[0], **kwargs)
+
+    @force_fp32(apply_to=('x', 'x_img'))
+    def simple_test_pts(self, x, x_img, img_metas, rescale=False):
+        """Test function of point cloud branch."""
+        outs = self.pts_bbox_head(x, x_img, img_metas)
+        bbox_list = self.pts_bbox_head.get_bboxes(
+            outs, img_metas, rescale=rescale)
+        bbox_results = [
+            bbox3d2result(bboxes, scores, labels)
+            for bboxes, scores, labels in bbox_list
+        ]
+        return bbox_results
+
+    def simple_test(self, points, img_metas, img=None, rescale=False):
+        img_feats, pts_feats = self.extract_feat(
+            points, img=img, img_metas=img_metas)
+        if pts_feats is None:
+            pts_feats = [None]
+        if img_feats is None:
+            img_feats = [None]
+
+        bbox_list = [dict() for i in range(len(img_metas))]
+        if (pts_feats or img_feats) and self.with_pts_bbox:
+            bbox_pts = self.simple_test_pts(
+                pts_feats, img_feats, img_metas, rescale=rescale)
+            for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
+                result_dict['pts_bbox'] = pts_bbox
+        if img_feats and self.with_img_bbox:
+            bbox_img = self.simple_test_img(
+                img_feats, img_metas, rescale=rescale)
+            for result_dict, img_bbox in zip(bbox_list, bbox_img):
+                result_dict['img_bbox'] = img_bbox
+        return bbox_list
diff --git a/projects/mmdet3d_plugin/models/necks/__init__.py b/projects/mmdet3d_plugin/models/necks/__init__.py
new file mode 100644
index 0000000..eee08db
--- /dev/null
+++ b/projects/mmdet3d_plugin/models/necks/__init__.py
@@ -0,0 +1,3 @@
+from .cp_fpn import CPFPN
+
+__all__ = ['CPFPN']
\ No newline at end of file
diff --git a/projects/mmdet3d_plugin/models/necks/cp_fpn.py b/projects/mmdet3d_plugin/models/necks/cp_fpn.py
new file mode 100644
index 0000000..9e28a10
--- /dev/null
+++ b/projects/mmdet3d_plugin/models/necks/cp_fpn.py
@@ -0,0 +1,204 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule
+from mmcv.runner import BaseModule, auto_fp16
+
+from mmdet.models import NECKS
+
+
+# This FPN removes unused parameters so it can be used together with
+# checkpointing (with_cp=True in the backbone).
+@NECKS.register_module()
+class CPFPN(BaseModule):
+    r"""Feature Pyramid Network.
+
+    This is an implementation of paper `Feature Pyramid Networks for Object
+    Detection <https://arxiv.org/abs/1612.03144>`_.
+
+    Args:
+        in_channels (List[int]): Number of input channels per scale.
+        out_channels (int): Number of output channels (used at each scale)
+        num_outs (int): Number of output scales.
+        start_level (int): Index of the start input backbone level used to
+            build the feature pyramid. Default: 0.
+        end_level (int): Index of the end input backbone level (exclusive) to
+            build the feature pyramid. Default: -1, which means the last level.
+        add_extra_convs (bool | str): If bool, it decides whether to add conv
+            layers on top of the original feature maps. Default to False.
+            If True, it is equivalent to `add_extra_convs='on_input'`.
+            If str, it specifies the source feature map of the extra convs.
+            Only the following options are allowed
+
+            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
+            - 'on_lateral': Last feature map after lateral convs.
+            - 'on_output': The last output feature map after fpn convs.
+        relu_before_extra_convs (bool): Whether to apply relu before the extra
+            conv. Default: False.
+        no_norm_on_lateral (bool): Whether to apply norm on lateral.
+            Default: False.
+        conv_cfg (dict): Config dict for convolution layer. Default: None.
+        norm_cfg (dict): Config dict for normalization layer. Default: None.
+        act_cfg (dict): Config dict for activation layer in ConvModule.
+            Default: None.
+        upsample_cfg (dict): Config dict for interpolate layer.
+            Default: `dict(mode='nearest')`
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+
+    Example:
+        >>> import torch
+        >>> in_channels = [2, 3, 5, 7]
+        >>> scales = [340, 170, 84, 43]
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip(in_channels, scales)]
+        >>> self = CPFPN(in_channels, 11, len(in_channels)).eval()
+        >>> outputs = self.forward(inputs)
+        >>> for i in range(len(outputs)):
+        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
+        outputs[0].shape = torch.Size([1, 11, 340, 340])
+        outputs[1].shape = torch.Size([1, 11, 170, 170])
+        outputs[2].shape = torch.Size([1, 11, 84, 84])
+        outputs[3].shape = torch.Size([1, 11, 43, 43])
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_outs,
+                 start_level=0,
+                 end_level=-1,
+                 add_extra_convs=False,
+                 relu_before_extra_convs=False,
+                 no_norm_on_lateral=False,
+                 conv_cfg=None,
+                 norm_cfg=None,
+                 act_cfg=None,
+                 upsample_cfg=dict(mode='nearest'),
+                 init_cfg=dict(
+                     type='Xavier', layer='Conv2d', distribution='uniform')):
+        super(CPFPN, self).__init__(init_cfg)
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        self.num_outs = num_outs
+        self.relu_before_extra_convs = relu_before_extra_convs
+        self.no_norm_on_lateral = no_norm_on_lateral
+        self.fp16_enabled = False
+        self.upsample_cfg = upsample_cfg.copy()
+
+        if end_level == -1:
+            self.backbone_end_level = self.num_ins
+            assert num_outs >= self.num_ins - start_level
+        else:
+            # if end_level < inputs, no extra level is allowed
+            self.backbone_end_level = end_level
+            assert end_level <= len(in_channels)
+            assert num_outs == end_level - start_level
+        self.start_level = start_level
+        self.end_level = end_level
+        self.add_extra_convs = add_extra_convs
+        assert isinstance(add_extra_convs, (str, bool))
+        if isinstance(add_extra_convs, str):
+            # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
+            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
+        elif add_extra_convs:  # True
+            self.add_extra_convs = 'on_input'
+
+        self.lateral_convs = nn.ModuleList()
+        self.fpn_convs = nn.ModuleList()
+
+        for i in range(self.start_level, self.backbone_end_level):
+            l_conv = ConvModule(
+                in_channels[i],
+                out_channels,
+                1,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
+                act_cfg=act_cfg,
+                inplace=False)
+            self.lateral_convs.append(l_conv)
+            if i == 0:
+                fpn_conv = ConvModule(
+                    out_channels,
+                    out_channels,
+                    3,
+                    padding=1,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg,
+                    act_cfg=act_cfg,
+                    inplace=False)
+                self.fpn_convs.append(fpn_conv)
+
+        # add extra conv layers (e.g., RetinaNet)
+        extra_levels = num_outs - self.backbone_end_level + self.start_level
+        if self.add_extra_convs and extra_levels >= 1:
+            for i in range(extra_levels):
+                if i
== 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + @auto_fp16() + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] += F.interpolate(laterals[i], + **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) if i==0 else laterals[i] for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/projects/mmdet3d_plugin/models/utils/__init__.py b/projects/mmdet3d_plugin/models/utils/__init__.py new file mode 100644 index 0000000..2028003 --- /dev/null +++ b/projects/mmdet3d_plugin/models/utils/__init__.py @@ -0,0 +1,2 @@ +from .cmt_transformer import * +from .petr_transformer import * diff --git a/projects/mmdet3d_plugin/models/utils/attention.py b/projects/mmdet3d_plugin/models/utils/attention.py new file mode 100644 index 0000000..b9d2b46 --- /dev/null +++ b/projects/mmdet3d_plugin/models/utils/attention.py @@ -0,0 +1,138 @@ +# Copyright (c) 2023 megvii-model. All Rights Reserved. 
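+
+# Usage sketch (illustrative only; FlashAttention requires CUDA tensors in
+# fp16/bf16 and the flash_attn package to be installed):
+#   mha = FlashMHA(embed_dim=256, num_heads=8).cuda().half()
+#   x = torch.randn(2, 900, 256, device='cuda', dtype=torch.float16)
+#   out, _ = mha(x, x, x)  # out: (2, 900, 256)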
+
+import math
+import torch
+import torch.nn as nn
+from torch.nn.init import (
+    xavier_uniform_,
+    constant_,
+    xavier_normal_
+)
+from torch.nn.functional import linear
+
+from einops import rearrange
+from mmcv.runner import auto_fp16
+from mmcv.runner.base_module import BaseModule
+
+from flash_attn.flash_attn_interface import flash_attn_unpadded_kvpacked_func
+from flash_attn.bert_padding import unpad_input, pad_input, index_first_axis
+
+
+def _in_projection_packed(q, k, v, w, b=None):
+    w_q, w_k, w_v = w.chunk(3)
+    if b is None:
+        b_q = b_k = b_v = None
+    else:
+        b_q, b_k, b_v = b.chunk(3)
+    return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
+
+
+class FlashAttention(nn.Module):
+    """Implements scaled dot-product attention with softmax.
+    Arguments
+    ---------
+        softmax_scale: The temperature to use for the softmax attention.
+            (default: 1/sqrt(d_keys) where d_keys is computed at
+            runtime)
+        attention_dropout: The dropout rate to apply to the attention
+            (default: 0.0)
+    """
+    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
+        super().__init__()
+        self.softmax_scale = softmax_scale
+        self.dropout_p = attention_dropout
+        self.fp16_enabled = True
+
+    @auto_fp16(apply_to=('q', 'kv'), out_fp32=True)
+    def forward(self, q, kv,
+                causal=False,
+                key_padding_mask=None):
+        """Implements the multihead softmax attention.
+        Arguments
+        ---------
+            q: The tensor containing the query. (B, T, H, D)
+            kv: The tensor containing the key, and value. (B, S, 2, H, D)
+            key_padding_mask: a bool tensor of shape (B, S)
+        """
+        assert q.dtype in [torch.float16, torch.bfloat16] and kv.dtype in [torch.float16, torch.bfloat16]
+        assert q.is_cuda and kv.is_cuda
+        assert q.shape[0] == kv.shape[0] and q.shape[-2] == kv.shape[-2] and q.shape[-1] == kv.shape[-1]
+
+        batch_size = q.shape[0]
+        seqlen_q, seqlen_k = q.shape[1], kv.shape[1]
+        if key_padding_mask is None:
+            q, kv = rearrange(q, 'b s ... -> (b s) ...'), rearrange(kv, 'b s ... -> (b s) ...')
+            max_sq, max_sk = seqlen_q, seqlen_k
+            cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32,
+                                        device=q.device)
+            cu_seqlens_k = torch.arange(0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32,
+                                        device=kv.device)
+            output = flash_attn_unpadded_kvpacked_func(
+                q, kv, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk,
+                self.dropout_p if self.training else 0.0,
+                softmax_scale=self.softmax_scale, causal=causal
+            )
+            output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
+        else:
+            nheads = kv.shape[-2]
+            q = rearrange(q, 'b s ... -> (b s) ...')
+            max_sq = seqlen_q
+            cu_seqlens_q = torch.arange(0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32,
+                                        device=q.device)
+            x = rearrange(kv, 'b s two h d -> b s (two h d)')
+            x_unpad, indices, cu_seqlens_k, max_sk = unpad_input(x, key_padding_mask)
+            x_unpad = rearrange(x_unpad, 'nnz (two h d) -> nnz two h d', two=2, h=nheads)
+            output_unpad = flash_attn_unpadded_kvpacked_func(
+                q, x_unpad, cu_seqlens_q, cu_seqlens_k, max_sq, max_sk,
+                self.dropout_p if self.training else 0.0,
+                softmax_scale=self.softmax_scale, causal=causal
+            )
+            output = rearrange(output_unpad, '(b s) ...
-> b s ...', b=batch_size) + + return output, None + + +class FlashMHA(nn.Module): + + def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, attention_dropout=0.0, + causal=False, device=None, dtype=None, **kwargs) -> None: + assert batch_first + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.embed_dim = embed_dim + self.causal = causal + self.bias = bias + + self.num_heads = num_heads + assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads" + self.head_dim = self.embed_dim // num_heads + assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8" + + self.in_proj_weight = nn.Parameter(torch.empty((3 * embed_dim, embed_dim))) + if bias: + self.in_proj_bias = nn.Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.inner_attn = FlashAttention(attention_dropout=attention_dropout, **factory_kwargs) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self._reset_parameters() + + def _reset_parameters(self) -> None: + xavier_uniform_(self.in_proj_weight) + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + + def forward(self, q, k, v, key_padding_mask=None): + """x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) + key_padding_mask: bool tensor of shape (batch, seqlen) + """ + # q, k, v = self.Wq(q), self.Wk(k), self.Wv(v) + q, k, v = _in_projection_packed(q, k, v, self.in_proj_weight, self.in_proj_bias) + q = rearrange(q, 'b s (h d) -> b s h d', h=self.num_heads) + k = rearrange(k, 'b s (h d) -> b s h d', h=self.num_heads) + v = rearrange(v, 'b s (h d) -> b s h d', h=self.num_heads) + kv = torch.stack([k, v], dim=2) + + context, attn_weights = self.inner_attn(q, kv, key_padding_mask=key_padding_mask, causal=self.causal) + return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')), attn_weights diff --git a/projects/mmdet3d_plugin/models/utils/cmt_transformer.py b/projects/mmdet3d_plugin/models/utils/cmt_transformer.py new file mode 100644 index 0000000..4c9948d --- /dev/null +++ b/projects/mmdet3d_plugin/models/utils/cmt_transformer.py @@ -0,0 +1,282 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2022 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from DETR3D (https://github.com/WangYueFt/detr3d) +# Copyright (c) 2021 Wang, Yue +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. 
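+#
+# The transformers below are registered in mmdet's TRANSFORMER registry and
+# are normally instantiated from config. A minimal sketch of how that looks
+# (the decoder settings are illustrative assumptions, not the release
+# configs):
+#
+#   from mmdet.models.utils.builder import build_transformer
+#   transformer = build_transformer(dict(
+#       type='CmtTransformer',
+#       decoder=dict(
+#           type='PETRTransformerDecoder',
+#           return_intermediate=True,
+#           num_layers=6,
+#           transformerlayers=dict(type='PETRTransformerDecoderLayer', ...))))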
+# ------------------------------------------------------------------------ + +import math +import copy +import warnings +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp + +from typing import Sequence +from einops import rearrange +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.runner.base_module import BaseModule +from mmcv.cnn.bricks.transformer import ( + BaseTransformerLayer, + TransformerLayerSequence, + build_transformer_layer_sequence +) +from mmcv.cnn import ( + build_activation_layer, + build_conv_layer, + build_norm_layer, + xavier_init +) +from mmcv.cnn.bricks.registry import ( + ATTENTION, + TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE +) +from mmcv.utils import ( + ConfigDict, + build_from_cfg, + deprecated_api_warning, + to_2tuple +) +from mmdet.models.utils.builder import TRANSFORMER + + +@TRANSFORMER.register_module() +class CmtTransformer(BaseModule): + """Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None, cross=False): + super(CmtTransformer, self).__init__(init_cfg=init_cfg) + if encoder is not None: + self.encoder = build_transformer_layer_sequence(encoder) + else: + self.encoder = None + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, x_img, query_embed, bev_pos_embed, rv_pos_embed, attn_masks=None, reg_branch=None): + """Forward function for `Transformer`. + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. 
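+        Example (shape walk-through; all sizes are illustrative assumptions):
+            x:           [2, 256, 180, 180] BEV features -> 32400 tokens
+            x_img:       [12, 256, 40, 100] for bs=2 with 6 views -> 24000 tokens
+            memory:      [56400, 2, 256] after the BEV/RV concatenation
+            query_embed: [2, 900, 256] -> [900, 2, 256] after transpose
+            out_dec:     [num_layers, 2, 900, 256] after transpose(1, 2)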
+ """ + bs, c, h, w = x.shape + bev_memory = rearrange(x, "bs c h w -> (h w) bs c") # [bs, n, c, h, w] -> [n*h*w, bs, c] + rv_memory = rearrange(x_img, "(bs v) c h w -> (v h w) bs c", bs=bs) + bev_pos_embed = bev_pos_embed.unsqueeze(1).repeat(1, bs, 1) # [bs, n, c, h, w] -> [n*h*w, bs, c] + rv_pos_embed = rearrange(rv_pos_embed, "(bs v) h w c -> (v h w) bs c", bs=bs) + + memory, pos_embed = torch.cat([bev_memory, rv_memory], dim=0), torch.cat([bev_pos_embed, rv_pos_embed], dim=0) + query_embed = query_embed.transpose(0, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = memory.new_zeros(bs, memory.shape[0]) # [bs, n, h, w] -> [bs, n*h*w] + + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask, + attn_masks=[attn_masks, None], + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2) + return out_dec, memory + + +@TRANSFORMER.register_module() +class CmtLidarTransformer(BaseModule): + """Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None, cross=False): + super(CmtLidarTransformer, self).__init__(init_cfg=init_cfg) + if encoder is not None: + self.encoder = build_transformer_layer_sequence(encoder) + else: + self.encoder = None + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, mask, query_embed, pos_embed, attn_masks=None, reg_branch=None): + """Forward function for `Transformer`. + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. 
+ """ + bs, c, h, w = x.shape + memory = rearrange(x, "bs c h w -> (h w) bs c") # [bs, n, c, h, w] -> [n*h*w, bs, c] + pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1) # [bs, n, c, h, w] -> [n*h*w, bs, c] + query_embed = query_embed.transpose(0, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, n, h, w] -> [bs, n*h*w] + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask, + attn_masks=[attn_masks, None], + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2) + return out_dec, memory + + +@TRANSFORMER.register_module() +class CmtImageTransformer(BaseModule): + """Implements the DETR transformer. + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + See `paper: End-to-End Object Detection with Transformers + `_ for details. + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None, cross=False): + super(CmtImageTransformer, self).__init__(init_cfg=init_cfg) + if encoder is not None: + self.encoder = build_transformer_layer_sequence(encoder) + else: + self.encoder = None + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.decoder.embed_dims + self.cross = cross + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x_img, query_embed, rv_pos_embed, attn_masks=None, reg_branch=None, bs=2): + """Forward function for `Transformer`. + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. 
+ """ + memory = rearrange(x_img, "(bs v) c h w -> (v h w) bs c", bs=bs) + pos_embed = rearrange(rv_pos_embed, "(bs v) h w c -> (v h w) bs c", bs=bs) + + query_embed = query_embed.transpose(0, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = memory.new_zeros(bs, memory.shape[0]) # [bs, n, h, w] -> [bs, n*h*w] + + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask, + attn_masks=[attn_masks, None], + reg_branch=reg_branch, + ) + out_dec = out_dec.transpose(1, 2) + return out_dec, memory diff --git a/projects/mmdet3d_plugin/models/utils/grid_mask.py b/projects/mmdet3d_plugin/models/utils/grid_mask.py new file mode 100644 index 0000000..84caf00 --- /dev/null +++ b/projects/mmdet3d_plugin/models/utils/grid_mask.py @@ -0,0 +1,124 @@ +import torch +import torch.nn as nn +import numpy as np +from PIL import Image + + +class Grid(object): + def __init__(self, use_h, use_w, rotate = 1, offset=False, ratio = 0.5, mode=0, prob = 1.): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode=mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + + def __call__(self, img, label): + if np.random.rand() > self.prob: + return img, label + h = img.size(1) + w = img.size(2) + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5*h) + ww = int(1.5*w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.l = np.random.randint(1, d) + else: + self.l = min(max(int(d*self.ratio+0.5),1),d-1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh//d): + s = d*i + st_h + t = min(s+self.l, hh) + mask[s:t,:] *= 0 + if self.use_w: + for i in range(ww//d): + s = d*i + st_w + t = min(s+self.l, ww) + mask[:,s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh-h)//2:(hh-h)//2+h, (ww-w)//2:(ww-w)//2+w] + + mask = torch.from_numpy(mask).float() + if self.mode == 1: + mask = 1-mask + + mask = mask.expand_as(img) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h,w) - 0.5)).float() + offset = (1 - mask) * offset + img = img * mask + offset + else: + img = img * mask + + return img, label + + +class GridMask(nn.Module): + def __init__(self, use_h, use_w, rotate = 1, offset=False, ratio = 0.5, mode=0, prob = 1.): + super(GridMask, self).__init__() + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch #+ 1.#0.5 + + def forward(self, x): + if np.random.rand() > self.prob or not self.training: + return x + n,c,h,w = x.size() + x = x.view(-1,h,w) + hh = int(1.5*h) + ww = int(1.5*w) + d = np.random.randint(2, h) + self.l = min(max(int(d*self.ratio+0.5),1),d-1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh//d): + s = d*i + st_h + t = min(s+self.l, hh) + mask[s:t,:] *= 0 + if self.use_w: + for i in range(ww//d): + s = d*i + st_w + t = min(s+self.l, ww) + mask[:,s:t] *= 0 + + r = np.random.randint(self.rotate) + mask 
= Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh-h)//2:(hh-h)//2+h, (ww-w)//2:(ww-w)//2+w] + + mask = torch.from_numpy(mask).float().cuda() + if self.mode == 1: + mask = 1-mask + mask = mask.expand_as(x) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h,w) - 0.5)).float().cuda() + x = x * mask + offset * (1 - mask) + else: + x = x * mask + + return x.view(n,c,h,w) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/utils/petr_transformer.py b/projects/mmdet3d_plugin/models/utils/petr_transformer.py new file mode 100644 index 0000000..a11886e --- /dev/null +++ b/projects/mmdet3d_plugin/models/utils/petr_transformer.py @@ -0,0 +1,487 @@ +import math +import copy +import warnings +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp + +from einops import rearrange +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.runner.base_module import BaseModule + +from mmcv.cnn.bricks.transformer import ( + BaseTransformerLayer, + TransformerLayerSequence, + build_transformer_layer_sequence +) +from mmcv.cnn import ( + build_activation_layer, + build_conv_layer, + build_norm_layer, + xavier_init +) +from mmcv.cnn.bricks.registry import ( + ATTENTION,TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE +) +from mmcv.utils import ( + ConfigDict, + build_from_cfg, + deprecated_api_warning, + to_2tuple +) +from mmdet.models.utils.builder import TRANSFORMER + + +@ATTENTION.register_module() +class PETRMultiheadAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + init_cfg=None, + batch_first=False, + **kwargs): + super(PETRMultiheadAttention, self).__init__(init_cfg) + if 'dropout' in kwargs: + warnings.warn( + 'The arguments `dropout` in MultiheadAttention ' + 'has been deprecated, now you can separately ' + 'set `attn_drop`(float), proj_drop(float), ' + 'and `dropout_layer`(dict) ', DeprecationWarning) + attn_drop = kwargs['dropout'] + dropout_layer['drop_prob'] = kwargs.pop('dropout') + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = batch_first + + self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, + **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiheadAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `MultiheadAttention`. 
+ **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + Returns: + Tensor: forwarded results with shape + [num_queries, bs, embed_dims] + if self.batch_first is False, else + [bs, num_queries embed_dims]. + """ + + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + out = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + +from .attention import FlashMHA + +@ATTENTION.register_module() +class PETRMultiheadFlashAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
+        batch_first (bool): When it is True, Key, Query and Value are shape of
+            (batch, n, embed_dim), otherwise (n, batch, embed_dim).
+            Default to True; this wrapper always runs batch-first internally.
+    """
+
+    def __init__(self,
+                 embed_dims,
+                 num_heads,
+                 attn_drop=0.,
+                 proj_drop=0.,
+                 dropout_layer=dict(type='Dropout', drop_prob=0.),
+                 init_cfg=None,
+                 batch_first=True,
+                 **kwargs):
+        super(PETRMultiheadFlashAttention, self).__init__(init_cfg)
+        if 'dropout' in kwargs:
+            warnings.warn(
+                'The arguments `dropout` in MultiheadAttention '
+                'has been deprecated, now you can separately '
+                'set `attn_drop`(float), proj_drop(float), '
+                'and `dropout_layer`(dict) ', DeprecationWarning)
+            attn_drop = kwargs['dropout']
+            dropout_layer['drop_prob'] = kwargs.pop('dropout')
+
+        self.embed_dims = embed_dims
+        self.num_heads = num_heads
+        # FlashMHA only supports batch-first tensors, so this is forced on.
+        self.batch_first = True
+
+        # `bias` is FlashMHA's third positional argument, so the dropout rate
+        # must be passed by keyword to avoid disabling the projection biases.
+        self.attn = FlashMHA(embed_dims, num_heads, attention_dropout=attn_drop,
+                             dtype=torch.float16, device='cuda', **kwargs)
+
+        self.proj_drop = nn.Dropout(proj_drop)
+        self.dropout_layer = build_dropout(
+            dropout_layer) if dropout_layer else nn.Identity()
+
+    @deprecated_api_warning({'residual': 'identity'},
+                            cls_name='MultiheadAttention')
+    def forward(self,
+                query,
+                key=None,
+                value=None,
+                identity=None,
+                query_pos=None,
+                key_pos=None,
+                attn_mask=None,
+                key_padding_mask=None,
+                **kwargs):
+        """Forward function for `MultiheadAttention`.
+        **kwargs allow passing a more general data flow when combining
+        with other operations in `transformerlayer`.
+        Args:
+            query (Tensor): The input query with shape [num_queries, bs,
+                embed_dims] if self.batch_first is False, else
+                [bs, num_queries, embed_dims].
+            key (Tensor): The key tensor with shape [num_keys, bs,
+                embed_dims] if self.batch_first is False, else
+                [bs, num_keys, embed_dims].
+                If None, the ``query`` will be used. Defaults to None.
+            value (Tensor): The value tensor with same shape as `key`.
+                Same in `nn.MultiheadAttention.forward`. Defaults to None.
+                If None, the `key` will be used.
+            identity (Tensor): This tensor, with the same shape as x,
+                will be used for the identity link.
+                If None, `x` will be used. Defaults to None.
+            query_pos (Tensor): The positional encoding for query, with
+                the same shape as `x`. If not None, it will
+                be added to `x` before forward function. Defaults to None.
+            key_pos (Tensor): The positional encoding for `key`, with the
+                same shape as `key`. Defaults to None. If not None, it will
+                be added to `key` before forward function. If None, and
+                `query_pos` has the same shape as `key`, then `query_pos`
+                will be used for `key_pos`. Defaults to None.
+            attn_mask (Tensor): ByteTensor mask with shape [num_queries,
+                num_keys]. Same in `nn.MultiheadAttention.forward`.
+                Defaults to None.
+            key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
+                Defaults to None.
+        Returns:
+            Tensor: forwarded results with shape
+                [num_queries, bs, embed_dims]
+                if self.batch_first is False, else
+                [bs, num_queries, embed_dims].
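+        Example (minimal sketch; sizes are assumptions, needs a CUDA device):
+            attn = PETRMultiheadFlashAttention(256, 8)
+            q = torch.randn(900, 2, 256).cuda()  # [num_query, bs, embed_dims]
+            out = attn(q)                        # identity + attention, same shape
+        Note that this wrapper does not forward `key_padding_mask` to the
+        flash kernel; all keys are attended.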
+ """ + + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + out = self.attn( + q=query, + k=key, + v=value, + key_padding_mask=None)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class PETRTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + return_intermediate=False, + **kwargs): + + super(PETRTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if post_norm_cfg is not None: + self.post_norm = build_norm_layer(post_norm_cfg, + self.embed_dims)[1] + else: + self.post_norm = None + + def forward(self, query, *args, **kwargs): + """Forward function for `TransformerDecoder`. + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + return torch.stack(intermediate) + + +@TRANSFORMER_LAYER.register_module() +class PETRTransformerDecoderLayer(BaseTransformerLayer): + """Implements decoder layer in DETR transformer. + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. 
+ """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + with_cp=True, + **kwargs): + super(PETRTransformerDecoderLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + self.use_checkpoint = with_cp + + def _forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + ): + """Forward function for `TransformerCoder`. + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + x = super(PETRTransformerDecoderLayer, self).forward( + query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + ) + + return x + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs + ): + """Forward function for `TransformerCoder`. + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if self.use_checkpoint and self.training: + x = cp.checkpoint( + self._forward, + query, + key, + value, + query_pos, + key_pos, + attn_masks, + query_key_padding_mask, + key_padding_mask, + ) + else: + x = self._forward( + query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask + ) + + return x diff --git a/tools/create_data.py b/tools/create_data.py new file mode 100644 index 0000000..05f898c --- /dev/null +++ b/tools/create_data.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from os import path as osp + +from data_converter import nuscenes_converter as nuscenes_converter +from data_converter.create_unified_gt_database import create_groundtruth_database + + +def nuscenes_data_prep(root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10): + """Prepare data related to nuScenes dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + dataset_name (str): The dataset class name. + out_dir (str): Output directory of the groundtruth database info. + max_sweeps (int): Number of input consecutive frames. 
            Default: 10
+    """
+    nuscenes_converter.create_nuscenes_infos(
+        root_path, info_prefix, version=version, max_sweeps=max_sweeps)
+
+    if version == 'v1.0-test':
+        info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl')
+        nuscenes_converter.export_2d_annotation(
+            root_path, info_test_path, version=version)
+        return
+
+    info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl')
+    info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl')
+    nuscenes_converter.export_2d_annotation(
+        root_path, info_train_path, version=version)
+    nuscenes_converter.export_2d_annotation(
+        root_path, info_val_path, version=version)
+    create_groundtruth_database(dataset_name, root_path, info_prefix,
+                                f'{out_dir}/{info_prefix}_infos_train.pkl')
+
+
+parser = argparse.ArgumentParser(description='Data converter arg parser')
+parser.add_argument('dataset', metavar='nuscenes', help='name of the dataset')
+parser.add_argument(
+    '--root-path',
+    type=str,
+    default='./data/nuscenes',
+    help='specify the root path of dataset')
+parser.add_argument(
+    '--version',
+    type=str,
+    default='v1.0',
+    required=False,
+    help='specify the dataset version; the -trainval/-test suffix is '
+    'appended automatically')
+parser.add_argument(
+    '--max-sweeps',
+    type=int,
+    default=10,
+    required=False,
+    help='specify sweeps of lidar per example')
+parser.add_argument(
+    '--out-dir',
+    type=str,
+    default='./data/nuscenes',
+    required=False,
+    help='output directory of the info files')
+parser.add_argument('--extra-tag', type=str, default='nuscenes')
+parser.add_argument(
+    '--workers', type=int, default=4, help='number of threads to be used')
+args = parser.parse_args()
+
+if __name__ == '__main__':
+    if args.dataset == 'nuscenes' and args.version != 'v1.0-mini':
+        train_version = f'{args.version}-trainval'
+        nuscenes_data_prep(
+            root_path=args.root_path,
+            info_prefix=args.extra_tag,
+            version=train_version,
+            dataset_name='NuScenesSweepDataset',
+            out_dir=args.out_dir,
+            max_sweeps=args.max_sweeps)
+        test_version = f'{args.version}-test'
+        nuscenes_data_prep(
+            root_path=args.root_path,
+            info_prefix=args.extra_tag,
+            version=test_version,
+            dataset_name='NuScenesSweepDataset',
+            out_dir=args.out_dir,
+            max_sweeps=args.max_sweeps)
+    elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini':
+        train_version = f'{args.version}'
+        nuscenes_data_prep(
+            root_path=args.root_path,
+            info_prefix=args.extra_tag,
+            version=train_version,
+            dataset_name='NuScenesSweepDataset',
+            out_dir=args.out_dir,
+            max_sweeps=args.max_sweeps)
diff --git a/tools/create_data.sh b/tools/create_data.sh
new file mode 100755
index 0000000..9a57852
--- /dev/null
+++ b/tools/create_data.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -x
+export PYTHONPATH=`pwd`:$PYTHONPATH
+
+PARTITION=$1
+JOB_NAME=$2
+DATASET=$3
+GPUS=${GPUS:-1}
+GPUS_PER_NODE=${GPUS_PER_NODE:-1}
+SRUN_ARGS=${SRUN_ARGS:-""}
+JOB_NAME=create_data
+
+srun -p ${PARTITION} \
+    --job-name=${JOB_NAME} \
+    --gres=gpu:${GPUS_PER_NODE} \
+    --ntasks=${GPUS} \
+    --ntasks-per-node=${GPUS_PER_NODE} \
+    --kill-on-bad-exit=1 \
+    ${SRUN_ARGS} \
+    python -u tools/create_data.py ${DATASET} \
+    --root-path ./data/${DATASET} \
+    --out-dir ./data/${DATASET} \
+    --extra-tag ${DATASET}
diff --git a/tools/data_converter/create_unified_gt_database.py b/tools/data_converter/create_unified_gt_database.py
new file mode 100644
index 0000000..2ad6868
--- /dev/null
+++ b/tools/data_converter/create_unified_gt_database.py
@@ -0,0 +1,272 @@
+# ------------------------------------------------------------------------
+# Copyright (c)
2023 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from UVTR (https://github.com/dvlab-research/UVTR) +# Copyright (c) 2022 Li, Yanwei +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ + +import mmcv +import numpy as np +import pickle +import argparse +import os +import importlib + +from mmcv import track_iter_progress +from os import path as osp + +from mmdet3d.core.bbox import box_np_ops as box_np_ops +from mmdet3d.datasets import build_dataset +from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps + + +def create_groundtruth_database(dataset_class_name, + data_path, + info_prefix, + info_path=None, + used_classes=None, + database_save_path=None, + db_info_save_path=None, + with_mask=False): + """Given the raw data, generate the ground truth database. + + Args: + dataset_class_name (str): Name of the input dataset. + data_path (str): Path of the data. + info_prefix (str): Prefix of the info file. + info_path (str): Path of the info file. + Default: None. + mask_anno_path (str): Path of the mask_anno. + Default: None. + used_classes (list[str]): Classes have been used. + Default: None. + database_save_path (str): Path to save database. + Default: None. + db_info_save_path (str): Path to save db_info. + Default: None. + relative_path (bool): Whether to use relative path. + Default: True. + with_mask (bool): Whether to use mask. + Default: False. + """ + print(f'Create GT Database of {dataset_class_name}') + dataset_cfg = dict( + type=dataset_class_name, data_root=data_path, ann_file=info_path, return_gt_info=True) + if dataset_class_name == 'NuScenesSweepDataset': + dataset_cfg.update( + use_valid_flag=True, + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True) + ]) + + dataset = build_dataset(dataset_cfg) + + if database_save_path is None: + database_save_path = osp.join(data_path, f'{info_prefix}_gt_database') + if db_info_save_path is None: + db_info_save_path = osp.join(data_path, + f'{info_prefix}_dbinfos_train.pkl') + database_pts_path = osp.join(database_save_path, 'pts_dir') + database_img_path = osp.join(database_save_path, 'img_dir') + mmcv.mkdir_or_exist(database_save_path) + mmcv.mkdir_or_exist(database_pts_path) + mmcv.mkdir_or_exist(database_img_path) + all_db_infos = dict() + + group_counter = 0 + for j in track_iter_progress(list(range(len(dataset)))): + + input_dict = dataset.get_data_info(j) + dataset.pre_pipeline(input_dict) + example = dataset.pipeline(input_dict) + annos = example['ann_info'] + image_idx = example['sample_idx'] + points = example['points'].tensor.numpy() + gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() + names = annos['gt_names'] + group_dict = dict() + if 'group_ids' in annos: + group_ids = annos['group_ids'] + else: + group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) + difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) + if 'difficulty' in annos: + difficulty = annos['difficulty'] + + num_obj = gt_boxes_3d.shape[0] + point_indices = 
box_np_ops.points_in_rbbox(points, gt_boxes_3d)
+
+        # load multi-view image
+        input_img = {}
+        input_info = {}
+        for _cam in example['info']['cams']:
+            cam_info = example['info']['cams'][_cam]
+            _path = cam_info['data_path']
+            _img = mmcv.imread(_path, 'unchanged')
+            input_img[_cam] = _img
+
+            # obtain lidar to image transformation matrix
+            lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
+            lidar2cam_t = cam_info['sensor2lidar_translation'] @ lidar2cam_r.T
+            lidar2cam_rt = np.eye(4)
+            lidar2cam_rt[:3, :3] = lidar2cam_r.T
+            lidar2cam_rt[3, :3] = -lidar2cam_t
+            intrinsic = cam_info['cam_intrinsic']
+            viewpad = np.eye(4)
+            viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
+            lidar2img_rt = (viewpad @ lidar2cam_rt.T)
+
+            input_info[_cam] = {
+                'lidar2img': lidar2img_rt,
+                'lidar2cam': lidar2cam_rt,
+                'cam_intrinsic': viewpad}
+
+        for i in range(num_obj):
+            pts_filename = f'{image_idx}_{names[i]}_{i}.bin'
+            img_filename = f'{image_idx}_{names[i]}_{i}.png'
+            abs_filepath = osp.join(database_pts_path, pts_filename)
+            abs_img_filepath = osp.join(database_img_path, img_filename)
+            rel_filepath = osp.join(f'{info_prefix}_gt_database', 'pts_dir', pts_filename)
+            rel_img_filepath = osp.join(f'{info_prefix}_gt_database', 'img_dir', img_filename)
+
+            # save point clouds and image patches for each object
+            gt_points = points[point_indices[:, i]]
+            gt_points[:, :3] -= gt_boxes_3d[i, :3]
+
+            # binary dump of the object's points
+            with open(abs_filepath, 'wb') as f:
+                gt_points.tofile(f)
+
+            img_crop, crop_key, crop_depth = find_img_crop(annos['gt_bboxes_3d'][i].corners.numpy(), input_img, input_info, points[point_indices[:, i]])
+            if img_crop is not None:
+                mmcv.imwrite(img_crop, abs_img_filepath)
+
+            if (used_classes is None) or names[i] in used_classes:
+                db_info = {
+                    'name': names[i],
+                    'path': rel_filepath,
+                    'image_idx': image_idx,
+                    'image_path': rel_img_filepath if img_crop is not None else '',
+                    'image_crop_key': crop_key if img_crop is not None else '',
+                    'image_crop_depth': crop_depth,
+                    'gt_idx': i,
+                    'box3d_lidar': gt_boxes_3d[i],
+                    'num_points_in_gt': gt_points.shape[0],
+                    'difficulty': difficulty[i],
+                }
+                local_group_id = group_ids[i]
+                if local_group_id not in group_dict:
+                    group_dict[local_group_id] = group_counter
+                    group_counter += 1
+                db_info['group_id'] = group_dict[local_group_id]
+                if 'score' in annos:
+                    db_info['score'] = annos['score'][i]
+                if names[i] in all_db_infos:
+                    all_db_infos[names[i]].append(db_info)
+                else:
+                    all_db_infos[names[i]] = [db_info]
+
+    for k, v in all_db_infos.items():
+        print(f'load {len(v)} {k} database infos')
+
+    with open(db_info_save_path, 'wb') as f:
+        pickle.dump(all_db_infos, f)
+
+
+def find_img_crop(gt_boxes_3d, input_img, input_info, points):
+    coord_3d = np.concatenate([gt_boxes_3d, np.ones_like(gt_boxes_3d[..., :1])], -1)
+    coord_3d = coord_3d.squeeze(0)
+    max_crop, crop_key = None, None
+    crop_area, crop_depth = 0, 0
+
+    for _key in input_img:
+        lidar2img = np.array(input_info[_key]['lidar2img'])
+        coord_img = coord_3d @ lidar2img.T
+        coord_img[:, :2] /= coord_img[:, 2, None]
+        image_shape = input_img[_key].shape
+        # skip camera views where any box corner lies behind the image plane
+        if (coord_img[:, 2] <= 0).any():
+            continue
+
+        avg_depth = coord_img[:, 2].mean()
+        minxy = np.min(coord_img[:, :2], axis=-2)
+        maxxy = np.max(coord_img[:, :2], axis=-2)
+        bbox = np.concatenate([minxy, maxxy], axis=-1)
+        bbox[0::2] = np.clip(bbox[0::2], a_min=0, a_max=image_shape[1] - 1)
+        bbox[1::2] = np.clip(bbox[1::2], a_min=0, a_max=image_shape[0] - 1)
+        bbox = bbox.astype(int)
+        if ((bbox[2:] - bbox[:2]) <= 10).any():
+            continue
+ + img_crop = input_img[_key][bbox[1]:bbox[3],bbox[0]:bbox[2]] + if img_crop.shape[0] * img_crop.shape[1] > crop_area: + max_crop = img_crop + crop_key = _key + crop_depth = avg_depth + + return max_crop, crop_key, crop_depth + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Data converter arg parser') + parser.add_argument( + '--dataset', + type=str, + default='NuScenesSweepDataset', + required=False, + help='specify dataset name') + parser.add_argument( + '--root-path', + type=str, + default='./data/nuscenes', + help='specify the root path of dataset') + parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') + parser.add_argument( + '--out-dir', + type=str, + default='./data/nuscenes', + required=False, + help='output data dir') + parser.add_argument( + '--info-path', + type=str, + default='./data/nuscenes/nuscenes_img_pro_infos_train.pkl', + required=False, + help='name of info pkl') + parser.add_argument('--extra-tag', type=str, default='nuscenes_unified') + args = parser.parse_args() + + plugin_dir = 'projects/mmdet3d_plugin/' + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + create_groundtruth_database(args.dataset, args.root_path, args.extra_tag, + args.info_path) \ No newline at end of file diff --git a/tools/data_converter/nusc_radar.py b/tools/data_converter/nusc_radar.py new file mode 100644 index 0000000..92e8c1e --- /dev/null +++ b/tools/data_converter/nusc_radar.py @@ -0,0 +1,651 @@ +# ------------------------------------------------------------------------ +# Copyright (c) 2023 megvii-model. All Rights Reserved. +# ------------------------------------------------------------------------ +# Modified from FUTR3D (https://github.com/Tsinghua-MARS-Lab/futr3d) +# Copyright (c) 2022 Chen, Xuanyao +# ------------------------------------------------------------------------ +# Modified from mmdetection3d (https://github.com/open-mmlab/mmdetection3d) +# Copyright (c) OpenMMLab. All rights reserved. +# ------------------------------------------------------------------------ + +import mmcv +import numpy as np +import os +from collections import OrderedDict +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from os import path as osp +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box +from typing import List, Tuple, Union + +from mmdet3d.core.bbox.box_np_ops import points_cam2img +from mmdet3d.datasets import NuScenesDataset + +# remove the classes barrier, trafficcone and construction_vehicle +nus_categories = ('car', 'truck', 'trailer', 'bus', + 'bicycle', 'motorcycle', 'pedestrian',) + +nus_attributes = ('cycle.with_rider', 'cycle.without_rider', + 'pedestrian.moving', 'pedestrian.standing', + 'pedestrian.sitting_lying_down', 'vehicle.moving', + 'vehicle.parked', 'vehicle.stopped', 'None') + + +def create_nuscenes_infos(root_path, + info_prefix, + version='v1.0-trainval', + max_sweeps=10): + """Create info file of nuscene dataset. + Given the raw data, generate its related info file in pkl format. + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str): Version of the data. 
+ Default: 'v1.0-trainval' + max_sweeps (int): Max number of sweeps. + Default: 10 + """ + from nuscenes.nuscenes import NuScenes + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + from nuscenes.utils import splits + available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + assert version in available_vers + if version == 'v1.0-trainval': + train_scenes = splits.train + val_scenes = splits.val + elif version == 'v1.0-test': + train_scenes = splits.test + val_scenes = [] + elif version == 'v1.0-mini': + train_scenes = splits.mini_train + val_scenes = splits.mini_val + else: + raise ValueError('unknown') + + # filter existing scenes. + available_scenes = get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print('test scene: {}'.format(len(train_scenes))) + else: + print('train scene: {}, val scene: {}'.format( + len(train_scenes), len(val_scenes))) + train_nusc_infos, val_nusc_infos = _fill_trainval_infos( + nusc, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print('test sample: {}'.format(len(train_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(root_path, + '{}_infos_test.pkl'.format(info_prefix)) + mmcv.dump(data, info_path) + else: + print('train sample: {}, val sample: {}'.format( + len(train_nusc_infos), len(val_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(root_path, + '{}_infos_train.pkl'.format(info_prefix)) + mmcv.dump(data, info_path) + data['infos'] = val_nusc_infos + info_val_path = osp.join(root_path, + '{}_infos_val.pkl'.format(info_prefix)) + mmcv.dump(data, info_val_path) + + +def get_available_scenes(nusc): + """Get available scenes from the input nuscenes class. + Given the raw data, get the information of available scenes for + further info generation. + Args: + nusc (class): Dataset class in the nuScenes dataset. + Returns: + available_scenes (list[dict]): List of basic information for the + available scenes. + """ + available_scenes = [] + print('total scene num: {}'.format(len(nusc.scene))) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path = str(lidar_path) + if os.getcwd() in lidar_path: + # path from lyftdataset is absolute path + lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + if not mmcv.is_filepath(lidar_path): + scene_not_exist = True + break + else: + break + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num: {}'.format(len(available_scenes))) + return available_scenes + + +def _fill_trainval_infos(nusc, + train_scenes, + val_scenes, + test=False, + max_sweeps=10): + """Generate the train/val infos from the raw data. 
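+    Each key frame yields one info dict of roughly this shape (abridged
+    sketch; see the body below for the full set of keys):
+        info = {
+            'lidar_path': str, 'token': str, 'timestamp': int,
+            'sweeps': [...], 'cams': {CAM_NAME: {...}},
+            'radars': {RADAR_NAME: [...]},
+            'gt_boxes': (N, 7) array, 'gt_names': (N,) array,
+            'gt_velocity': (N, 2) array, 'valid_flag': (N,) bool array,
+        }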
+ Args: + nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset. + train_scenes (list[str]): Basic information of training scenes. + val_scenes (list[str]): Basic information of validation scenes. + test (bool): Whether use the test mode. In the test mode, no + annotations can be accessed. Default: False. + max_sweeps (int): Max number of sweeps. Default: 10. + Returns: + tuple[list[dict]]: Information of training set and validation set + that will be saved to the info file. + """ + train_nusc_infos = [] + val_nusc_infos = [] + + for sample in mmcv.track_iter_progress(nusc.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) + + mmcv.check_file_exist(lidar_path) + + info = { + 'lidar_path': lidar_path, + 'token': sample['token'], + 'sweeps': [], + 'cams': dict(), + 'radars': dict(), + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # radar + radar_names = ['RADAR_FRONT', 'RADAR_FRONT_LEFT', 'RADAR_FRONT_RIGHT', 'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT'] + + for radar_name in radar_names: + radar_token = sample['data'][radar_name] + radar_rec = nusc.get('sample_data', radar_token) + sweeps = [] + + while len(sweeps) < 5: + if not radar_rec['prev'] == '': + radar_path, _, radar_intrin = nusc.get_sample_data(radar_token) + + radar_info = obtain_sensor2top(nusc, radar_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, radar_name) + sweeps.append(radar_info) + radar_token = radar_rec['prev'] + radar_rec = nusc.get('sample_data', radar_token) + else: + radar_path, _, radar_intrin = nusc.get_sample_data(radar_token) + + radar_info = obtain_sensor2top(nusc, radar_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, radar_name) + sweeps.append(radar_info) + + info['radars'].update({radar_name: sweeps}) + + # obtain sweeps for a single key-frame + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = nusc.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + nusc.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 
3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + velocity = np.array( + [nusc.box_velocity(token)[:2] for token in sample['anns']]) + valid_flag = np.array( + [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 + for anno in annotations], + dtype=bool).reshape(-1) + # convert velo from global to lidar + for i in range(len(boxes)): + velo = np.array([*velocity[i], 0.0]) + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( + l2e_r_mat).T + velocity[i] = velo[:2] + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in NuScenesDataset.NameMapping: + names[i] = NuScenesDataset.NameMapping[names[i]] + names = np.array(names) + # update valid now + name_in_track = [_a in nus_categories for _a in names] + name_in_track = np.array(name_in_track) + valid_flag = np.logical_and(valid_flag, name_in_track) + + # add instance_ids + instance_inds = [nusc.getind('instance', ann['instance_token']) for ann in annotations] + # we need to convert rot to SECOND format. + gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + info['gt_boxes'] = gt_boxes + info['gt_names'] = names + info['gt_velocity'] = velocity.reshape(-1, 2) + info['num_lidar_pts'] = np.array( + [a['num_lidar_pts'] for a in annotations]) + info['num_radar_pts'] = np.array( + [a['num_radar_pts'] for a in annotations]) + info['valid_flag'] = valid_flag + info['instance_inds'] = instance_inds + + if sample['scene_token'] in train_scenes: + train_nusc_infos.append(info) + else: + val_nusc_infos.append(info) + + return train_nusc_infos, val_nusc_infos + + +def obtain_sensor2top(nusc, + sensor_token, + l2e_t, + l2e_r_mat, + e2g_t, + e2g_r_mat, + sensor_type='lidar'): + """Obtain the info with RT matric from general sensor to Top LiDAR. + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str): Sensor to calibrate. Default: 'lidar'. + Returns: + sweep (dict): Sweep information after transformation. 
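+    Note:
+        In homogeneous 4x4 form the composed transform is
+        sensor2lidar = inv(lidar2ego_key) @ inv(ego2global_key)
+                       @ ego2global_sweep @ sensor2ego_sweep,
+        which the code below expands into the rotation `R` and
+        translation `T` stored in the returned sweep.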
+ """ + sd_rec = nusc.get('sample_data', sensor_token) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + if os.getcwd() in data_path: # path from lyftdataset is absolute path + data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + sweep = { + 'data_path': data_path, + 'type': sensor_type, + 'sample_data_token': sd_rec['token'], + 'sensor2ego_translation': cs_record['translation'], + 'sensor2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sd_rec['timestamp'] + } + l2e_r_s = sweep['sensor2ego_rotation'] + l2e_t_s = sweep['sensor2ego_translation'] + e2g_r_s = sweep['ego2global_rotation'] + e2g_t_s = sweep['ego2global_translation'] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T + sweep['sensor2lidar_translation'] = T + return sweep + + +def export_2d_annotation(root_path, info_path, version, mono3d=True): + """Export 2d annotation from the info file and raw data. + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. + mono3d (bool): Whether to export mono3d annotation. Default: True. 
+ """ + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + nusc_infos = mmcv.load(info_path)['infos'] + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + # info_2d_list = [] + cat2Ids = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + for info in mmcv.track_iter_progress(nusc_infos): + for cam in camera_types: + cam_info = info['cams'][cam] + coco_infos = get_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4'], + mono3d=mono3d) + (height, width, _) = mmcv.imread(cam_info['data_path']).shape + coco_2d_dict['images'].append( + dict( + file_name=cam_info['data_path'].split('data/nuscenes/') + [-1], + id=cam_info['sample_data_token'], + token=info['token'], + cam2ego_rotation=cam_info['sensor2ego_rotation'], + cam2ego_translation=cam_info['sensor2ego_translation'], + ego2global_rotation=info['ego2global_rotation'], + ego2global_translation=info['ego2global_translation'], + cam_intrinsic=cam_info['cam_intrinsic'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(nusc, + sample_data_token: str, + visibilities: List[str], + mono3d=True): + """Get the 2D annotation records for a given `sample_data_token`. + Args: + sample_data_token (str): Sample data token belonging to a camera \ + keyframe. + visibilities (list[str]): Visibility filter. + mono3d (bool): Whether to get boxes with mono3d annotation. + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. + ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. + box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. 
+ box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. + box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, sd_rec['filename']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + loc = box.center.tolist() + dim = box.wlh.tolist() + rot = [box.orientation.yaw_pitch_roll[0]] + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_cam3d'] = loc + dim + rot + repro_rec['velo_cam3d'] = velo + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + repro_rec['attribute_name'] = attr_name + repro_rec['attribute_id'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def post_process_coords( + corner_coords: List, imsize: Tuple[int, int] = (1600, 900) +) -> Union[Tuple[float, float, float, float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + Args: + corner_coords (list[int]): Corner coordinates of reprojected + bounding box. + imsize (tuple[int]): Size of the image canvas. + Return: + tuple [float]: Intersection of the convex hull of the 2D box + corners and the image canvas. 
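+
+    Example:
+        A sketch with assumed corner inputs; the convex hull is clipped
+        to the default 1600x900 canvas::
+
+            post_process_coords([[-10., 20.], [200., 20.],
+                                 [200., 300.], [-10., 300.]])
+            # returns (0.0, 20.0, 200.0, 300.0)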
+ """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + sample_data_token: str, filename: str) -> OrderedDict: + """Generate one 2D annotation record given various informations on top of + the 2D bounding box coordinates. + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + Returns: + dict: A sample 2D annotation record. + - file_name (str): flie name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, dx, dy of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + relevant_keys = [ + 'attribute_tokens', + 'category_name', + 'instance_token', + 'next', + 'num_lidar_pts', + 'num_radar_pts', + 'prev', + 'sample_annotation_token', + 'sample_data_token', + 'visibility_token', + ] + + for key, value in ann_rec.items(): + if key in relevant_keys: + repro_rec[key] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + return None + cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = nus_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec + + +if __name__ == '__main__': + create_nuscenes_infos('/data/datasets/nuScenes/', 'radar_nuscenes_5sweeps', version='v1.0-trainval') \ No newline at end of file diff --git a/tools/data_converter/nuscenes_converter.py b/tools/data_converter/nuscenes_converter.py new file mode 100644 index 0000000..14bd08f --- /dev/null +++ b/tools/data_converter/nuscenes_converter.py @@ -0,0 +1,648 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import mmcv +import numpy as np +import os +from collections import OrderedDict +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from os import path as osp +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box +from typing import List, Tuple, Union + +from mmdet3d.core.bbox.box_np_ops import points_cam2img +from projects.mmdet3d_plugin.datasets import NuScenesSweepDataset + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +nus_attributes = ('cycle.with_rider', 'cycle.without_rider', + 'pedestrian.moving', 'pedestrian.standing', + 'pedestrian.sitting_lying_down', 'vehicle.moving', + 'vehicle.parked', 'vehicle.stopped', 'None') + + +def create_nuscenes_infos(root_path, + info_prefix, + version='v1.0-trainval', + max_sweeps=10): + """Create info file of nuscene dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str): Version of the data. + Default: 'v1.0-trainval' + max_sweeps (int): Max number of sweeps. + Default: 10 + """ + from nuscenes.nuscenes import NuScenes + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + from nuscenes.utils import splits + available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + assert version in available_vers + if version == 'v1.0-trainval': + train_scenes = splits.train + val_scenes = splits.val + elif version == 'v1.0-test': + train_scenes = splits.test + val_scenes = [] + elif version == 'v1.0-mini': + train_scenes = splits.mini_train + val_scenes = splits.mini_val + else: + raise ValueError('unknown') + + # filter existing scenes. + available_scenes = get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print('test scene: {}'.format(len(train_scenes))) + else: + print('train scene: {}, val scene: {}'.format( + len(train_scenes), len(val_scenes))) + train_nusc_infos, val_nusc_infos = _fill_trainval_infos( + nusc, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print('test sample: {}'.format(len(train_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(root_path, + '{}_infos_test.pkl'.format(info_prefix)) + mmcv.dump(data, info_path) + else: + print('train sample: {}, val sample: {}'.format( + len(train_nusc_infos), len(val_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(root_path, + '{}_infos_train.pkl'.format(info_prefix)) + mmcv.dump(data, info_path) + data['infos'] = val_nusc_infos + info_val_path = osp.join(root_path, + '{}_infos_val.pkl'.format(info_prefix)) + mmcv.dump(data, info_val_path) + + +def get_available_scenes(nusc): + """Get available scenes from the input nuscenes class. + + Given the raw data, get the information of available scenes for + further info generation. 
+ + Args: + nusc (class): Dataset class in the nuScenes dataset. + + Returns: + available_scenes (list[dict]): List of basic information for the + available scenes. + """ + available_scenes = [] + print('total scene num: {}'.format(len(nusc.scene))) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path = str(lidar_path) + if os.getcwd() in lidar_path: + # path from lyftdataset is absolute path + lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + if not mmcv.is_filepath(lidar_path): + scene_not_exist = True + break + else: + break + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num: {}'.format(len(available_scenes))) + return available_scenes + + +def _fill_trainval_infos(nusc, + train_scenes, + val_scenes, + test=False, + max_sweeps=10): + """Generate the train/val infos from the raw data. + + Args: + nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset. + train_scenes (list[str]): Basic information of training scenes. + val_scenes (list[str]): Basic information of validation scenes. + test (bool): Whether use the test mode. In the test mode, no + annotations can be accessed. Default: False. + max_sweeps (int): Max number of sweeps. Default: 10. + + Returns: + tuple[list[dict]]: Information of training set and validation set + that will be saved to the info file. + """ + train_nusc_infos = [] + val_nusc_infos = [] + + for sample in mmcv.track_iter_progress(nusc.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) + + mmcv.check_file_exist(lidar_path) + + info = { + 'lidar_path': lidar_path, + 'token': sample['token'], + 'sweeps': [], + 'cams': dict(), + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = 
nusc.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + + # obtain image sweeps for a single key-frame + info['cam_sweeps'] = {} + info['cam_sweeps_info'] = {} + for cam in camera_types: + cam_rec = nusc.get('sample_data', sample['data'][cam]) + cam_sweeps = [] + info_sweeps = [] + # max sweep for camera is actually 6 + while len(cam_sweeps) < max_sweeps: + if not cam_rec['prev'] == '': + cam_token = cam_rec['token'] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + cam_sweeps.append(cam_path) + info_sweeps.append(cam_info) + cam_rec = nusc.get('sample_data', cam_rec['prev']) + else: + break + info['cam_sweeps'][cam] = cam_sweeps + info['cam_sweeps_info'][cam] = info_sweeps + + # obtain annotation + if not test: + annotations = [ + nusc.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + velocity = np.array( + [nusc.box_velocity(token)[:2] for token in sample['anns']]) + valid_flag = np.array( + [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 + for anno in annotations], + dtype=bool).reshape(-1) + # convert velo from global to lidar + for i in range(len(boxes)): + velo = np.array([*velocity[i], 0.0]) + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( + l2e_r_mat).T + velocity[i] = velo[:2] + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in NuScenesSweepDataset.NameMapping: + names[i] = NuScenesSweepDataset.NameMapping[names[i]] + names = np.array(names) + # we need to convert rot to SECOND format. + gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + info['gt_boxes'] = gt_boxes + info['gt_names'] = names + info['gt_velocity'] = velocity.reshape(-1, 2) + info['num_lidar_pts'] = np.array( + [a['num_lidar_pts'] for a in annotations]) + info['num_radar_pts'] = np.array( + [a['num_radar_pts'] for a in annotations]) + info['valid_flag'] = valid_flag + + if sample['scene_token'] in train_scenes: + train_nusc_infos.append(info) + else: + val_nusc_infos.append(info) + + return train_nusc_infos, val_nusc_infos + + +def obtain_sensor2top(nusc, + sensor_token, + l2e_t, + l2e_r_mat, + e2g_t, + e2g_r_mat, + sensor_type='lidar'): + """Obtain the info with RT matric from general sensor to Top LiDAR. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str): Sensor to calibrate. Default: 'lidar'. + + Returns: + sweep (dict): Sweep information after transformation. 
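+
+        Note:
+            The transform is composed along
+            sweep sensor -> ego -> global -> ego' -> key-frame lidar; in
+            row-vector form, ``pts @ sweep['sensor2lidar_rotation'].T +
+            sweep['sensor2lidar_translation']`` maps sweep points into the
+            key frame.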
+ """ + sd_rec = nusc.get('sample_data', sensor_token) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + if os.getcwd() in data_path: # path from lyftdataset is absolute path + data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + sweep = { + 'data_path': data_path, + 'type': sensor_type, + 'sample_data_token': sd_rec['token'], + 'sensor2ego_translation': cs_record['translation'], + 'sensor2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sd_rec['timestamp'] + } + l2e_r_s = sweep['sensor2ego_rotation'] + l2e_t_s = sweep['sensor2ego_translation'] + e2g_r_s = sweep['ego2global_rotation'] + e2g_t_s = sweep['ego2global_translation'] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T + sweep['sensor2lidar_translation'] = T + return sweep + + +def export_2d_annotation(root_path, info_path, version, mono3d=True): + """Export 2d annotation from the info file and raw data. + + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. + mono3d (bool): Whether to export mono3d annotation. Default: True. 
+ """ + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + nusc_infos = mmcv.load(info_path)['infos'] + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + # info_2d_list = [] + cat2Ids = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + for info in mmcv.track_iter_progress(nusc_infos): + for cam in camera_types: + cam_info = info['cams'][cam] + coco_infos = get_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4'], + mono3d=mono3d) + (height, width, _) = mmcv.imread(cam_info['data_path']).shape + coco_2d_dict['images'].append( + dict( + file_name=cam_info['data_path'].split('data/nuscenes/') + [-1], + id=cam_info['sample_data_token'], + token=info['token'], + cam2ego_rotation=cam_info['sensor2ego_rotation'], + cam2ego_translation=cam_info['sensor2ego_translation'], + ego2global_rotation=info['ego2global_rotation'], + ego2global_translation=info['ego2global_translation'], + cam_intrinsic=cam_info['cam_intrinsic'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(nusc, + sample_data_token: str, + visibilities: List[str], + mono3d=True): + """Get the 2D annotation records for a given `sample_data_token`. + + Args: + sample_data_token (str): Sample data token belonging to a camera \ + keyframe. + visibilities (list[str]): Visibility filter. + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. + ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. + box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. 
+ box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. + box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, sd_rec['filename']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + loc = box.center.tolist() + + dim = box.wlh + dim[[0, 1, 2]] = dim[[1, 2, 0]] # convert wlh to our lhw + dim = dim.tolist() + + rot = box.orientation.yaw_pitch_roll[0] + rot = [-rot] # convert the rot to our cam coordinate + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_cam3d'] = loc + dim + rot + repro_rec['velo_cam3d'] = velo + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + repro_rec['attribute_name'] = attr_name + repro_rec['attribute_id'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def post_process_coords( + corner_coords: List, imsize: Tuple[int, int] = (1600, 900) +) -> Union[Tuple[float, float, float, float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + + Args: + corner_coords (list[int]): Corner coordinates of reprojected + bounding box. + imsize (tuple[int]): Size of the image canvas. + + Return: + tuple [float]: Intersection of the convex hull of the 2D box + corners and the image canvas. 
+ """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + sample_data_token: str, filename: str) -> OrderedDict: + """Generate one 2D annotation record given various informations on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + + Returns: + dict: A sample 2D annotation record. + - file_name (str): flie name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, dx, dy of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + relevant_keys = [ + 'attribute_tokens', + 'category_name', + 'instance_token', + 'next', + 'num_lidar_pts', + 'num_radar_pts', + 'prev', + 'sample_annotation_token', + 'sample_data_token', + 'visibility_token', + ] + + for key, value in ann_rec.items(): + if key in relevant_keys: + repro_rec[key] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in NuScenesSweepDataset.NameMapping: + return None + cat_name = NuScenesSweepDataset.NameMapping[repro_rec['category_name']] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = nus_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec diff --git a/tools/dist_test.sh b/tools/dist_test.sh new file mode 100755 index 0000000..dea131b --- /dev/null +++ b/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh new file mode 100755 index 0000000..aa71bf4 --- /dev/null +++ b/tools/dist_train.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ 
+ --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --seed 0 \ + --launcher pytorch ${@:3} diff --git a/tools/test.py b/tools/test.py new file mode 100644 index 0000000..cf6bbf2 --- /dev/null +++ b/tools/test.py @@ -0,0 +1,289 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import warnings + +import mmcv +import torch +from mmcv import Config, DictAction +from mmcv.cnn import fuse_conv_bn +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, + wrap_fp16_model) + +import mmdet +from mmdet3d.apis import single_gpu_test +from mmdet3d.datasets import build_dataloader, build_dataset +from mmdet3d.models import build_model +from mmdet.apis import multi_gpu_test, set_random_seed +from mmdet.datasets import replace_ImageToTensor + +if mmdet.__version__ > '2.23.0': + # If mmdet version > 2.23.0, setup_multi_processes would be imported and + # used from mmdet instead of mmdet3d. + from mmdet.utils import setup_multi_processes +else: + from mmdet3d.utils import setup_multi_processes + +try: + # If mmdet version > 2.23.0, compat_cfg would be imported and + # used from mmdet instead of mmdet3d. + from mmdet.utils import compat_cfg +except ImportError: + from mmdet3d.utils import compat_cfg + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + parser.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='(Deprecated, please use --gpu-id) ids of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument( + '--gpu-id', + type=int, + default=0, + help='id of gpu to use ' + '(only applicable to non-distributed testing)') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where results will be saved') + parser.add_argument( + '--gpu-collect', + action='store_true', + help='whether to use gpu to collect results.') + parser.add_argument( + '--tmpdir', + help='tmp directory used for collecting results from multiple ' + 'workers, available when gpu-collect is not specified') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function (deprecate), ' + 'change to --eval-options instead.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.eval_options: + raise ValueError( + '--options and --eval-options cannot be both specified, ' + '--options is deprecated in favor of --eval-options') + if args.options: + warnings.warn('--options is deprecated in favor of --eval-options') + args.eval_options = args.options + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # import modules from string list. + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + + # import modules from plguin/xx, registry will be updated + if hasattr(cfg, 'plugin'): + if cfg.plugin: + import importlib + if hasattr(cfg, 'plugin_dir'): + plugin_dir = cfg.plugin_dir + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + else: + # import dir is the dirpath for the config file + _module_dir = os.path.dirname(args.config) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + cfg = compat_cfg(cfg) + + # set multi-process settings + setup_multi_processes(cfg) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + cfg.model.pretrained = None + + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed testing. Use the first GPU ' + 'in `gpu_ids` now.') + else: + cfg.gpu_ids = [args.gpu_id] + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + test_dataloader_default_args = dict( + samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False) + + # in case the test dataset is concatenated + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.test.pipeline = replace_ImageToTensor( + cfg.data.test.pipeline) + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1: + for ds_cfg in cfg.data.test: + ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) + + test_loader_cfg = { + **test_dataloader_default_args, + **cfg.data.get('test_dataloader', {}) + } + + # set random seeds + if args.seed is not None: + set_random_seed(args.seed, deterministic=args.deterministic) + + # build the dataloader + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader(dataset, **test_loader_cfg) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_model(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + if args.fuse_conv_bn: + model = fuse_conv_bn(model) + # old versions did not save class info in checkpoints, this walkaround is + # for backward compatibility + if 'CLASSES' in checkpoint.get('meta', {}): + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + model.CLASSES = dataset.CLASSES + # palette for visualization in segmentation tasks + if 'PALETTE' in checkpoint.get('meta', {}): + model.PALETTE = checkpoint['meta']['PALETTE'] + elif hasattr(dataset, 'PALETTE'): + # segmentation dataset has `PALETTE` attribute + model.PALETTE = dataset.PALETTE + + if not distributed: + model = MMDataParallel(model, device_ids=cfg.gpu_ids) + outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) + else: + model = MMDistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False) + outputs = multi_gpu_test(model, data_loader, args.tmpdir, + args.gpu_collect) + + rank, _ = get_dist_info() + if rank == 0: + if args.out: + print(f'\nwriting results to {args.out}') + mmcv.dump(outputs, args.out) + kwargs = {} if args.eval_options is None else args.eval_options + if args.format_only: + dataset.format_results(outputs, **kwargs) + if args.eval: + eval_kwargs = cfg.get('evaluation', {}).copy() + # hard-code way to remove EvalHook args + for key in [ + 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', + 'rule' + ]: + eval_kwargs.pop(key, None) + eval_kwargs.update(dict(metric=args.eval, **kwargs)) + print(dataset.evaluate(outputs, **eval_kwargs)) + + +if __name__ == '__main__': + main() diff --git a/tools/test_speed.py b/tools/test_speed.py new file mode 100644 index 0000000..82b4d21 --- /dev/null +++ b/tools/test_speed.py @@ -0,0 +1,85 @@ +# Copyright (c) 2023 megvii-model. All Rights Reserved. 
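+#
+# Benchmarks the average forward latency of a model over the validation
+# split. Edit the config path (and optionally pass a checkpoint) in
+# ``__main__`` at the bottom, then run:  python tools/test_speed.py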
+ +import torch +from mmcv import Config +from mmcv.parallel import MMDataParallel +from mmcv.runner import load_checkpoint +from mmdet3d.datasets import build_dataloader, build_dataset +from mmdet3d.models import build_detector + +import os +import time +import importlib +import matplotlib.pyplot as plt +import torchvision +import numpy as np +import cv2 +import pickle +import torch.nn as nn +from PIL import Image +import torch.nn.functional as F +import matplotlib + + +class Wrapper: + + def __init__(self, + cfg, + checkpoint=None) -> None: + self.cfg = Config.fromfile(cfg) + self.save_dir = './tmp' + self.init() + self.model = self._build_model(checkpoint) + self.dataset = self._build_dataset() + + def init(self): + self.cfg.model.pretrained = None + self.cfg.data.test.test_mode = True + plugin_dir = self.cfg.plugin_dir + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + def _build_model(self, checkpoint=None): + model = build_detector(self.cfg.model, test_cfg=self.cfg.get('test_cfg')) + if checkpoint: + load_checkpoint(model, checkpoint, map_location='cpu') + model = MMDataParallel(model, device_ids=[0]) + model.eval() + return model + + def _build_dataset(self): + dataset = build_dataset(self.cfg.data.val) + return dataset + + def test_speed(self, num_iters=100, amp=False): + data_loader = build_dataloader( + self.dataset, + samples_per_gpu=1, + workers_per_gpu=self.cfg.data.workers_per_gpu, + dist=False, + shuffle=False) + loader = iter(data_loader) + total_time = 0 + + with torch.cuda.amp.autocast(enabled=amp): + with torch.no_grad(): + for _ in range(num_iters): + data = next(loader) + t1 = time.time() + self.model(**data, return_loss=False) + total_time += time.time() - t1 + + print(f'Average time: {total_time / num_iters}') + + +if __name__ == '__main__': + wrapper = Wrapper( + cfg='your path to config file', + ) + wrapper.test_speed(amp=False) + \ No newline at end of file diff --git a/tools/train.py b/tools/train.py new file mode 100644 index 0000000..85acaaf --- /dev/null +++ b/tools/train.py @@ -0,0 +1,293 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from __future__ import division +import argparse +import copy +import os +import time +import warnings +from os import path as osp + +import mmcv +import torch +import torch.distributed as dist +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist + +from mmdet import __version__ as mmdet_version +from mmdet3d import __version__ as mmdet3d_version +from mmdet3d.apis import init_random_seed, train_model +from mmdet3d.datasets import build_dataset +from mmdet3d.models import build_model +from mmdet3d.utils import collect_env, get_root_logger +from mmdet.apis import set_random_seed +from mmseg import __version__ as mmseg_version + +try: + # If mmdet version > 2.20.0, setup_multi_processes would be imported and + # used from mmdet instead of mmdet3d. 
+    from mmdet.utils import setup_multi_processes
+except ImportError:
+    from mmdet3d.utils import setup_multi_processes
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Train a detector')
+    parser.add_argument('config', help='train config file path')
+    parser.add_argument('--work-dir', help='the dir to save logs and models')
+    parser.add_argument(
+        '--resume-from', help='the checkpoint file to resume from')
+    parser.add_argument(
+        '--auto-resume',
+        action='store_true',
+        help='resume from the latest checkpoint automatically')
+    parser.add_argument(
+        '--no-validate',
+        action='store_true',
+        help='whether not to evaluate the checkpoint during training')
+    group_gpus = parser.add_mutually_exclusive_group()
+    group_gpus.add_argument(
+        '--gpus',
+        type=int,
+        help='(Deprecated, please use --gpu-id) number of gpus to use '
+        '(only applicable to non-distributed training)')
+    group_gpus.add_argument(
+        '--gpu-ids',
+        type=int,
+        nargs='+',
+        help='(Deprecated, please use --gpu-id) ids of gpus to use '
+        '(only applicable to non-distributed training)')
+    group_gpus.add_argument(
+        '--gpu-id',
+        type=int,
+        default=0,
+        help='id of the gpu to use '
+        '(only applicable to non-distributed training)')
+    parser.add_argument('--seed', type=int, default=0, help='random seed')
+    parser.add_argument(
+        '--diff-seed',
+        action='store_true',
+        help='Whether or not to set different seeds for different ranks')
+    parser.add_argument(
+        '--deterministic',
+        action='store_true',
+        help='whether to set deterministic options for CUDNN backend.')
+    parser.add_argument(
+        '--options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into config file (deprecated), '
+        'change to --cfg-options instead.')
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into config file. If the value to '
+        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
+        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--autoscale-lr', + action='store_true', + help='automatically scale lr with the number of gpus') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both specified, ' + '--options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # set multi-process settings + setup_multi_processes(cfg) + + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + + # import modules from plguin/xx, registry will be updated + if hasattr(cfg, 'plugin'): + if cfg.plugin: + import importlib + if hasattr(cfg, 'plugin_dir'): + plugin_dir = cfg.plugin_dir + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + else: + # import dir is the dirpath for the config file + _module_dir = os.path.dirname(args.config) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + plg_lib = importlib.import_module('mmdetection3d.mmdet3d') + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + if args.resume_from is not None: + cfg.resume_from = args.resume_from + + if args.auto_resume: + cfg.auto_resume = args.auto_resume + warnings.warn('`--auto-resume` is only supported when mmdet' + 'version >= 2.20.0 for 3D detection model or' + 'mmsegmentation verision >= 0.21.0 for 3D' + 'segmentation model') + + if args.gpus is not None: + cfg.gpu_ids = range(1) + warnings.warn('`--gpus` is deprecated because we only support ' + 'single GPU mode in non-distributed training. ' + 'Use `gpus=1` now.') + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids[0:1] + warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. ' + 'Because we only support single GPU mode in ' + 'non-distributed training. 
+                      'non-distributed training. Use the first GPU '
+                      'in `gpu_ids` now.')
+    if args.gpus is None and args.gpu_ids is None:
+        cfg.gpu_ids = [args.gpu_id]
+
+    if args.autoscale_lr:
+        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
+        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
+
+    # init distributed env first, since logger depends on the dist info.
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+        # re-set gpu_ids with distributed training mode
+        _, world_size = get_dist_info()
+        cfg.gpu_ids = range(world_size)
+
+    # create work_dir
+    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
+    # dump config
+    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
+    # init the logger before other steps
+    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
+    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
+    # specify logger name, if we still use 'mmdet', the output info will be
+    # filtered and won't be saved in the log_file
+    # TODO: ugly workaround to judge whether we are training det or seg model
+    if cfg.model.type in ['EncoderDecoder3D']:
+        logger_name = 'mmseg'
+    else:
+        logger_name = 'mmdet'
+    logger = get_root_logger(
+        log_file=log_file, log_level=cfg.log_level, name=logger_name)
+
+    # init the meta dict to record some important information such as
+    # environment info and seed, which will be logged
+    meta = dict()
+    # log env info
+    env_info_dict = collect_env()
+    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
+    dash_line = '-' * 60 + '\n'
+    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
+                dash_line)
+    meta['env_info'] = env_info
+    meta['config'] = cfg.pretty_text
+
+    # log some basic info
+    logger.info(f'Distributed training: {distributed}')
+    logger.info(f'Config:\n{cfg.pretty_text}')
+
+    # set random seeds
+    seed = init_random_seed(args.seed)
+    seed = seed + dist.get_rank() if args.diff_seed else seed
+    logger.info(f'Set random seed to {seed}, '
+                f'deterministic: {args.deterministic}')
+    set_random_seed(seed, deterministic=args.deterministic)
+    cfg.seed = seed
+    meta['seed'] = seed
+    meta['exp_name'] = osp.basename(args.config)
+
+    model = build_model(
+        cfg.model,
+        train_cfg=cfg.get('train_cfg'),
+        test_cfg=cfg.get('test_cfg'))
+    model.init_weights()
+
+    logger.info(f'Model:\n{model}')
+    datasets = [build_dataset(cfg.data.train)]
+    if len(cfg.workflow) == 2:
+        val_dataset = copy.deepcopy(cfg.data.val)
+        # in case we use a dataset wrapper
+        if 'dataset' in cfg.data.train:
+            val_dataset.pipeline = cfg.data.train.dataset.pipeline
+        else:
+            val_dataset.pipeline = cfg.data.train.pipeline
+        # set test_mode=False here in deep copied config
+        # which does not affect AP/AR calculation later
+        # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow  # noqa
+        val_dataset.test_mode = False
+        datasets.append(build_dataset(val_dataset))
+    if cfg.checkpoint_config is not None:
+        # save mmdet version, config file content and class names in
+        # checkpoints as meta data
+        cfg.checkpoint_config.meta = dict(
+            mmdet_version=mmdet_version,
+            mmseg_version=mmseg_version,
+            mmdet3d_version=mmdet3d_version,
+            config=cfg.pretty_text,
+            CLASSES=datasets[0].CLASSES,
+            PALETTE=datasets[0].PALETTE  # for segmentors
+            if hasattr(datasets[0], 'PALETTE') else None)
+    # add an attribute for visualization convenience
+    model.CLASSES = datasets[0].CLASSES
+    train_model(
+        model,
+        datasets,
+        cfg,
+        distributed=distributed,
+        validate=(not args.no_validate),
+        timestamp=timestamp,
+        meta=meta)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
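
Usage sketch for the scripts added in this patch (the checkpoint path and GPU
count below are assumptions; the config is one of the files added under
projects/configs):

    # generate the nuScenes info files consumed by the converters above
    bash tools/create_data.sh

    # single-node training on 8 GPUs
    bash tools/dist_train.sh projects/configs/fusion/cmt_voxel0075_vov_1600x640_cbgs.py 8

    # distributed evaluation of a trained checkpoint
    bash tools/dist_test.sh projects/configs/fusion/cmt_voxel0075_vov_1600x640_cbgs.py \
        work_dirs/cmt_voxel0075_vov_1600x640_cbgs/latest.pth 8 --eval bbox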